summaryrefslogtreecommitdiff
path: root/runtime/onert
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/onert')
-rw-r--r--runtime/onert/CMakeLists.txt15
-rw-r--r--runtime/onert/api/CMakeLists.txt27
-rw-r--r--runtime/onert/api/include/nnfw.h490
-rw-r--r--runtime/onert/api/include/nnfw_experimental.h99
-rw-r--r--runtime/onert/api/include/nnfw_internal.h38
-rw-r--r--runtime/onert/api/include/nnfw_version.h26
-rw-r--r--runtime/onert/api/src/CustomKernel.cc114
-rw-r--r--runtime/onert/api/src/CustomKernel.h60
-rw-r--r--runtime/onert/api/src/CustomKernelRegistry.cc64
-rw-r--r--runtime/onert/api/src/CustomKernelRegistry.h64
-rw-r--r--runtime/onert/api/src/OpMap.lst152
-rw-r--r--runtime/onert/api/src/nnfw_api.cc362
-rw-r--r--runtime/onert/api/src/nnfw_api_internal.cc896
-rw-r--r--runtime/onert/api/src/nnfw_api_internal.h161
-rw-r--r--runtime/onert/api/src/nnfw_debug.cc29
-rw-r--r--runtime/onert/backend/CMakeLists.txt6
-rw-r--r--runtime/onert/backend/acl_cl/Backend.h71
-rw-r--r--runtime/onert/backend/acl_cl/CLTimer.h108
-rw-r--r--runtime/onert/backend/acl_cl/CMakeLists.txt19
-rw-r--r--runtime/onert/backend/acl_cl/Config.cc67
-rw-r--r--runtime/onert/backend/acl_cl/Config.h50
-rw-r--r--runtime/onert/backend/acl_cl/ConstantInitializer.cc146
-rw-r--r--runtime/onert/backend/acl_cl/ConstantInitializer.h48
-rw-r--r--runtime/onert/backend/acl_cl/KernelGenerator.cc1618
-rw-r--r--runtime/onert/backend/acl_cl/KernelGenerator.h102
-rw-r--r--runtime/onert/backend/acl_cl/Optimizer.cc58
-rw-r--r--runtime/onert/backend/acl_cl/Optimizer.h47
-rw-r--r--runtime/onert/backend/acl_cl/TensorBuilder.h39
-rw-r--r--runtime/onert/backend/acl_cl/TensorManager.h78
-rw-r--r--runtime/onert/backend/acl_cl/acl_cl.cc33
-rw-r--r--runtime/onert/backend/acl_cl/operand/CLSubTensor.cc44
-rw-r--r--runtime/onert/backend/acl_cl/operand/CLSubTensor.h62
-rw-r--r--runtime/onert/backend/acl_cl/operand/CLTensor.cc58
-rw-r--r--runtime/onert/backend/acl_cl/operand/CLTensor.h73
-rw-r--r--runtime/onert/backend/acl_cl/operand/ICLTensor.cc45
-rw-r--r--runtime/onert/backend/acl_cl/operand/ICLTensor.h52
-rw-r--r--runtime/onert/backend/acl_common/AclActivationBuilder.h125
-rw-r--r--runtime/onert/backend/acl_common/AclConstantInitializer.cc128
-rw-r--r--runtime/onert/backend/acl_common/AclConstantInitializer.h61
-rw-r--r--runtime/onert/backend/acl_common/AclFunction.h54
-rw-r--r--runtime/onert/backend/acl_common/AclInternalBufferManager.h97
-rw-r--r--runtime/onert/backend/acl_common/AclKernelGen.h342
-rw-r--r--runtime/onert/backend/acl_common/AclLinearMemoryManager.h110
-rw-r--r--runtime/onert/backend/acl_common/AclMemoryManager.h98
-rw-r--r--runtime/onert/backend/acl_common/AclSubTensorAnalyzer.h111
-rw-r--r--runtime/onert/backend/acl_common/AclTensorBuilder.h438
-rw-r--r--runtime/onert/backend/acl_common/AclTensorManager.h301
-rw-r--r--runtime/onert/backend/acl_common/AclTensorRegistry.h53
-rw-r--r--runtime/onert/backend/acl_common/CMakeLists.txt19
-rw-r--r--runtime/onert/backend/acl_common/Convert.cc365
-rw-r--r--runtime/onert/backend/acl_common/Convert.h95
-rw-r--r--runtime/onert/backend/acl_common/IACLTensor.cc77
-rw-r--r--runtime/onert/backend/acl_common/IACLTensor.h69
-rw-r--r--runtime/onert/backend/acl_common/ParentInfo.h44
-rw-r--r--runtime/onert/backend/acl_common/Swizzle.h160
-rw-r--r--runtime/onert/backend/acl_neon/Backend.h71
-rw-r--r--runtime/onert/backend/acl_neon/CMakeLists.txt19
-rw-r--r--runtime/onert/backend/acl_neon/Config.cc47
-rw-r--r--runtime/onert/backend/acl_neon/Config.h48
-rw-r--r--runtime/onert/backend/acl_neon/ConstantInitializer.cc90
-rw-r--r--runtime/onert/backend/acl_neon/ConstantInitializer.h44
-rw-r--r--runtime/onert/backend/acl_neon/KernelGenerator.cc1437
-rw-r--r--runtime/onert/backend/acl_neon/KernelGenerator.h96
-rw-r--r--runtime/onert/backend/acl_neon/Optimizer.cc58
-rw-r--r--runtime/onert/backend/acl_neon/Optimizer.h47
-rw-r--r--runtime/onert/backend/acl_neon/TensorBuilder.h39
-rw-r--r--runtime/onert/backend/acl_neon/TensorManager.h77
-rw-r--r--runtime/onert/backend/acl_neon/acl_neon.cc33
-rw-r--r--runtime/onert/backend/acl_neon/operand/INETensor.cc40
-rw-r--r--runtime/onert/backend/acl_neon/operand/INETensor.h46
-rw-r--r--runtime/onert/backend/acl_neon/operand/NESubTensor.cc44
-rw-r--r--runtime/onert/backend/acl_neon/operand/NESubTensor.h62
-rw-r--r--runtime/onert/backend/acl_neon/operand/NETensor.cc45
-rw-r--r--runtime/onert/backend/acl_neon/operand/NETensor.h64
-rw-r--r--runtime/onert/backend/cpu/Backend.h70
-rw-r--r--runtime/onert/backend/cpu/BackendContext.h60
-rw-r--r--runtime/onert/backend/cpu/CMakeLists.txt18
-rw-r--r--runtime/onert/backend/cpu/Config.cc32
-rw-r--r--runtime/onert/backend/cpu/Config.h48
-rw-r--r--runtime/onert/backend/cpu/ConstantInitializer.cc94
-rw-r--r--runtime/onert/backend/cpu/ConstantInitializer.h63
-rw-r--r--runtime/onert/backend/cpu/ExternalContext.h61
-rw-r--r--runtime/onert/backend/cpu/KernelGenerator.cc1289
-rw-r--r--runtime/onert/backend/cpu/KernelGenerator.h112
-rw-r--r--runtime/onert/backend/cpu/StaticTensorManager.cc107
-rw-r--r--runtime/onert/backend/cpu/StaticTensorManager.h64
-rw-r--r--runtime/onert/backend/cpu/Tensor.cc32
-rw-r--r--runtime/onert/backend/cpu/Tensor.h122
-rw-r--r--runtime/onert/backend/cpu/TensorBuilder.cc90
-rw-r--r--runtime/onert/backend/cpu/TensorBuilder.h74
-rw-r--r--runtime/onert/backend/cpu/cpu.cc33
-rw-r--r--runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc118
-rw-r--r--runtime/onert/backend/cpu/ops/ArgMinMaxLayer.h56
-rw-r--r--runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc85
-rw-r--r--runtime/onert/backend/cpu/ops/BatchMatMulLayer.h72
-rw-r--r--runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc83
-rw-r--r--runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h59
-rw-r--r--runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc226
-rw-r--r--runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.h69
-rw-r--r--runtime/onert/backend/cpu/ops/BroadcastToLayer.cc74
-rw-r--r--runtime/onert/backend/cpu/ops/BroadcastToLayer.h56
-rw-r--r--runtime/onert/backend/cpu/ops/CompareLayer.cc175
-rw-r--r--runtime/onert/backend/cpu/ops/CompareLayer.h57
-rw-r--r--runtime/onert/backend/cpu/ops/ConcatLayer.cc143
-rw-r--r--runtime/onert/backend/cpu/ops/ConcatLayer.h59
-rw-r--r--runtime/onert/backend/cpu/ops/ConvolutionLayer.cc211
-rw-r--r--runtime/onert/backend/cpu/ops/ConvolutionLayer.h96
-rw-r--r--runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.cc138
-rw-r--r--runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.h77
-rw-r--r--runtime/onert/backend/cpu/ops/EinsumLayer.cc84
-rw-r--r--runtime/onert/backend/cpu/ops/EinsumLayer.h72
-rw-r--r--runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc173
-rw-r--r--runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.h67
-rw-r--r--runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc151
-rw-r--r--runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.h67
-rw-r--r--runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc336
-rw-r--r--runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.h75
-rw-r--r--runtime/onert/backend/cpu/ops/ExpandDimsLayer.cc51
-rw-r--r--runtime/onert/backend/cpu/ops/ExpandDimsLayer.h55
-rw-r--r--runtime/onert/backend/cpu/ops/FillLayer.cc75
-rw-r--r--runtime/onert/backend/cpu/ops/FillLayer.h54
-rw-r--r--runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc268
-rw-r--r--runtime/onert/backend/cpu/ops/FullyConnectedLayer.h90
-rw-r--r--runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc94
-rw-r--r--runtime/onert/backend/cpu/ops/FusedBatchNormLayer.h73
-rw-r--r--runtime/onert/backend/cpu/ops/GatherLayer.cc95
-rw-r--r--runtime/onert/backend/cpu/ops/GatherLayer.h63
-rw-r--r--runtime/onert/backend/cpu/ops/L2NormLayer.cc71
-rw-r--r--runtime/onert/backend/cpu/ops/L2NormLayer.h55
-rw-r--r--runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc102
-rw-r--r--runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h64
-rw-r--r--runtime/onert/backend/cpu/ops/MatrixBandPartLayer.cc94
-rw-r--r--runtime/onert/backend/cpu/ops/MatrixBandPartLayer.h60
-rw-r--r--runtime/onert/backend/cpu/ops/MeanLayer.cc81
-rw-r--r--runtime/onert/backend/cpu/ops/MeanLayer.h60
-rw-r--r--runtime/onert/backend/cpu/ops/OneHotLayer.cc69
-rw-r--r--runtime/onert/backend/cpu/ops/OneHotLayer.h67
-rw-r--r--runtime/onert/backend/cpu/ops/OperationUtils.cc266
-rw-r--r--runtime/onert/backend/cpu/ops/OperationUtils.h211
-rw-r--r--runtime/onert/backend/cpu/ops/PackLayer.cc96
-rw-r--r--runtime/onert/backend/cpu/ops/PackLayer.h56
-rw-r--r--runtime/onert/backend/cpu/ops/PadLayer.cc80
-rw-r--r--runtime/onert/backend/cpu/ops/PadLayer.h63
-rw-r--r--runtime/onert/backend/cpu/ops/PoolLayer.cc132
-rw-r--r--runtime/onert/backend/cpu/ops/PoolLayer.h68
-rw-r--r--runtime/onert/backend/cpu/ops/PowLayer.cc73
-rw-r--r--runtime/onert/backend/cpu/ops/PowLayer.h63
-rw-r--r--runtime/onert/backend/cpu/ops/RangeLayer.cc70
-rw-r--r--runtime/onert/backend/cpu/ops/RangeLayer.h54
-rw-r--r--runtime/onert/backend/cpu/ops/RankLayer.cc57
-rw-r--r--runtime/onert/backend/cpu/ops/RankLayer.h53
-rw-r--r--runtime/onert/backend/cpu/ops/ReduceLayer.cc227
-rw-r--r--runtime/onert/backend/cpu/ops/ReduceLayer.h85
-rw-r--r--runtime/onert/backend/cpu/ops/ReshapeLayer.cc53
-rw-r--r--runtime/onert/backend/cpu/ops/ReshapeLayer.h57
-rw-r--r--runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc119
-rw-r--r--runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h62
-rw-r--r--runtime/onert/backend/cpu/ops/ReverseLayer.cc68
-rw-r--r--runtime/onert/backend/cpu/ops/ReverseLayer.h58
-rw-r--r--runtime/onert/backend/cpu/ops/SelectLayer.cc89
-rw-r--r--runtime/onert/backend/cpu/ops/SelectLayer.h56
-rw-r--r--runtime/onert/backend/cpu/ops/ShapeLayer.cc81
-rw-r--r--runtime/onert/backend/cpu/ops/ShapeLayer.h55
-rw-r--r--runtime/onert/backend/cpu/ops/SliceLayer.cc109
-rw-r--r--runtime/onert/backend/cpu/ops/SliceLayer.h64
-rw-r--r--runtime/onert/backend/cpu/ops/SoftMaxLayer.cc148
-rw-r--r--runtime/onert/backend/cpu/ops/SoftMaxLayer.h59
-rw-r--r--runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc114
-rw-r--r--runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.h60
-rw-r--r--runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc74
-rw-r--r--runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h54
-rw-r--r--runtime/onert/backend/cpu/ops/SplitLayer.cc103
-rw-r--r--runtime/onert/backend/cpu/ops/SplitLayer.h58
-rw-r--r--runtime/onert/backend/cpu/ops/SplitVLayer.cc99
-rw-r--r--runtime/onert/backend/cpu/ops/SplitVLayer.h60
-rw-r--r--runtime/onert/backend/cpu/ops/SquaredDiffLayer.cc66
-rw-r--r--runtime/onert/backend/cpu/ops/SquaredDiffLayer.h57
-rw-r--r--runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc68
-rw-r--r--runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h59
-rw-r--r--runtime/onert/backend/cpu/ops/StridedSliceLayer.cc94
-rw-r--r--runtime/onert/backend/cpu/ops/StridedSliceLayer.h68
-rw-r--r--runtime/onert/backend/cpu/ops/TileLayer.cc77
-rw-r--r--runtime/onert/backend/cpu/ops/TileLayer.h59
-rw-r--r--runtime/onert/backend/cpu/ops/TransposeLayer.cc111
-rw-r--r--runtime/onert/backend/cpu/ops/TransposeLayer.h59
-rw-r--r--runtime/onert/backend/cpu/ops/UnpackLayer.cc91
-rw-r--r--runtime/onert/backend/cpu/ops/UnpackLayer.h58
-rw-r--r--runtime/onert/core/CMakeLists.txt45
-rw-r--r--runtime/onert/core/include/backend/Backend.h50
-rw-r--r--runtime/onert/core/include/backend/BackendContext.h92
-rw-r--r--runtime/onert/core/include/backend/CustomKernelBuilder.h76
-rw-r--r--runtime/onert/core/include/backend/IConfig.h77
-rw-r--r--runtime/onert/core/include/backend/IConstantInitializer.h230
-rw-r--r--runtime/onert/core/include/backend/IDynamicTensorManager.h60
-rw-r--r--runtime/onert/core/include/backend/IExternalContext.h34
-rw-r--r--runtime/onert/core/include/backend/IKernelGenerator.h76
-rw-r--r--runtime/onert/core/include/backend/IMemoryManager.h49
-rw-r--r--runtime/onert/core/include/backend/IOptimizer.h51
-rw-r--r--runtime/onert/core/include/backend/IPortableTensor.h58
-rw-r--r--runtime/onert/core/include/backend/IStaticTensorManager.h35
-rw-r--r--runtime/onert/core/include/backend/ITensor.h111
-rw-r--r--runtime/onert/core/include/backend/ITensorBuilder.h108
-rw-r--r--runtime/onert/core/include/backend/ITensorManager.h52
-rw-r--r--runtime/onert/core/include/backend/ITensorRegister.h97
-rw-r--r--runtime/onert/core/include/backend/ITensorRegistry.h146
-rw-r--r--runtime/onert/core/include/backend/cpu_common/Allocator.h56
-rw-r--r--runtime/onert/core/include/backend/cpu_common/DynamicTensorManager.h76
-rw-r--r--runtime/onert/core/include/backend/cpu_common/IMemoryPlanner.h74
-rw-r--r--runtime/onert/core/include/backend/cpu_common/MemoryManager.h76
-rw-r--r--runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h68
-rw-r--r--runtime/onert/core/include/backend/cpu_common/Tensor.h184
-rw-r--r--runtime/onert/core/include/backend/cpu_common/TensorRegistry.h36
-rw-r--r--runtime/onert/core/include/compiler/BackendManager.h82
-rw-r--r--runtime/onert/core/include/compiler/BackendResolver.h60
-rw-r--r--runtime/onert/core/include/compiler/CodeMap.h45
-rw-r--r--runtime/onert/core/include/compiler/Compiler.h122
-rw-r--r--runtime/onert/core/include/compiler/ExecutionBuilder.h49
-rw-r--r--runtime/onert/core/include/compiler/LoweredGraph.h90
-rw-r--r--runtime/onert/core/include/compiler/StaticShapeInference.h141
-rw-r--r--runtime/onert/core/include/exec/DynamicShapeInference.h123
-rw-r--r--runtime/onert/core/include/exec/Execution.h162
-rw-r--r--runtime/onert/core/include/exec/FunctionSequence.h132
-rw-r--r--runtime/onert/core/include/exec/IExecutor.h75
-rw-r--r--runtime/onert/core/include/exec/IFunction.h36
-rw-r--r--runtime/onert/core/include/exec/IODescription.h72
-rw-r--r--runtime/onert/core/include/exec/NopFunction.h48
-rw-r--r--runtime/onert/core/include/ir/Coordinates.h122
-rw-r--r--runtime/onert/core/include/ir/Data.h107
-rw-r--r--runtime/onert/core/include/ir/DataType.h45
-rw-r--r--runtime/onert/core/include/ir/Graph.h124
-rw-r--r--runtime/onert/core/include/ir/Index.h45
-rw-r--r--runtime/onert/core/include/ir/InternalType.h52
-rw-r--r--runtime/onert/core/include/ir/Layout.h67
-rw-r--r--runtime/onert/core/include/ir/LowerInfoMap.h42
-rw-r--r--runtime/onert/core/include/ir/OpCode.h58
-rw-r--r--runtime/onert/core/include/ir/OpSequence.h102
-rw-r--r--runtime/onert/core/include/ir/OpSequences.h91
-rw-r--r--runtime/onert/core/include/ir/Operand.h117
-rw-r--r--runtime/onert/core/include/ir/OperandConstraint.h58
-rw-r--r--runtime/onert/core/include/ir/OperandIndexMap.h34
-rw-r--r--runtime/onert/core/include/ir/OperandIndexSequence.h95
-rw-r--r--runtime/onert/core/include/ir/OperandInfo.h141
-rw-r--r--runtime/onert/core/include/ir/Operands.h46
-rw-r--r--runtime/onert/core/include/ir/Operation.h76
-rw-r--r--runtime/onert/core/include/ir/OperationIndexMap.h34
-rw-r--r--runtime/onert/core/include/ir/OperationIndexSet.h65
-rw-r--r--runtime/onert/core/include/ir/OperationVisitor.h50
-rw-r--r--runtime/onert/core/include/ir/Operations.Include.h84
-rw-r--r--runtime/onert/core/include/ir/Operations.h43
-rw-r--r--runtime/onert/core/include/ir/Operations.lst87
-rw-r--r--runtime/onert/core/include/ir/Padding.h74
-rw-r--r--runtime/onert/core/include/ir/Shape.h151
-rw-r--r--runtime/onert/core/include/ir/Sparsity.h64
-rw-r--r--runtime/onert/core/include/ir/Subgraphs.h139
-rw-r--r--runtime/onert/core/include/ir/TypeInfo.h67
-rw-r--r--runtime/onert/core/include/ir/operand/LowerInfo.h69
-rw-r--r--runtime/onert/core/include/ir/operand/PermuteFactor.h130
-rw-r--r--runtime/onert/core/include/ir/operation/ArgMax.h62
-rw-r--r--runtime/onert/core/include/ir/operation/BCQFullyConnected.h67
-rw-r--r--runtime/onert/core/include/ir/operation/BCQGather.h66
-rw-r--r--runtime/onert/core/include/ir/operation/BatchMatMul.h63
-rw-r--r--runtime/onert/core/include/ir/operation/BatchToSpaceND.h51
-rw-r--r--runtime/onert/core/include/ir/operation/BinaryArithmetic.h73
-rw-r--r--runtime/onert/core/include/ir/operation/BroadcastTo.h52
-rw-r--r--runtime/onert/core/include/ir/operation/Comparison.h72
-rw-r--r--runtime/onert/core/include/ir/operation/Concat.h58
-rw-r--r--runtime/onert/core/include/ir/operation/Conv2D.h70
-rw-r--r--runtime/onert/core/include/ir/operation/ConvertFp16ToFp32.h49
-rw-r--r--runtime/onert/core/include/ir/operation/ConvertFp32ToFp16.h49
-rw-r--r--runtime/onert/core/include/ir/operation/Custom.h75
-rw-r--r--runtime/onert/core/include/ir/operation/DepthToSpace.h63
-rw-r--r--runtime/onert/core/include/ir/operation/DepthwiseConv2D.h70
-rw-r--r--runtime/onert/core/include/ir/operation/Einsum.h57
-rw-r--r--runtime/onert/core/include/ir/operation/ElementwiseActivation.h77
-rw-r--r--runtime/onert/core/include/ir/operation/ElementwiseBinary.h71
-rw-r--r--runtime/onert/core/include/ir/operation/ElementwiseUnary.h83
-rw-r--r--runtime/onert/core/include/ir/operation/EmbeddingLookup.h50
-rw-r--r--runtime/onert/core/include/ir/operation/ExpandDims.h52
-rw-r--r--runtime/onert/core/include/ir/operation/Fill.h50
-rw-r--r--runtime/onert/core/include/ir/operation/FullyConnected.h66
-rw-r--r--runtime/onert/core/include/ir/operation/FusedBatchNorm.h68
-rw-r--r--runtime/onert/core/include/ir/operation/Gather.h64
-rw-r--r--runtime/onert/core/include/ir/operation/HashtableLookup.h57
-rw-r--r--runtime/onert/core/include/ir/operation/If.h57
-rw-r--r--runtime/onert/core/include/ir/operation/InstanceNorm.h65
-rw-r--r--runtime/onert/core/include/ir/operation/L2Normalization.h49
-rw-r--r--runtime/onert/core/include/ir/operation/LSTM.h96
-rw-r--r--runtime/onert/core/include/ir/operation/LocalResponseNormalization.h66
-rw-r--r--runtime/onert/core/include/ir/operation/LogSoftmax.h64
-rw-r--r--runtime/onert/core/include/ir/operation/LowerInfo.h54
-rw-r--r--runtime/onert/core/include/ir/operation/MatrixBandPart.h53
-rw-r--r--runtime/onert/core/include/ir/operation/OneHot.h60
-rw-r--r--runtime/onert/core/include/ir/operation/PReLU.h50
-rw-r--r--runtime/onert/core/include/ir/operation/Pack.h52
-rw-r--r--runtime/onert/core/include/ir/operation/Pad.h51
-rw-r--r--runtime/onert/core/include/ir/operation/Permute.h65
-rw-r--r--runtime/onert/core/include/ir/operation/Pool2D.h78
-rw-r--r--runtime/onert/core/include/ir/operation/Pow.h51
-rw-r--r--runtime/onert/core/include/ir/operation/RNN.h70
-rw-r--r--runtime/onert/core/include/ir/operation/Range.h51
-rw-r--r--runtime/onert/core/include/ir/operation/Rank.h51
-rw-r--r--runtime/onert/core/include/ir/operation/Reduce.h77
-rw-r--r--runtime/onert/core/include/ir/operation/Reshape.h62
-rw-r--r--runtime/onert/core/include/ir/operation/ResizeBilinear.h68
-rw-r--r--runtime/onert/core/include/ir/operation/ResizeNearestNeighbor.h67
-rw-r--r--runtime/onert/core/include/ir/operation/Reverse.h50
-rw-r--r--runtime/onert/core/include/ir/operation/Select.h51
-rw-r--r--runtime/onert/core/include/ir/operation/Shape.h51
-rw-r--r--runtime/onert/core/include/ir/operation/Slice.h51
-rw-r--r--runtime/onert/core/include/ir/operation/Softmax.h63
-rw-r--r--runtime/onert/core/include/ir/operation/SpaceToBatchND.h53
-rw-r--r--runtime/onert/core/include/ir/operation/SpaceToDepth.h63
-rw-r--r--runtime/onert/core/include/ir/operation/Split.h58
-rw-r--r--runtime/onert/core/include/ir/operation/SplitV.h59
-rw-r--r--runtime/onert/core/include/ir/operation/SquaredDifference.h50
-rw-r--r--runtime/onert/core/include/ir/operation/Squeeze.h62
-rw-r--r--runtime/onert/core/include/ir/operation/StatelessRandomUniform.h52
-rw-r--r--runtime/onert/core/include/ir/operation/StridedSlice.h68
-rw-r--r--runtime/onert/core/include/ir/operation/Tile.h52
-rw-r--r--runtime/onert/core/include/ir/operation/TopKV2.h69
-rw-r--r--runtime/onert/core/include/ir/operation/Transpose.h52
-rw-r--r--runtime/onert/core/include/ir/operation/TransposeConv.h68
-rw-r--r--runtime/onert/core/include/ir/operation/Unpack.h58
-rw-r--r--runtime/onert/core/include/ir/operation/While.h58
-rw-r--r--runtime/onert/core/include/util/Config.lst46
-rw-r--r--runtime/onert/core/include/util/ConfigSource.h58
-rw-r--r--runtime/onert/core/include/util/EnvConfigSource.h41
-rw-r--r--runtime/onert/core/include/util/Exceptions.h48
-rw-r--r--runtime/onert/core/include/util/GeneralConfigSource.h44
-rw-r--r--runtime/onert/core/include/util/IConfigSource.h46
-rw-r--r--runtime/onert/core/include/util/ITimer.h59
-rw-r--r--runtime/onert/core/include/util/Index.h169
-rw-r--r--runtime/onert/core/include/util/ObjectManager.h148
-rw-r--r--runtime/onert/core/include/util/Set.h166
-rw-r--r--runtime/onert/core/include/util/ShapeInference.h153
-rw-r--r--runtime/onert/core/include/util/Utils.h108
-rw-r--r--runtime/onert/core/include/util/logging.h67
-rw-r--r--runtime/onert/core/src/backend/BackendContext.cc55
-rw-r--r--runtime/onert/core/src/backend/IConstantInitializer.cc112
-rw-r--r--runtime/onert/core/src/backend/IPortableTensor.cc29
-rw-r--r--runtime/onert/core/src/backend/ITensor.cc34
-rw-r--r--runtime/onert/core/src/backend/controlflow/Backend.h86
-rw-r--r--runtime/onert/core/src/backend/controlflow/BackendContext.h60
-rw-r--r--runtime/onert/core/src/backend/controlflow/Config.cc37
-rw-r--r--runtime/onert/core/src/backend/controlflow/Config.h53
-rw-r--r--runtime/onert/core/src/backend/controlflow/ConstantInitializer.h52
-rw-r--r--runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc80
-rw-r--r--runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h75
-rw-r--r--runtime/onert/core/src/backend/controlflow/ExternalContext.h62
-rw-r--r--runtime/onert/core/src/backend/controlflow/KernelGenerator.cc158
-rw-r--r--runtime/onert/core/src/backend/controlflow/KernelGenerator.h73
-rw-r--r--runtime/onert/core/src/backend/controlflow/Tensor.h35
-rw-r--r--runtime/onert/core/src/backend/controlflow/TensorBuilder.cc116
-rw-r--r--runtime/onert/core/src/backend/controlflow/TensorBuilder.h83
-rw-r--r--runtime/onert/core/src/backend/controlflow/TensorRegistry.h133
-rw-r--r--runtime/onert/core/src/backend/controlflow/UserTensor.cc53
-rw-r--r--runtime/onert/core/src/backend/controlflow/UserTensor.h85
-rw-r--r--runtime/onert/core/src/backend/controlflow/kernel/IfLayer.cc129
-rw-r--r--runtime/onert/core/src/backend/controlflow/kernel/IfLayer.h60
-rw-r--r--runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.cc79
-rw-r--r--runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.h71
-rw-r--r--runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.cc220
-rw-r--r--runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.h62
-rw-r--r--runtime/onert/core/src/backend/cpu_common/Allocator.cc38
-rw-r--r--runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc80
-rw-r--r--runtime/onert/core/src/backend/cpu_common/MemoryManager.cc108
-rw-r--r--runtime/onert/core/src/backend/cpu_common/MemoryPlanner.cc215
-rw-r--r--runtime/onert/core/src/backend/cpu_common/MemoryPlanner.h160
-rw-r--r--runtime/onert/core/src/backend/cpu_common/MemoryPlanner.test.cc193
-rw-r--r--runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.cc53
-rw-r--r--runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.h47
-rw-r--r--runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc118
-rw-r--r--runtime/onert/core/src/backend/cpu_common/Tensor.cc97
-rw-r--r--runtime/onert/core/src/compiler/BackendManager.cc146
-rw-r--r--runtime/onert/core/src/compiler/BackendResolver.cc25
-rw-r--r--runtime/onert/core/src/compiler/Compiler.cc326
-rw-r--r--runtime/onert/core/src/compiler/ExecutorFactory.cc501
-rw-r--r--runtime/onert/core/src/compiler/ExecutorFactory.h73
-rw-r--r--runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc954
-rw-r--r--runtime/onert/core/src/compiler/Fp32ToFp16Converter.h101
-rw-r--r--runtime/onert/core/src/compiler/HEScheduler.cc593
-rw-r--r--runtime/onert/core/src/compiler/HEScheduler.h186
-rw-r--r--runtime/onert/core/src/compiler/IScheduler.h38
-rw-r--r--runtime/onert/core/src/compiler/Linear.cc231
-rw-r--r--runtime/onert/core/src/compiler/Linear.h54
-rw-r--r--runtime/onert/core/src/compiler/LoweredGraph.cc565
-rw-r--r--runtime/onert/core/src/compiler/ManualScheduler.cc124
-rw-r--r--runtime/onert/core/src/compiler/ManualScheduler.h47
-rw-r--r--runtime/onert/core/src/compiler/OperationValidator.cc244
-rw-r--r--runtime/onert/core/src/compiler/OperationValidator.h78
-rw-r--r--runtime/onert/core/src/compiler/ParamChecker.cc33
-rw-r--r--runtime/onert/core/src/compiler/ParamChecker.h73
-rw-r--r--runtime/onert/core/src/compiler/ShapeValidator.cc1021
-rw-r--r--runtime/onert/core/src/compiler/ShapeValidator.h102
-rw-r--r--runtime/onert/core/src/compiler/StaticShapeInference.cc1302
-rw-r--r--runtime/onert/core/src/compiler/TensorBuilders.h78
-rw-r--r--runtime/onert/core/src/compiler/TensorRegistries.h91
-rw-r--r--runtime/onert/core/src/compiler/pass/ConstantInsertionPass.cc93
-rw-r--r--runtime/onert/core/src/compiler/pass/ConstantInsertionPass.h76
-rw-r--r--runtime/onert/core/src/compiler/pass/ConstantLoweringPass.cc56
-rw-r--r--runtime/onert/core/src/compiler/pass/ConstantLoweringPass.h46
-rw-r--r--runtime/onert/core/src/compiler/pass/ConstantOutputPass.cc68
-rw-r--r--runtime/onert/core/src/compiler/pass/ConstantOutputPass.h63
-rw-r--r--runtime/onert/core/src/compiler/pass/LoweredOperandPass.h52
-rw-r--r--runtime/onert/core/src/compiler/pass/LoweredOperationPass.h52
-rw-r--r--runtime/onert/core/src/compiler/pass/OddOutputPass.cc90
-rw-r--r--runtime/onert/core/src/compiler/pass/OddOutputPass.h89
-rw-r--r--runtime/onert/core/src/compiler/pass/OperandPass.cc36
-rw-r--r--runtime/onert/core/src/compiler/pass/OperandPass.h54
-rw-r--r--runtime/onert/core/src/compiler/pass/OperationPass.cc38
-rw-r--r--runtime/onert/core/src/compiler/pass/OperationPass.h77
-rw-r--r--runtime/onert/core/src/compiler/pass/Pass.h55
-rw-r--r--runtime/onert/core/src/compiler/pass/PassRunner.cc45
-rw-r--r--runtime/onert/core/src/compiler/pass/PassRunner.h53
-rw-r--r--runtime/onert/core/src/compiler/pass/PermutationEliminationPass.cc167
-rw-r--r--runtime/onert/core/src/compiler/pass/PermutationEliminationPass.h65
-rw-r--r--runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc216
-rw-r--r--runtime/onert/core/src/compiler/pass/PermutationInsertionPass.h58
-rw-r--r--runtime/onert/core/src/compiler/pass/PermutationOperationPass.cc351
-rw-r--r--runtime/onert/core/src/compiler/pass/PermutationOperationPass.h65
-rw-r--r--runtime/onert/core/src/dumper/dot/DotBuilder.cc83
-rw-r--r--runtime/onert/core/src/dumper/dot/DotBuilder.h62
-rw-r--r--runtime/onert/core/src/dumper/dot/DotDumper.cc201
-rw-r--r--runtime/onert/core/src/dumper/dot/DotDumper.h69
-rw-r--r--runtime/onert/core/src/dumper/dot/DotSubgraphInfo.cc58
-rw-r--r--runtime/onert/core/src/dumper/dot/DotSubgraphInfo.h61
-rw-r--r--runtime/onert/core/src/dumper/dot/Node.cc56
-rw-r--r--runtime/onert/core/src/dumper/dot/Node.h127
-rw-r--r--runtime/onert/core/src/dumper/dot/OperandNode.cc60
-rw-r--r--runtime/onert/core/src/dumper/dot/OperandNode.h79
-rw-r--r--runtime/onert/core/src/dumper/dot/OperationNode.cc46
-rw-r--r--runtime/onert/core/src/dumper/dot/OperationNode.h62
-rw-r--r--runtime/onert/core/src/exec/BackendSet.h40
-rw-r--r--runtime/onert/core/src/exec/DataflowExecutor.cc183
-rw-r--r--runtime/onert/core/src/exec/DataflowExecutor.h96
-rw-r--r--runtime/onert/core/src/exec/DynamicShapeInference.cc1236
-rw-r--r--runtime/onert/core/src/exec/ExecTime.cc137
-rw-r--r--runtime/onert/core/src/exec/ExecTime.h112
-rw-r--r--runtime/onert/core/src/exec/Execution.cc182
-rw-r--r--runtime/onert/core/src/exec/ExecutionObservee.cc64
-rw-r--r--runtime/onert/core/src/exec/ExecutionObservee.h56
-rw-r--r--runtime/onert/core/src/exec/ExecutionObservers.cc135
-rw-r--r--runtime/onert/core/src/exec/ExecutionObservers.h88
-rw-r--r--runtime/onert/core/src/exec/ExecutorBase.cc203
-rw-r--r--runtime/onert/core/src/exec/ExecutorBase.h107
-rw-r--r--runtime/onert/core/src/exec/FunctionSequence.cc93
-rw-r--r--runtime/onert/core/src/exec/IPermuteFunction.h224
-rw-r--r--runtime/onert/core/src/exec/JSONExecTime.cc231
-rw-r--r--runtime/onert/core/src/exec/JSONExecTime.h97
-rw-r--r--runtime/onert/core/src/exec/Job.cc33
-rw-r--r--runtime/onert/core/src/exec/Job.h69
-rw-r--r--runtime/onert/core/src/exec/LinearExecutor.cc67
-rw-r--r--runtime/onert/core/src/exec/LinearExecutor.h72
-rw-r--r--runtime/onert/core/src/exec/ParallelExecutor.cc156
-rw-r--r--runtime/onert/core/src/exec/ParallelExecutor.h69
-rw-r--r--runtime/onert/core/src/exec/ParallelScheduler.cc55
-rw-r--r--runtime/onert/core/src/exec/ParallelScheduler.h60
-rw-r--r--runtime/onert/core/src/exec/ShapeConverter.cc60
-rw-r--r--runtime/onert/core/src/exec/ShapeConverter.h39
-rw-r--r--runtime/onert/core/src/exec/ThreadPool.cc65
-rw-r--r--runtime/onert/core/src/exec/ThreadPool.h73
-rw-r--r--runtime/onert/core/src/exec/WorkQueue.cc104
-rw-r--r--runtime/onert/core/src/exec/WorkQueue.h87
-rw-r--r--runtime/onert/core/src/exec/feature/IndexIterator.h104
-rw-r--r--runtime/onert/core/src/exec/feature/Reader.h68
-rw-r--r--runtime/onert/core/src/exec/feature/nchw/Reader.h118
-rw-r--r--runtime/onert/core/src/exec/feature/nchw/View.h69
-rw-r--r--runtime/onert/core/src/exec/feature/nhwc/Reader.h120
-rw-r--r--runtime/onert/core/src/exec/feature/nhwc/View.h70
-rw-r--r--runtime/onert/core/src/interp/Buffer.h91
-rw-r--r--runtime/onert/core/src/interp/ExecEnv.h212
-rw-r--r--runtime/onert/core/src/interp/InterpExecutor.cc126
-rw-r--r--runtime/onert/core/src/interp/InterpExecutor.h70
-rw-r--r--runtime/onert/core/src/interp/InterpOps.lst73
-rw-r--r--runtime/onert/core/src/interp/Interpreter.cc184
-rw-r--r--runtime/onert/core/src/interp/Interpreter.h64
-rw-r--r--runtime/onert/core/src/interp/Registration.h43
-rw-r--r--runtime/onert/core/src/interp/Tensor.cc53
-rw-r--r--runtime/onert/core/src/interp/Tensor.h183
-rw-r--r--runtime/onert/core/src/interp/operations/BinaryArithmeticOps.cc205
-rw-r--r--runtime/onert/core/src/interp/operations/Concat.cc147
-rw-r--r--runtime/onert/core/src/interp/operations/Conv2D.cc151
-rw-r--r--runtime/onert/core/src/interp/operations/DepthwiseConv2D.cc156
-rw-r--r--runtime/onert/core/src/interp/operations/ElementwiseActivations.cc161
-rw-r--r--runtime/onert/core/src/interp/operations/FullyConnected.cc136
-rw-r--r--runtime/onert/core/src/interp/operations/Gather.cc138
-rw-r--r--runtime/onert/core/src/interp/operations/InstanceNorm.cc121
-rw-r--r--runtime/onert/core/src/interp/operations/OperationUtil.h203
-rw-r--r--runtime/onert/core/src/interp/operations/Pad.cc106
-rw-r--r--runtime/onert/core/src/interp/operations/Pool2D.cc140
-rw-r--r--runtime/onert/core/src/interp/operations/Reshape.cc63
-rw-r--r--runtime/onert/core/src/interp/operations/Softmax.cc123
-rw-r--r--runtime/onert/core/src/interp/operations/TransposeConv.cc141
-rw-r--r--runtime/onert/core/src/ir/Coordinates.cc50
-rw-r--r--runtime/onert/core/src/ir/DataType.cc55
-rw-r--r--runtime/onert/core/src/ir/Graph.cc146
-rw-r--r--runtime/onert/core/src/ir/GraphIterator.cc121
-rw-r--r--runtime/onert/core/src/ir/GraphIterator.h90
-rw-r--r--runtime/onert/core/src/ir/LayoutSet.cc66
-rw-r--r--runtime/onert/core/src/ir/LayoutSet.h58
-rw-r--r--runtime/onert/core/src/ir/OpCode.cc47
-rw-r--r--runtime/onert/core/src/ir/OpSequence.cc95
-rw-r--r--runtime/onert/core/src/ir/OpSequences.cc124
-rw-r--r--runtime/onert/core/src/ir/Operand.cc50
-rw-r--r--runtime/onert/core/src/ir/OperandIndexSequence.cc77
-rw-r--r--runtime/onert/core/src/ir/Operands.cc36
-rw-r--r--runtime/onert/core/src/ir/Operation.cc66
-rw-r--r--runtime/onert/core/src/ir/OperationCloner.cc42
-rw-r--r--runtime/onert/core/src/ir/OperationCloner.h46
-rw-r--r--runtime/onert/core/src/ir/OperationDumper.cc521
-rw-r--r--runtime/onert/core/src/ir/OperationDumper.h96
-rw-r--r--runtime/onert/core/src/ir/OperationIndexSet.cc37
-rw-r--r--runtime/onert/core/src/ir/Operations.cc37
-rw-r--r--runtime/onert/core/src/ir/Padding.cc160
-rw-r--r--runtime/onert/core/src/ir/Shape.cc114
-rw-r--r--runtime/onert/core/src/ir/TypeInfo.cc47
-rw-r--r--runtime/onert/core/src/ir/operation/ArgMax.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/BCQFullyConnected.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/BCQGather.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/BatchMatMul.cc37
-rw-r--r--runtime/onert/core/src/ir/operation/BatchToSpaceND.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/BinaryArithmetic.cc52
-rw-r--r--runtime/onert/core/src/ir/operation/BroadcastTo.cc38
-rw-r--r--runtime/onert/core/src/ir/operation/Comparison.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Concat.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Conv2D.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/ConvertFp16ToFp32.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/ConvertFp32ToFp16.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Custom.cc44
-rw-r--r--runtime/onert/core/src/ir/operation/DepthToSpace.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/DepthwiseConv2D.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Einsum.cc37
-rw-r--r--runtime/onert/core/src/ir/operation/ElementwiseActivation.cc72
-rw-r--r--runtime/onert/core/src/ir/operation/ElementwiseBinary.cc52
-rw-r--r--runtime/onert/core/src/ir/operation/ElementwiseUnary.cc67
-rw-r--r--runtime/onert/core/src/ir/operation/EmbeddingLookup.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/ExpandDims.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/Fill.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/FullyConnected.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/FusedBatchNorm.cc37
-rw-r--r--runtime/onert/core/src/ir/operation/Gather.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/HashtableLookup.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/If.cc32
-rw-r--r--runtime/onert/core/src/ir/operation/InstanceNorm.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/L2Normalization.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/LSTM.cc48
-rw-r--r--runtime/onert/core/src/ir/operation/LocalResponseNormalization.cc41
-rw-r--r--runtime/onert/core/src/ir/operation/LogSoftmax.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/LowerInfo.cc34
-rw-r--r--runtime/onert/core/src/ir/operation/MatrixBandPart.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/OneHot.cc37
-rw-r--r--runtime/onert/core/src/ir/operation/PReLU.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/Pack.cc33
-rw-r--r--runtime/onert/core/src/ir/operation/Pad.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/Permute.cc41
-rw-r--r--runtime/onert/core/src/ir/operation/Pool2D.cc51
-rw-r--r--runtime/onert/core/src/ir/operation/Pow.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/RNN.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Range.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/Rank.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/Reduce.cc56
-rw-r--r--runtime/onert/core/src/ir/operation/Reshape.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/ResizeBilinear.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/ResizeNearestNeighbor.cc41
-rw-r--r--runtime/onert/core/src/ir/operation/Reverse.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/Select.cc37
-rw-r--r--runtime/onert/core/src/ir/operation/Shape.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/Slice.cc36
-rw-r--r--runtime/onert/core/src/ir/operation/Softmax.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/SpaceToBatchND.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/SpaceToDepth.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Split.cc33
-rw-r--r--runtime/onert/core/src/ir/operation/SplitV.cc33
-rw-r--r--runtime/onert/core/src/ir/operation/SquaredDifference.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Squeeze.cc37
-rw-r--r--runtime/onert/core/src/ir/operation/StatelessRandomUniform.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/StridedSlice.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Tile.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/TopKV2.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Transpose.cc39
-rw-r--r--runtime/onert/core/src/ir/operation/TransposeConv.cc40
-rw-r--r--runtime/onert/core/src/ir/operation/Unpack.cc33
-rw-r--r--runtime/onert/core/src/ir/operation/While.cc33
-rw-r--r--runtime/onert/core/src/ir/verifier/Verifier.cc132
-rw-r--r--runtime/onert/core/src/ir/verifier/Verifier.h68
-rw-r--r--runtime/onert/core/src/library_info.cc17
-rw-r--r--runtime/onert/core/src/util/ConfigSource.cc122
-rw-r--r--runtime/onert/core/src/util/EnvConfigSource.cc40
-rw-r--r--runtime/onert/core/src/util/EventCollector.cc109
-rw-r--r--runtime/onert/core/src/util/EventCollector.h51
-rw-r--r--runtime/onert/core/src/util/EventCollectorGlobal.cc94
-rw-r--r--runtime/onert/core/src/util/EventCollectorGlobal.h155
-rw-r--r--runtime/onert/core/src/util/EventRecorder.cc31
-rw-r--r--runtime/onert/core/src/util/EventRecorder.h69
-rw-r--r--runtime/onert/core/src/util/EventWriter.cc574
-rw-r--r--runtime/onert/core/src/util/EventWriter.h51
-rw-r--r--runtime/onert/core/src/util/GeneralConfigSource.cc45
-rw-r--r--runtime/onert/core/src/util/ShapeInference.cc1130
-rw-r--r--runtime/onert/core/src/util/logging.cc23
-rw-r--r--runtime/onert/frontend/CMakeLists.txt1
-rw-r--r--runtime/onert/frontend/base_loader/CMakeLists.txt11
-rw-r--r--runtime/onert/frontend/base_loader/include/base_loader.h1523
-rw-r--r--runtime/onert/frontend/circle/CMakeLists.txt15
-rw-r--r--runtime/onert/frontend/circle/include/circle_loader.h33
-rw-r--r--runtime/onert/frontend/circle/src/circle_loader.cc221
-rw-r--r--runtime/onert/frontend/circle_schema/CMakeLists.txt7
-rw-r--r--runtime/onert/frontend/circle_schema/include/circle_schema_generated.h10112
-rw-r--r--runtime/onert/frontend/nnapi/CMakeLists.txt27
-rw-r--r--runtime/onert/frontend/nnapi/compilation.cc106
-rw-r--r--runtime/onert/frontend/nnapi/event.cc36
-rw-r--r--runtime/onert/frontend/nnapi/execution.cc504
-rw-r--r--runtime/onert/frontend/nnapi/memory.cc42
-rw-r--r--runtime/onert/frontend/nnapi/model.cc416
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc45
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h47
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc42
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h44
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc335
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h77
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc46
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h39
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc287
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h75
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.test.cc25
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc100
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h79
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc1914
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/OperationFactory.h60
-rw-r--r--runtime/onert/frontend/tflite/CMakeLists.txt14
-rw-r--r--runtime/onert/frontend/tflite/include/tflite_loader.h34
-rw-r--r--runtime/onert/frontend/tflite/src/tflite_loader.cc126
-rw-r--r--runtime/onert/frontend/tflite/src/tflite_schema_generated.h9553
-rw-r--r--runtime/onert/frontend/tflite/tflite_schema-1.13.1.fbs795
-rw-r--r--runtime/onert/frontend/tflite/tflite_schema.fbs1095
-rw-r--r--runtime/onert/sample/CMakeLists.txt1
-rw-r--r--runtime/onert/sample/minimal/CMakeLists.txt10
-rw-r--r--runtime/onert/sample/minimal/README.md13
-rw-r--r--runtime/onert/sample/minimal/src/minimal.cc71
-rw-r--r--runtime/onert/test/CMakeLists.txt15
-rw-r--r--runtime/onert/test/core/compiler/Scheduler.cc587
-rw-r--r--runtime/onert/test/core/exec/ExecInstance.cc297
-rw-r--r--runtime/onert/test/core/exec/ExecTime.test.cc105
-rw-r--r--runtime/onert/test/core/interp/ExecManager.cc361
-rw-r--r--runtime/onert/test/graph/Graph.cc52
-rw-r--r--runtime/onert/test/graph/Index.cc34
-rw-r--r--runtime/onert/test/graph/MockNode.h47
-rw-r--r--runtime/onert/test/graph/operand/IndexSet.cc52
-rw-r--r--runtime/onert/test/graph/operand/LayoutSet.cc58
-rw-r--r--runtime/onert/test/graph/operand/Set.cc45
-rw-r--r--runtime/onert/test/graph/operand/UseDef.cc85
-rw-r--r--runtime/onert/test/graph/operation/Set.cc33
-rw-r--r--runtime/onert/test/graph/operation/SetIO.cc99
-rw-r--r--runtime/onert/test/graph/verifier/Verifier.cc98
-rw-r--r--runtime/onert/test/ir/Shape.cc58
-rw-r--r--runtime/onert/test/util/ObjectManager.cc97
-rw-r--r--runtime/onert/test/util/ShapeInference.cc544
656 files changed, 0 insertions, 91959 deletions
diff --git a/runtime/onert/CMakeLists.txt b/runtime/onert/CMakeLists.txt
deleted file mode 100644
index 88d52a5bd..000000000
--- a/runtime/onert/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-if(NOT BUILD_ONERT)
- return()
-endif(NOT BUILD_ONERT)
-
-add_subdirectory(backend)
-add_subdirectory(frontend)
-add_subdirectory(core)
-add_subdirectory(api)
-add_subdirectory(sample)
-
-if(NOT ENABLE_TEST)
- return()
-endif(NOT ENABLE_TEST)
-
-add_subdirectory(test)
diff --git a/runtime/onert/api/CMakeLists.txt b/runtime/onert/api/CMakeLists.txt
deleted file mode 100644
index 9c6dd90cc..000000000
--- a/runtime/onert/api/CMakeLists.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-file(GLOB_RECURSE API_SRC "*.cc")
-
-set(ONERT_DEV nnfw-dev)
-add_library(${ONERT_DEV} SHARED ${API_SRC})
-
-# Public headers to publish
-# nnfw_internal.h is header for runtime developer, so it will not be installed
-# But runtime developer can use nnfw_internal.h by linking nnfw-dev
-set(NNFW_API_HEADERS include/nnfw.h include/nnfw_experimental.h)
-
-target_link_libraries(${ONERT_DEV} PUBLIC nnfw-nnapi-header)
-target_link_libraries(${ONERT_DEV} PRIVATE onert_core)
-target_link_libraries(${ONERT_DEV} PRIVATE jsoncpp tflite_loader circle_loader ${LIB_PTHREAD})
-target_link_libraries(${ONERT_DEV} PRIVATE nnfw_common)
-target_link_libraries(${ONERT_DEV} PRIVATE nnfw_coverage)
-# NOTE Below line is added to remove warning for android build
-# It will be removed after android build uses gold linker
-if (ANDROID)
- target_link_libraries(${ONERT_DEV} INTERFACE log)
-endif (ANDROID)
-
-target_include_directories(${ONERT_DEV} PUBLIC include)
-set_target_properties(${ONERT_DEV} PROPERTIES PUBLIC_HEADER "${NNFW_API_HEADERS}")
-
-install(TARGETS ${ONERT_DEV}
- LIBRARY DESTINATION lib
- PUBLIC_HEADER DESTINATION include/nnfw)
diff --git a/runtime/onert/api/include/nnfw.h b/runtime/onert/api/include/nnfw.h
deleted file mode 100644
index 9348df6ae..000000000
--- a/runtime/onert/api/include/nnfw.h
+++ /dev/null
@@ -1,490 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file nnfw.h
- * @brief This file describes runtime API
- */
-#ifndef __NNFW_H__
-#define __NNFW_H__
-
-#include <stddef.h>
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @brief Session to query with runtime
- *
- * <p>nnfw_session is started and passed by calling {@link nnfw_create_session}.
- * Each session has its own inference environment, such as model to inference, backend usage, etc.
- *
- * <p>Load model by calling {@link nnfw_load_model_from_file}
- *
- * <p>After loading, prepare inference by calling {@link nnfw_prepare}.
- * Application can set runtime environment before prepare by calling
- * {@link nnfw_set_available_backends} and {@link nnfw_set_op_backend}, and it is optional.
- *
- * <p>Application can inference by calling {@link nnfw_run}.
- * Before inference, application has responsibility to set input tensor to set input data by calling
- * {@link nnfw_set_output}, and output tensor to get output by calling {@link nnfw_set_input}
- *
- * <p>To support input and output setting, application can get
- * input and output tensor information by calling<ul>
- * <li>{@link nnfw_input_size}</li>
- * <li>{@link nnfw_output_size}</li>
- * <li>{@link nnfw_input_tensorinfo}</li>
- * <li>{@link nnfw_output_tensorinfo}</li>
- * </ul>
- *
- * <p>Application can inference many times using one session,
- * but next inference can do after prior inference end
- *
- * <p>Application cannot use muitiple model using one session
- */
-typedef struct nnfw_session nnfw_session;
-
-/**
- * @brief Tensor types
- *
- * The type of tensor represented in {@link nnfw_tensorinfo}
- */
-typedef enum {
- /** A tensor of 32 bit floating point */
- NNFW_TYPE_TENSOR_FLOAT32 = 0,
- /** A tensor of 32 bit signed integer */
- NNFW_TYPE_TENSOR_INT32 = 1,
- /**
- * A tensor of 8 bit integers that represent real numbers.
- *
- * real_value = (integer_value - zeroPoint) * scale.
- */
- NNFW_TYPE_TENSOR_QUANT8_ASYMM = 2,
- /** A tensor of boolean */
- NNFW_TYPE_TENSOR_BOOL = 3,
-
- /** A tensor of 8 bit unsigned integer */
- NNFW_TYPE_TENSOR_UINT8 = 4,
-
- /** A tensor of 64 bit signed integer */
- NNFW_TYPE_TENSOR_INT64 = 5,
-
-} NNFW_TYPE;
-
-/**
- * @brief Result values returned from a call to an API function
- */
-typedef enum {
- /** Successful */
- NNFW_STATUS_NO_ERROR = 0,
- /**
- * An error code for general use.
- * Mostly used when there is no specific value for that certain situation.
- */
- NNFW_STATUS_ERROR = 1,
- /** Unexpected null argument is given. */
- NNFW_STATUS_UNEXPECTED_NULL = 2,
- /** When a function was called but it is not valid for the current session state. */
- NNFW_STATUS_INVALID_STATE = 3,
- /** When it is out of memory */
- NNFW_STATUS_OUT_OF_MEMORY = 4,
- /** When it was given an insufficient output buffer */
- NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE = 5,
-} NNFW_STATUS;
-
-/**
- * @brief Data format of a tensor
- */
-typedef enum {
- /** Don't care layout */
- NNFW_LAYOUT_NONE = 0,
- /**
- * Channel last layout
- * If rank is 4, layout is NHWC
- */
- NNFW_LAYOUT_CHANNELS_LAST = 1,
- /**
- * Channel first layout
- * If rank is 4, layout is NCHW
- */
- NNFW_LAYOUT_CHANNELS_FIRST = 2,
-} NNFW_LAYOUT;
-
-/**
- * @brief Information ID for retrieving information on nnfw (e.g. version)
- */
-typedef enum {
- /** nnfw runtime version
- * Its value is uint32 in 0xMMmmmmPP, where MM = major, mmmm = minor, PP = patch.
- */
- NNFW_INFO_ID_VERSION = 0,
-} NNFW_INFO_ID;
-
-/**
- * @brief Maximum rank expressible with nnfw
- */
-#define NNFW_MAX_RANK (6)
-
-/**
- * @brief tensor info describes the type and shape of tensors
- *
- * <p>This structure is used to describe input and output tensors.
- * Application can get input and output tensor type and shape described in model by using
- * {@link nnfw_input_tensorinfo} and {@link nnfw_output_tensorinfo}
- *
- * <p>Maximum rank is 6 (NNFW_MAX_RANK). And tensor's dimension value is filled in 'dims' field from
- * index 0.
- * For example, if tensor's rank is 4,
- * application can get dimension value from dims[0], dims[1], dims[2], and dims[3]
- */
-typedef struct nnfw_tensorinfo
-{
- /** The data type */
- NNFW_TYPE dtype;
- /** The number of dimensions (rank) */
- int32_t rank;
- /**
- * The dimension of tensor.
- * Maximum rank is 6 (NNFW_MAX_RANK).
- */
- int32_t dims[NNFW_MAX_RANK];
-} nnfw_tensorinfo;
-
-/**
- * @brief Create a new session instance.
- *
- * <p>This only creates a session.
- * Model is loaded after {@link nnfw_load_model_from_file} is invoked.
- * And inference is performed after {@link nnfw_run} is invoked.
- *
- * <p>{@link nnfw_close_session} should be called once
- * if session is no longer need
- *
- * @param[out] session The session to be created
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_create_session(nnfw_session **session);
-
-/**
- * @brief Close a session instance
- *
- * After called, access to closed session by application will be invalid
- *
- * @param[in] session The session to be closed
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_close_session(nnfw_session *session);
-
-/**
- * @brief Load model from nnpackage file or directory
- *
- * The length of \p package_file_path must not execeed 1024 bytes including zero at the end.
- *
- * @param[in] session nnfw_session loading the given nnpackage file/dir
- * @param[in] package_file_path Path to the nnpackage file or unzipped directory to be loaded
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path);
-
-/**
- * @brief Apply i-th input's tensor info to resize input tensor
- *
- * This function should be called before {@link nnfw_prepare} is invoked, and
- * should be called after {@link nnfw_load_model_from_file} is invoked
- * See {@link nnfw_prepare} for information applying updated tensor info
- * If this function is called many times for same index, tensor info is overwritten
- *
- * @deprecated Deprecated since 1.7.0. Use {@link nnfw_set_input_tensorinfo} instead.
- *
- * @param[in] session Session to the input tensor info is to be set
- * @param[in] index Index of input to be applied (0-indexed)
- * @param[in] tensor_info Tensor info to be applied
- * @return @c NNFW_STATUS_NO_ERROR if successful, otherwise return @c NNFW_STATUS_ERROR
- */
-NNFW_STATUS nnfw_apply_tensorinfo(nnfw_session *session, uint32_t index,
- nnfw_tensorinfo tensor_info);
-
-/**
- * @brief Set input model's tensor info for resizing
- *
- * This function can be called at any time after calling {@link nnfw_model_load_from_file}. Changing
- * input tensor's shape will cause shape inference for the model. There are two different types of
- * shape inference - static and dynamic. Which one to use is depend on the current state of the
- * session.
- * When it is called after calling {@link nnfw_model_load_from_file} and before calling {@link
- * nnfw_prepare}, this info will be used when {@link nnfw_prepare}. And it will perform static shape
- * inference for all tensors.
- * When it is called after calling {@link nnfw_prepare} or even after {@link nnfw_run}, this info
- * will be used when {@link nnfw_run}. And the shapes of the tensors are determined on the fly.
- * If this function is called many times for the same index, it is overwritten.
- *
- * @param[in] session Session to the input tensor info is to be set
- * @param[in] index Index of input to be set (0-indexed)
- * @param[in] tensor_info Tensor info to be set
- * @return @c NNFW_STATUS_NO_ERROR if successful, otherwise return @c NNFW_STATUS_ERROR
- */
-NNFW_STATUS nnfw_set_input_tensorinfo(nnfw_session *session, uint32_t index,
- const nnfw_tensorinfo *tensor_info);
-
-/**
- * @brief Prepare session to be ready for inference
- *
- * This phase may finalize model compilation, scheduling, and additional settings.
- * If {@link nnfw_apply_tensor} is called to apply input tensor info different with model
- * before this function, tries to resize all tensors.
- *
- * @param[in] session the session to be prepared
- * @return @c NNFW_STATUS_NO_ERROR if successful, otherwise return @c NNFW_STATUS_ERROR
- */
-NNFW_STATUS nnfw_prepare(nnfw_session *session);
-
-/**
- * @brief Run inference
- *
- * <p>This function should be called after model is loaded by {@link nnfw_load_model_from_file},
- * session is prepared for inference by {@link nnfw_prepare}, set input and output buffers
- * by {@link nnfw_set_input} and {@link nnfw_set_output}.</p>
- *
- * <p>This function return after inference is finished.</p>
- *
- * @param[in] session The session to run inference
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_run(nnfw_session *session);
-
-/**
- * @brief Run inference asynchronously
- *
- * <p>This function must be called after model is loaded by {@link nnfw_load_model_from_file},
- * session is prepared for inference by {@link nnfw_prepare}, set input and output buffers
- * by {@link nnfw_set_input} and {@link nnfw_set_output}.</p>
- *
- * <p>This function returns immediately after starting a thread to run the inference.
- * To get the result of it or to do the next inference with {@link nnfw_run} or
- * {@link nnfw_run_async}, {@link nnfw_await} must be called to ensure the current asynchronous
- * inference has finished. Only one asynchronous inference is allowed at a time for a session.
- * If this function is called while the previous one is still running, it returns an error.</p>
- *
- * @param[in] session The session to run inference
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_run_async(nnfw_session *session);
-
-/**
- * @brief Wait for asynchronous run to finish
- *
- * <p>This function must be called after calling {@link nnfw_run_asnyc}, and can be called only once
- * for a {@link nnfw_run_async} call.
- *
- * <p>When this function returns, it means that this session has finished the asynchronous run. Then
- * the user can safely use the output data.</p>
- *
- * <p>This function returns after the asynchronous inference is finished.</p>
- *
- * @param[in] session The session to run inference
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_await(nnfw_session *session);
-
-/**
- * @brief Set input buffer
- *
- * This function must be called after {@link nnfw_prepare}, \p buffer given to this function can be
- * reused for many inferences. \p length must be greater or equal than the operand requires. To
- * specify an optional input, you can either not call this for that input or call this with \p
- * buffer of NULL and \p length of 0.
- *
- * @param[in] session Session to the input is to be set
- * @param[in] index Index of input to be set (0-indexed)
- * @param[in] type Type of the input
- * @param[in] buffer Raw buffer for input
- * @param[in] length Size of bytes of input buffer
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_input(nnfw_session *session, uint32_t index, NNFW_TYPE type,
- const void *buffer, size_t length);
-
-/**
- * @brief Set output buffer
- *
- * This function must be called after {@link nnfw_prepare}, \p buffer given to this function can be
- * reused for many inferences. \p length must be greater or equal than the operand requires. An
- * output operand can have unspecified shape and deduced dynamically during the execution. You must
- * provide \p buffer large enough.
- *
- * @param[in] session Session from inference output is to be extracted
- * @param[in] index Index of output to be set (0-indexed)
- * @param[in] type Type of the output
- * @param[out] buffer Raw buffer for output
- * @param[in] length Size of bytes of output buffer
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_output(nnfw_session *session, uint32_t index, NNFW_TYPE type, void *buffer,
- size_t length);
-
-/**
- * @brief Get the number of inputs
- *
- * Application can call this function to get number of inputs defined in loaded model.
- * This function should be called after {@link nnfw_load_model_from_file} is invoked to load model
- *
- * @param[in] session Session from input information is to be extracted
- * @param[out] number Variable which the number of inputs is put into
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_input_size(nnfw_session *session, uint32_t *number);
-
-/**
- * @brief Get the number of outputs
- *
- * Application can call this function to get number of outputs defined in loaded model.
- * This function should be called after {@link nnfw_load_model_from_file} is invoked to load model
- *
- * @param[in] session Session from output information is to be extracted
- * @param[out] number Variable which the number of outputs is put into
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_output_size(nnfw_session *session, uint32_t *number);
-
-/**
- * @brief Set the layout of an input
- *
- * The input that does not call this has NNFW_LAYOUT_NHWC layout
- *
- * @param[in] session session from inference input is to be extracted
- * @param[in] index index of input to be set (0-indexed)
- * @param[in] layout layout to set to target input
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_input_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout);
-
-/**
- * @brief Set the layout of an output
- *
- * The output that does not call this has NNFW_LAYOUT_NHWC layout
- *
- * @param[in] session session from inference output is to be extracted
- * @param[in] index index of output to be set (0-indexed)
- * @param[in] layout layout to set to target output
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_output_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout);
-
-/**
- * @brief Get i-th input tensor info
- *
- * <p>Before {@link nnfw_prepare} is invoked, this function return tensor info in model,
- * so updated tensor info by {@link nnfw_apply_tensorinfo} is not returned.</p>
- *
- * <p>After {@link nnfw_prepare} is invoked, this function return updated tensor info
- * if tensor info is updated by {@link nnfw_apply_tensorinfo}.</p>
- *
- * @param[in] session Session from input information is to be extracted
- * @param[in] index Index of input
- * @param[out] tensor_info Tensor info (shape, type, etc)
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_input_tensorinfo(nnfw_session *session, uint32_t index,
- nnfw_tensorinfo *tensor_info);
-
-/**
- * @brief Get i-th output tensor info
- *
- * <p>After {@link nnfw_load_model_from_file} and before {@link nnfw_prepare} is invoked, it returns
- * tensor info in the model.</p>
- *
- * <p>After {@link nnfw_prepare} and before {@link nnfw_run} is invoked, this function returns
- * updated tensor info if tensor info is updated by {@link nnfw_set_input_tensorinfo}.</p>
- *
- * <p>After {@link nnfw_run} is invoked(at least once), it returns the updated tensor info during
- * the latest execution.</p>
- *
- * @param[in] session Session from output information is to be extracted
- * @param[in] index Index of output
- * @param[out] tensor_info Tensor info (shape, type, etc)
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_output_tensorinfo(nnfw_session *session, uint32_t index,
- nnfw_tensorinfo *tensor_info);
-
-/**
- * @brief Set available backends
- *
- * This function should be called before {@link nnfw_prepare} is invoked.
- *
- * <p>Supported backends differs on each platforms.
- * For example, `x86_64` supports "cpu" only.
- * Multiple backends can be set and they must be separated by a semicolon (ex: "acl_cl;cpu").
- * For each backend string, `libbackend_{backend}.so` will be dynamically loaded during
- * {@link nnfw_prepare}.
- * Among the multiple backends, the 1st element is used as the default backend.</p>
- *
- * @param[in] session session to which avilable backends are set
- * @param[in] backends available backends on which nnfw uses
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_available_backends(nnfw_session *session, const char *backends);
-
-/**
- * @brief Set the operation's backend
- *
- * This function should be called before {@link nnfw_prepare} is invoked.
- *
- * <p>The backend for op has higher priority than available backends specified by
- * {@link nnfw_set_available_backends}.</p>
- *
- * @deprecated Deprecated since 1.8.0.
- *
- * @param[in] session session to be modified
- * @param[in] op operation to be set
- * @param[in] backend bakcend on which operation run
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_op_backend(nnfw_session *session, const char *op, const char *backend);
-
-/**
- * @brief Retrieve uint32 type of nnfw information for given information ID.
- *
- * <p>Retrieves the information of property given by information id </p>
- *
- * @note: The input session could be null for global information (e.g. runtime version).*
- *
- * @param[in] session session to be queried on.
- * @param[in] information ID to be queried
- * @param[out] val uint32 value to be returned.
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_query_info_u32(nnfw_session *session, NNFW_INFO_ID id, uint32_t *val);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/runtime/onert/api/include/nnfw_experimental.h b/runtime/onert/api/include/nnfw_experimental.h
deleted file mode 100644
index 94f781988..000000000
--- a/runtime/onert/api/include/nnfw_experimental.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_EXPERIMENTAL_H__
-#define __NNFW_EXPERIMENTAL_H__
-
-#include "nnfw.h"
-
-// Used for custom kernel development
-
-/*
- * operand type, used only for custom operations
- */
-typedef struct
-{
- nnfw_tensorinfo type;
- void *allocation;
-} nnfw_operand;
-
-/*
- * Used as input to custom operation eval function
- */
-typedef struct
-{
- size_t ninputs;
- nnfw_operand *inputs;
-
- size_t noutputs;
- nnfw_operand *outputs;
-} nnfw_custom_kernel_params;
-
-/*
- * Custom kernel evaluation function
- *
- * param[in] params custom operation parameters
- * param[in] userdata pointer to user-specified buffer( kernel instance specific )
- */
-typedef void (*nnfw_custom_eval)(nnfw_custom_kernel_params *params, char *userdata,
- size_t userdata_size);
-
-/*
- * custom operation registration info
- */
-typedef struct
-{
- nnfw_custom_eval eval_function;
-} custom_kernel_registration_info;
-
-NNFW_STATUS nnfw_register_custom_op_info(nnfw_session *session, const char *id,
- custom_kernel_registration_info *info);
-
-/**
- * @brief Get the input tensor index by name
- *
- * This function finds an input tensor of the given name.
- * If found, the index value is set to the address that @c index points to, and returns
- * @c NNFW_STATUS_NO_ERROR. Otherwise, @c index is unchanged and returns @c NNFW_STATUS_ERROR .
- *
- * @note If two or more input tensors are of the same name, the one with the lowest index is always
- * returned.
- *
- * @param[in] session the session object
- * @param[in] tensorname the name of the tensor to find, a null terminated char pointer string
- * @param[out] index the index to be ret
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_input_tensorindex(nnfw_session *session, const char *tensorname, uint32_t *index);
-
-/**
- * @brief Get the input tensor index by name
- *
- * This function finds an input tensor of the given name.
- * If found, the index value is set to the address that @c index points to, and returns
- * @c NNFW_STATUS_NO_ERROR. Otherwise, @c index is unchanged and returns @c NNFW_STATUS_ERROR .
- *
- * @note If two or more input tensors are of the same name, the one with the lowest index is always
- * returned.
- *
- * @param[in] session the session object
- * @param[in] tensorname the name of the tensor to find, a null terminated char pointer string
- * @param[out] index the index to be ret
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_output_tensorindex(nnfw_session *session, const char *tensorname, uint32_t *index);
-
-#endif // __NNFW_EXPERIMENTAL_H__
diff --git a/runtime/onert/api/include/nnfw_internal.h b/runtime/onert/api/include/nnfw_internal.h
deleted file mode 100644
index eb4b6d629..000000000
--- a/runtime/onert/api/include/nnfw_internal.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_INTERNAL_H__
-#define __NNFW_INTERNAL_H__
-
-#include "nnfw.h"
-
-NNFW_STATUS nnfw_set_config(nnfw_session *session, const char *key, const char *value);
-
-NNFW_STATUS nnfw_get_config(nnfw_session *session, const char *key, char *value, size_t value_size);
-
-/**
- * @brief Load a circle model from buffer.
- *
- * The buffer must outlive the session.
- *
- * @param[in] session session
- * @param[in] buffer Pointer to the buffer
- * @param[in] size Buffer size
- * @return NNFW_STATUS
- */
-NNFW_STATUS nnfw_load_circle_from_buffer(nnfw_session *session, uint8_t *buffer, size_t size);
-
-#endif // __NNFW_INTERNAL_H__
diff --git a/runtime/onert/api/include/nnfw_version.h b/runtime/onert/api/include/nnfw_version.h
deleted file mode 100644
index 8c6ea3994..000000000
--- a/runtime/onert/api/include/nnfw_version.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_VERSION_H__
-#define __NNFW_VERSION_H__
-
-/**
- * NNFW_VERSION is a uint32 value representing nnfw runtime version
- * in 0xMMmmmmPP, where MM = major, mmmm = minor, PP = patch
- */
-#define NNFW_VERSION 0x01000a00
-
-#endif // __NNFW_VERSION_H__
diff --git a/runtime/onert/api/src/CustomKernel.cc b/runtime/onert/api/src/CustomKernel.cc
deleted file mode 100644
index 3f3a5d81e..000000000
--- a/runtime/onert/api/src/CustomKernel.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "CustomKernel.h"
-
-namespace onert
-{
-namespace frontend
-{
-namespace custom
-{
-
-using namespace backend::custom;
-
-class APIConverter
-{
-public:
- static nnfw_operand convertOperand(void *alloc, const TypeInfo &type)
- {
- nnfw_operand api_operand;
- api_operand.allocation = alloc;
- api_operand.type = convertType(type);
- return api_operand;
- }
-
- static nnfw_tensorinfo convertType(const TypeInfo &type)
- {
- nnfw_tensorinfo api_type;
- api_type.rank = type.shape.rank();
- assert(type.shape.rank() <= 6);
- std::copy(type.shape.dims().begin(), type.shape.dims().end(), std::begin(api_type.dims));
-
- switch (type.dtype)
- {
- case ir::DataType::FLOAT32:
- api_type.dtype = NNFW_TYPE_TENSOR_FLOAT32;
- break;
- case ir::DataType::INT32:
- api_type.dtype = NNFW_TYPE_TENSOR_INT32;
- break;
- case ir::DataType::QUANT_UINT8_ASYMM:
- api_type.dtype = NNFW_TYPE_TENSOR_QUANT8_ASYMM;
- break;
- case ir::DataType::BOOL8:
- api_type.dtype = NNFW_TYPE_TENSOR_BOOL;
- break;
- default:
- throw std::runtime_error("Unsupported tensor datatype");
- }
- return api_type;
- }
-};
-
-Kernel::Kernel(const nnfw_custom_eval evalFunction)
- : _in_params(), _userdata(nullptr), _userdata_size(0), _evalFunction(evalFunction)
-{
-}
-
-void Kernel::configure(CustomKernelConfigParams &&inParams)
-{
- _userdata = inParams.userdata;
- _userdata_size = inParams.userdata_size;
-
- _in_params = std::move(inParams);
-}
-
-void Kernel::run()
-{
- nnfw_custom_kernel_params params;
-
- // set input tensor buffer and types
- params.ninputs = _in_params.input_tensors.size();
- params.inputs = new nnfw_operand[params.ninputs];
-
- for (size_t i = 0; i < params.ninputs; ++i)
- {
- auto *buf = _in_params.input_tensors[i]->buffer();
- assert(buf);
- params.inputs[i] = APIConverter::convertOperand(buf, _in_params.input_types[i]);
- }
-
- // set output tensor buffer and types
- params.noutputs = _in_params.output_tensors.size();
- params.outputs = new nnfw_operand[params.noutputs];
-
- for (size_t i = 0; i < params.noutputs; ++i)
- {
- auto *buf = _in_params.output_tensors[i]->buffer();
- assert(buf);
- params.outputs[i] = APIConverter::convertOperand(buf, _in_params.output_types[i]);
- }
-
- _evalFunction(&params, _userdata, _userdata_size);
-
- delete[] params.inputs;
- delete[] params.outputs;
-}
-
-} // namespace custom
-} // namespace frontend
-} // namespace onert
diff --git a/runtime/onert/api/src/CustomKernel.h b/runtime/onert/api/src/CustomKernel.h
deleted file mode 100644
index a42f7a639..000000000
--- a/runtime/onert/api/src/CustomKernel.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CUSTOM_KERNEL_H__
-#define __ONERT_BACKEND_CUSTOM_KERNEL_H__
-
-#include "nnfw_experimental.h"
-
-#include "backend/CustomKernelBuilder.h"
-#include "exec/IFunction.h"
-
-#include <vector>
-
-namespace onert
-{
-namespace frontend
-{
-namespace custom
-{
-
-class Kernel : public ::onert::exec::IFunction
-{
-public:
- explicit Kernel(nnfw_custom_eval evalFunction);
-
- backend::custom::CustomKernelConfigParams _in_params;
-
- char *_userdata;
- size_t _userdata_size;
-
- nnfw_custom_eval _evalFunction;
- // nnfw_custom_type_infer _type_infer_function; //Unused for now
-
- /**
- * Fills _params field used later by user specified eval function
- * @param inParams custom kernel parameters
- */
- virtual void configure(backend::custom::CustomKernelConfigParams &&inParams);
-
- void run() override;
-};
-
-} // namespace custom
-} // namespace frontend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CUSTOM_KERNEL_H__
diff --git a/runtime/onert/api/src/CustomKernelRegistry.cc b/runtime/onert/api/src/CustomKernelRegistry.cc
deleted file mode 100644
index 7812609d1..000000000
--- a/runtime/onert/api/src/CustomKernelRegistry.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "CustomKernelRegistry.h"
-
-#include <memory>
-
-namespace onert
-{
-namespace frontend
-{
-namespace custom
-{
-
-void KernelRegistry::registerKernel(const std::string &id, nnfw_custom_eval evalFunction)
-{
- _storage.emplace(id, evalFunction);
-}
-
-std::shared_ptr<backend::custom::IKernelBuilder> KernelRegistry::getBuilder()
-{
- return std::make_unique<KernelBuilder>(this);
-}
-
-std::unique_ptr<Kernel> KernelRegistry::buildKernelForOp(const std::string &id)
-{
- auto it = _storage.find(id);
- if (it == _storage.end())
- {
- throw std::runtime_error("Unable to find associated kernel for op");
- }
-
- return std::make_unique<Kernel>(it->second);
-}
-
-// Kernel builder
-std::unique_ptr<exec::IFunction>
-KernelBuilder::buildKernel(const std::string &id,
- backend::custom::CustomKernelConfigParams &&params) const
-{
- auto kernel = _registry->buildKernelForOp(id);
- kernel->configure(std::move(params));
-
- return kernel;
-}
-
-KernelBuilder::KernelBuilder(KernelRegistry *registry) : _registry(registry) {}
-
-} // namespace custom
-} // namespace frontend
-} // namespace onert
diff --git a/runtime/onert/api/src/CustomKernelRegistry.h b/runtime/onert/api/src/CustomKernelRegistry.h
deleted file mode 100644
index fe60d5bcc..000000000
--- a/runtime/onert/api/src/CustomKernelRegistry.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
-#define __ONERT_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
-
-#include "CustomKernel.h"
-
-#include <unordered_map>
-#include <functional>
-#include <memory>
-
-#include <iostream>
-
-namespace onert
-{
-namespace frontend
-{
-namespace custom
-{
-
-class KernelRegistry
-{
-public:
- void registerKernel(const std::string &id, nnfw_custom_eval evalFunction);
-
- std::shared_ptr<backend::custom::IKernelBuilder> getBuilder();
- std::unique_ptr<Kernel> buildKernelForOp(const std::string &id);
-
-private:
- std::unordered_map<std::string, nnfw_custom_eval> _storage;
-};
-
-class KernelBuilder : public backend::custom::IKernelBuilder
-{
-public:
- KernelBuilder(KernelRegistry *registry);
-
- std::unique_ptr<exec::IFunction>
- buildKernel(const std::string &id,
- backend::custom::CustomKernelConfigParams &&params) const override;
-
-private:
- KernelRegistry *_registry;
-};
-
-} // namespace custom
-} // namespace frontend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
diff --git a/runtime/onert/api/src/OpMap.lst b/runtime/onert/api/src/OpMap.lst
deleted file mode 100644
index fa8afae5e..000000000
--- a/runtime/onert/api/src/OpMap.lst
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MAP_MACRO
-#error Define MAP_MACRO before including this file
-#endif
-
-// Table: circle operation ID order
-// circle operation | onert internal operation
-MAP_MACRO(ADD , Add)
-MAP_MACRO(AVERAGE_POOL_2D , AvgPool2D)
-MAP_MACRO(CONCATENATION , Concat)
-MAP_MACRO(CONV_2D , Conv2D)
-MAP_MACRO(DEPTHWISE_CONV_2D , DepthwiseConv2D)
-// DEPTH_TO_SPACE
-MAP_MACRO(DEQUANTIZE , Dequantize)
-MAP_MACRO(EMBEDDING_LOOKUP , EmbeddingLookup)
-MAP_MACRO(FLOOR , Floor)
-MAP_MACRO(FULLY_CONNECTED , FullyConnected)
-MAP_MACRO(HASHTABLE_LOOKUP , HashtableLookup)
-MAP_MACRO(L2_NORMALIZATION , L2Normalization)
-MAP_MACRO(L2_POOL_2D , L2Pool2D)
-MAP_MACRO(LOCAL_RESPONSE_NORMALIZATION , LocalResponseNormalization)
-MAP_MACRO(LOGISTIC , Logistic)
-// LSH_PROJECTION
-MAP_MACRO(LSTM , LSTM)
-MAP_MACRO(MAX_POOL_2D , MaxPool2D)
-MAP_MACRO(MUL , Mul)
-MAP_MACRO(RELU , ReLU)
-MAP_MACRO(RELU_N1_TO_1 , ReLU1)
-MAP_MACRO(RELU6 , ReLU6)
-MAP_MACRO(RESHAPE , Reshape)
-MAP_MACRO(RESIZE_BILINEAR , ResizeBilinear)
-MAP_MACRO(RNN , RNN)
-MAP_MACRO(SOFTMAX , Softmax)
-MAP_MACRO(SPACE_TO_DEPTH , SpaceToDepth)
-// SVDF
-MAP_MACRO(TANH , Tanh)
-// CONCAT_EMBEDDINGS
-// SKIP_GRAM
-// CALL
-MAP_MACRO(CUSTOM , Custom)
-// EMBEDDING_LOOKUP_SPARSE
-MAP_MACRO(PAD , Pad)
-// UNIDIRECTIONAL_SEQUENCE_RNN
-MAP_MACRO(GATHER , Gather)
-MAP_MACRO(BATCH_TO_SPACE_ND , BatchToSpaceND)
-MAP_MACRO(SPACE_TO_BATCH_ND , SpaceToBatchND)
-MAP_MACRO(TRANSPOSE , Transpose)
-MAP_MACRO(MEAN , Mean)
-MAP_MACRO(SUB , Sub)
-MAP_MACRO(DIV , Div)
-MAP_MACRO(SQUEEZE , Squeeze)
-// UNIDIRECTIONAL_SEQUENCE_LSTM
-MAP_MACRO(STRIDED_SLICE , StridedSlice)
-// BIDIRECTIONAL_SEQUENCE_RNN
-MAP_MACRO(EXP , Exp)
-MAP_MACRO(TOPK_V2 , TopKV2)
-MAP_MACRO(SPLIT , Split)
-// LOG_SOFTMAX
-// DELEGATE
-// BIDIRECTIONAL_SEQUENCE_LSTM
-MAP_MACRO(CAST , Cast)
-MAP_MACRO(PRELU , PReLU)
-MAP_MACRO(MAXIMUM , Max)
-MAP_MACRO(ARG_MAX , ArgMax)
-MAP_MACRO(MINIMUM , Min)
-// LESS (Comparison) ?
-MAP_MACRO(NEG , Neg)
-// PADV2
-// GREATER (Comparison) ?
-// GREATER_EQUAL (Comparison) ?
-// LESS_EQUAL (Comparison) ?
-MAP_MACRO(SELECT , Select)
-MAP_MACRO(SLICE , Slice)
-MAP_MACRO(SIN , Sin)
-MAP_MACRO(TRANSPOSE_CONV , TransposeConv)
-// SPARSE_TO_DENSE
-MAP_MACRO(TILE , Tile)
-MAP_MACRO(EXPAND_DIMS , ExpandDims)
-// EQUAL (Comparison) ?
-// NOT_EQUAL (Comparison) ?
-MAP_MACRO(LOG , Log)
-MAP_MACRO(SUM , ReduceSum)
-MAP_MACRO(SQRT , SQRT)
-MAP_MACRO(RSQRT , RSQRT)
-MAP_MACRO(SHAPE , Shape)
-MAP_MACRO(POW , Pow)
-// ARG_MIN
-// FAKE_QUANT
-MAP_MACRO(REDUCE_PROD , ReduceProd)
-MAP_MACRO(REDUCE_MAX , ReduceMax)
-MAP_MACRO(PACK , Pack)
-MAP_MACRO(LOGICAL_OR , LogicalOr)
-MAP_MACRO(ONE_HOT , OneHot)
-MAP_MACRO(LOGICAL_AND , LogicalAnd)
-MAP_MACRO(LOGICAL_NOT , LogicalNot)
-MAP_MACRO(UNPACK , Unpack)
-MAP_MACRO(REDUCE_MIN , ReduceMin)
-// FLOOR_DIV
-MAP_MACRO(REDUCE_ANY , ReduceAny)
-// SQUARE
-MAP_MACRO(ZEROS_LIKE , ZerosLike)
-MAP_MACRO(FILL , Fill)
-// FLOOR_MOD
-MAP_MACRO(RANGE , Range)
-// RESIZE_NEAREST_NEIGHBOR
-// LEAKY_RELU
-MAP_MACRO(SQUARED_DIFFERENCE , SquaredDifference)
-// MIRROR_PAD
-MAP_MACRO(ABS , Abs)
-// SPLIT_V
-// UNIQUE
-// CEIL
-MAP_MACRO(REVERSE_V2 , Reverse)
-// ADD_N
-// GATHER_ND
-MAP_MACRO(COS , Cos)
-// WHERE
-// RANK
-// ELU
-// REVERSE_SEQUENCE
-// MATRIX_DIAG
-// QUANTIZE
-// MATRIX_SET_DIAG
-MAP_MACRO(ROUND , Round)
-// HARD_SWISH
-MAP_MACRO(IF , If)
-MAP_MACRO(WHILE , While)
-// NON_MAX_SUPPRESSION_V4
-// NON_MAX_SUPPRESSION_V5
-// SCATTER_ND
-// SELECT_V2 (Select) ?
-// DENSIFY
-// SEGMENT_SUM
-MAP_MACRO(BATCH_MATMUL , BatchMatMul)
-MAP_MACRO(BCQ_GATHER , BCQGather)
-MAP_MACRO(BCQ_FULLY_CONNECTED , BCQFullyConnected)
-MAP_MACRO(INSTANCE_NORM , InstanceNorm)
diff --git a/runtime/onert/api/src/nnfw_api.cc b/runtime/onert/api/src/nnfw_api.cc
deleted file mode 100644
index ff5e679da..000000000
--- a/runtime/onert/api/src/nnfw_api.cc
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "nnfw_api_internal.h"
-#include "nnfw_version.h"
-
-// Double-check enum value changes
-
-#define STATIC_ASSERT_ENUM_CHECK(ENUM, VAL) static_assert((ENUM) == (VAL), #ENUM " has changed")
-
-STATIC_ASSERT_ENUM_CHECK(NNFW_TYPE_TENSOR_FLOAT32, 0);
-STATIC_ASSERT_ENUM_CHECK(NNFW_TYPE_TENSOR_INT32, 1);
-STATIC_ASSERT_ENUM_CHECK(NNFW_TYPE_TENSOR_QUANT8_ASYMM, 2);
-STATIC_ASSERT_ENUM_CHECK(NNFW_TYPE_TENSOR_BOOL, 3);
-STATIC_ASSERT_ENUM_CHECK(NNFW_TYPE_TENSOR_UINT8, 4);
-STATIC_ASSERT_ENUM_CHECK(NNFW_TYPE_TENSOR_INT64, 5);
-
-STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_NO_ERROR, 0);
-STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_ERROR, 1);
-STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_UNEXPECTED_NULL, 2);
-STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_INVALID_STATE, 3);
-STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_OUT_OF_MEMORY, 4);
-STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE, 5);
-
-STATIC_ASSERT_ENUM_CHECK(NNFW_LAYOUT_NONE, 0);
-STATIC_ASSERT_ENUM_CHECK(NNFW_LAYOUT_CHANNELS_LAST, 1);
-STATIC_ASSERT_ENUM_CHECK(NNFW_LAYOUT_CHANNELS_FIRST, 2);
-
-STATIC_ASSERT_ENUM_CHECK(NNFW_INFO_ID_VERSION, 0);
-
-#undef STATIC_ASSERT_ENUM_CHECK
-
-#define NNFW_RETURN_ERROR_IF_NULL(p) \
- do \
- { \
- if ((p) == NULL) \
- return NNFW_STATUS_UNEXPECTED_NULL; \
- } while (0)
-
-/*
- * Create a new session instance
- *
- * @param session the session to be created
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_create_session(nnfw_session **session)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
-
- *session = new (std::nothrow) nnfw_session();
- if (*session == nullptr)
- return NNFW_STATUS_OUT_OF_MEMORY;
- return NNFW_STATUS_NO_ERROR;
-}
-
-/*
- * Close a session instance
- *
- * @param session the session to be closed
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_close_session(nnfw_session *session)
-{
- delete session;
- return NNFW_STATUS_NO_ERROR;
-}
-
-/*
- * Load model from nnpackage file or directory
- *
- * @param session nnfw_session loading the given nnpackage file/dir
- * @param package_file_path path to the nnpackage file or unzipped directory to be loaded
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *pacakge_file_path)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->load_model_from_file(pacakge_file_path);
-}
-
-/*
- * Prepare session to be ready for inference
- * This phase may finalize model compilation, scheduling, and additional settings.
- *
- * @param session the session to be prepared
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_prepare(nnfw_session *session)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->prepare();
-}
-
-/*
- * Run inference
- *
- * @param session the session to run inference
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_run(nnfw_session *session)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->run();
-}
-
-NNFW_STATUS nnfw_run_async(nnfw_session *session)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->run_async();
-}
-
-NNFW_STATUS nnfw_await(nnfw_session *session)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->await();
-}
-
-/*
- * Set input
- *
- * @param session session to the input is to be set
- * @param index index of input to be set (0-indexed)
- * @param type type of the input
- * @param buffer raw buffer for input
- * @param length size of bytes of input
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-
-NNFW_STATUS nnfw_set_input(nnfw_session *session, uint32_t index, NNFW_TYPE type,
- const void *buffer, size_t length)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->set_input(index, type, buffer, length);
-}
-
-/*
- * Set output
- *
- * @param session session from inference output is to be extracted
- * @param index index of output to be set (0-indexed)
- * @param type type of the output
- * @param buffer raw buffer for output
- * @param length size of bytes of output
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-
-NNFW_STATUS nnfw_set_output(nnfw_session *session, uint32_t index, NNFW_TYPE type, void *buffer,
- size_t length)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->set_output(index, type, buffer, length);
-}
-
-/*
- * Get the number of inputs
- *
- * @param[in] session session from input information is to be extracted
- * @param[out] number variable which the number of inputs is put into
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-
-NNFW_STATUS nnfw_input_size(nnfw_session *session, uint32_t *number)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->input_size(number);
-}
-
-/*
- * Get the number of outputs
- *
- * @param[in] session session from output information is to be extracted
- * @param[out] number variable which the number of outputs is put into
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_output_size(nnfw_session *session, uint32_t *number)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->output_size(number);
-}
-
-/*
- * Set the layout of an input
- * @note The input that does not call this has NNFW_LAYOUT_CHANNELS_LAST layout
- *
- * @param[in] session session from inference input is to be extracted
- * @param[in] index index of input to be set (0-indexed)
- * @param[in] layout layout to set to target input
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_input_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->set_input_layout(index, layout);
-}
-
-/*
- * Set the layout of an output
- * @note The output that does not call this has NNFW_LAYOUT_CHANNELS_LAST layout
- *
- * @param[in] session session from inference output is to be extracted
- * @param[in] index index of output to be set (0-indexed)
- * @param[in] layout layout to set to target output
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_output_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->set_output_layout(index, layout);
-}
-
-/*
- * Get i-th input tensor info
- *
- * @param[in] session session from input information is to be extracted
- * @param[in] index index of input
- * @param[out] tensor_info nnfw_tensor_info
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_input_tensorinfo(nnfw_session *session, uint32_t index,
- nnfw_tensorinfo *tensor_info)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->input_tensorinfo(index, tensor_info);
-}
-
-/*
- * Get i-th output tensor info
- *
- * @param[in] session session from output information is to be extracted
- * @param[in] index index of output
- * @param[out] tensor_info nnfw_tensor_info
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_output_tensorinfo(nnfw_session *session, uint32_t index,
- nnfw_tensorinfo *tensor_info)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->output_tensorinfo(index, tensor_info);
-}
-
-/*
- * Register custom operation
- * @param session session to register this operation
- * @param id operation id
- * @param info registration info ( eval function, etc. )
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_register_custom_op_info(nnfw_session *session, const char *id,
- custom_kernel_registration_info *info)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->register_custom_operation(id, info->eval_function);
-}
-
-NNFW_STATUS nnfw_apply_tensorinfo(nnfw_session *session, uint32_t index,
- nnfw_tensorinfo tensor_info)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->apply_tensorinfo(index, tensor_info);
-}
-
-NNFW_STATUS nnfw_set_input_tensorinfo(nnfw_session *session, uint32_t index,
- const nnfw_tensorinfo *tensor_info)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->set_input_tensorinfo(index, tensor_info);
-}
-
-/*
- * Set available backends
- *
- * @param[in] session session to which a avilable backends are set
- * @param[in] backends available backends on which nnfw uses
- */
-NNFW_STATUS nnfw_set_available_backends(nnfw_session *session, const char *backends)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->set_available_backends(backends);
-}
-
-/*
- * Set the operation's backend
- *
- * @param[in] session session to be modified
- * @param[in] op operation to be set
- * @param[in] backend bakcend on which operation run
- *
- * @return NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_set_op_backend(nnfw_session *session, const char *op, const char *backend)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->set_op_backend(op, backend);
-}
-
-/*
- * Retrieve uint32 type of nnfw information for given information ID.
- *
- * @param[in] session session to be queried on
- * @param[in] information ID to be queried
- * @param[out] val uint32 value to be returned
- *
- * @return @c NNFW_STATUS_NO_ERROR if successful
- */
-NNFW_STATUS nnfw_query_info_u32(nnfw_session *session, NNFW_INFO_ID id, uint32_t *val)
-{
- (void)session;
- switch (id)
- {
- case NNFW_INFO_ID_VERSION:
- if (val)
- {
- *val = NNFW_VERSION;
- return NNFW_STATUS_NO_ERROR;
- }
- break;
- default:
- return NNFW_STATUS_ERROR;
- }
- // It should not be reached.
- return NNFW_STATUS_ERROR;
-}
-
-NNFW_STATUS nnfw_load_circle_from_buffer(nnfw_session *session, uint8_t *buffer, size_t size)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->load_circle_from_buffer(buffer, size);
-}
-
-NNFW_STATUS nnfw_input_tensorindex(nnfw_session *session, const char *tensorname, uint32_t *index)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->input_tensorindex(tensorname, index);
-}
-
-NNFW_STATUS nnfw_output_tensorindex(nnfw_session *session, const char *tensorname, uint32_t *index)
-{
- NNFW_RETURN_ERROR_IF_NULL(session);
- return session->output_tensorindex(tensorname, index);
-}
diff --git a/runtime/onert/api/src/nnfw_api_internal.cc b/runtime/onert/api/src/nnfw_api_internal.cc
deleted file mode 100644
index aa066e190..000000000
--- a/runtime/onert/api/src/nnfw_api_internal.cc
+++ /dev/null
@@ -1,896 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "nnfw_api_internal.h"
-#include "CustomKernelRegistry.h"
-#include "compiler/Compiler.h"
-#include "util/ConfigSource.h"
-#include "util/Exceptions.h"
-#include "exec/Execution.h"
-#include "circle_loader.h"
-#include "tflite_loader.h"
-#include "json/json.h"
-#include "ir/OpCode.h"
-#include <fstream>
-#include <iostream>
-#include <string>
-#include <vector>
-#include <dirent.h>
-#include <util/ConfigSource.h>
-#include <misc/string_helpers.h>
-
-/*
- * API does not accept string argument longer than max length below
- */
-#define MAX_BACKEND_NAME_LENGTH 32
-#define MAX_OP_NAME_LENGTH 64
-#define MAX_PATH_LENGTH 1024
-#define MAX_TENSOR_NAME_LENGTH 64
-
-// Is null-terminating in length ?
-static bool null_terminating(const char *str, uint32_t length)
-{
- for (uint32_t i = 0; i < length; i++)
- {
- if (*(str + i) == '\0')
- {
- return true;
- }
- }
- return false;
-}
-
-static onert::ir::Layout convertLayout(NNFW_LAYOUT layout)
-{
- if (layout == NNFW_LAYOUT_CHANNELS_LAST)
- {
- return onert::ir::Layout::NHWC;
- }
- else if (layout == NNFW_LAYOUT_CHANNELS_FIRST)
- {
- return onert::ir::Layout::NCHW;
- }
- return onert::ir::Layout::UNKNOWN;
-}
-
-NNFW_STATUS getTensorIndexImpl(const onert::ir::Graph &graph, const char *tensorname,
- uint32_t *index, bool is_input)
-{
- if (!tensorname || !index)
- return NNFW_STATUS_UNEXPECTED_NULL;
-
- if (!null_terminating(tensorname, MAX_TENSOR_NAME_LENGTH))
- {
- std::cerr << "nnpackage path is too long" << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- auto ind_found = is_input ? graph.getInputIndex(tensorname) : graph.getOutputIndex(tensorname);
-
- if (ind_found.undefined())
- {
- // Not found
- return NNFW_STATUS_ERROR;
- }
- else
- {
- *index = ind_found.value();
- return NNFW_STATUS_NO_ERROR;
- }
-}
-
-nnfw_session::nnfw_session()
- : _subgraphs{nullptr}, _execution{nullptr},
- _kernel_registry{std::make_shared<onert::frontend::custom::KernelRegistry>()}
-{
- // DO NOTHING
-}
-
-nnfw_session::~nnfw_session() = default;
-
-NNFW_STATUS nnfw_session::load_circle_from_buffer(uint8_t *buffer, size_t size)
-{
- if (!isStateInitialized())
- return NNFW_STATUS_INVALID_STATE;
-
- if (!buffer)
- return NNFW_STATUS_UNEXPECTED_NULL;
-
- if (size == 0)
- return NNFW_STATUS_ERROR;
-
- try
- {
- _subgraphs = onert::circle_loader::loadModel(buffer, size);
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during model loading : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- _compiler = std::make_unique<onert::compiler::Compiler>(_subgraphs);
-
- _state = State::MODEL_LOADED;
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::load_model_from_file(const char *package_dir)
-{
- if (!isStateInitialized())
- return NNFW_STATUS_INVALID_STATE;
-
- if (!package_dir)
- {
- std::cerr << "package_dir is null." << std::endl;
- return NNFW_STATUS_UNEXPECTED_NULL;
- }
-
- if (!null_terminating(package_dir, MAX_PATH_LENGTH))
- {
- std::cerr << "nnpackage path is too long" << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- // TODO : add support for zipped package file load
- DIR *dir;
- if (!(dir = opendir(package_dir)))
- {
- std::cerr << "invalid nnpackge directory: " << package_dir << std::endl;
- return NNFW_STATUS_ERROR;
- }
- closedir(dir);
-
- try
- {
- std::string manifest_file_name(package_dir);
- manifest_file_name += "/metadata/MANIFEST";
- std::ifstream mfs(manifest_file_name);
-
- // extract the filename of the first(index 0) model
- // e.g. In MANIFEST file, { "models" : [ "firstmodel.tflite", "2nd.tflite" ] }
- Json::Value root;
- mfs >> root;
- const Json::Value &models = root["models"];
- const Json::Value &model_types = root["model-types"];
-
- auto model_file_path = package_dir + std::string("/") + models[0].asString(); // first model
- auto model_type = model_types[0].asString(); // first model's type
- if (model_type == "tflite")
- {
- _subgraphs = onert::tflite_loader::loadModel(model_file_path.c_str());
- }
- else if (model_type == "circle")
- {
- _subgraphs = onert::circle_loader::loadModel(model_file_path.c_str());
- }
- else
- {
- std::cerr << "Unsupported model type in MANIFEST" << std::endl;
- return NNFW_STATUS_ERROR;
- }
- _subgraphs->primary()->bindKernelBuilder(_kernel_registry->getBuilder());
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during model loading : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- _compiler = std::make_unique<onert::compiler::Compiler>(_subgraphs);
-
- _state = State::MODEL_LOADED;
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::prepare()
-{
- // NOTE. If users want to run prepare() more than one time, this could be removed.
- if (!isStateModelLoaded())
- {
- std::cerr << "Error during model prepare : ";
- if (isStateInitialized())
- {
- std::cerr << "prepare should be run once";
- }
- else
- {
- std::cerr << "invalid state";
- }
- std::cerr << std::endl;
- return NNFW_STATUS_INVALID_STATE;
- }
-
- if (!_subgraphs || !primary_subgraph() || primary_subgraph()->isBuildingPhase())
- {
- std::cerr << "Error during model prepare : "
- << "prepare should be run after load_model" << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- try
- {
- _subgraphs.reset();
- std::shared_ptr<onert::exec::ExecutorMap> executors = _compiler->compile();
- _execution = std::make_shared<onert::exec::Execution>(executors);
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during model prepare : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- _state = State::PREPARED;
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::run()
-{
- if (!isStatePreparedOrFinishedRun())
- {
- std::cerr << "Error during nnfw_session::run : "
- << "run should be run after prepare" << std::endl;
- return NNFW_STATUS_INVALID_STATE;
- }
-
- try
- {
- _execution->execute();
- }
- catch (const onert::InsufficientBufferSizeException &e)
- {
- // Currently insufficient buffer always means output buffer.
- std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
- return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- _state = State::FINISHED_RUN;
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::run_async()
-{
- if (!isStatePreparedOrFinishedRun())
- {
- std::cerr << "Error during nnfw_session::run_async : "
- << "run_async should be run after prepare" << std::endl;
- return NNFW_STATUS_INVALID_STATE;
- }
-
- _execution->startExecute();
-
- _state = State::RUNNING;
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::await()
-{
- if (!isStateRunning())
- {
- std::cerr << "Error during nnfw_session::run_await : "
- << "run_await should be run after run_async" << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- _execution->waitFinish();
-
- _state = State::FINISHED_RUN;
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::set_input(uint32_t index, NNFW_TYPE /*type*/, const void *buffer,
- size_t length)
-{
- if (!isStatePreparedOrFinishedRun())
- {
- std::cerr << "Error during nnfw_session::set_input : invalid state" << std::endl;
- return NNFW_STATUS_INVALID_STATE;
- }
-
- if (!buffer && length != 0)
- {
- std::cerr
- << "Error during nnfw_session::set_input : given buffer is NULL but the length is not 0"
- << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- try
- {
- _execution->setInput(onert::ir::IOIndex(index), buffer, length);
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::set_input : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::set_output(uint32_t index, NNFW_TYPE /*type*/, void *buffer,
- size_t length)
-{
- if (!isStatePreparedOrFinishedRun())
- {
- std::cerr << "Error during nnfw_session::set_output : invalid state" << std::endl;
- return NNFW_STATUS_INVALID_STATE;
- }
-
- if (!buffer && length != 0)
- {
- std::cerr
- << "Error during nnfw_session::set_output : given buffer is NULL but the length is not 0"
- << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- try
- {
- _execution->setOutput(onert::ir::IOIndex(index), buffer, length);
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::set_output : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::input_size(uint32_t *number)
-{
- if (isStateInitialized()) // Model is not loaded
- return NNFW_STATUS_INVALID_STATE;
-
- try
- {
- if (number == nullptr)
- {
- std::cerr << "Error during nnfw_session::input_size, number is null pointer." << std::endl;
- return NNFW_STATUS_UNEXPECTED_NULL;
- }
- *number = primary_subgraph()->getInputs().size();
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::input_size : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::output_size(uint32_t *number)
-{
- if (isStateInitialized()) // Model is not loaded
- return NNFW_STATUS_INVALID_STATE;
-
- try
- {
- if (number == nullptr)
- {
- std::cerr << "Error during nnfw_session::output_size, number is null pointer." << std::endl;
- return NNFW_STATUS_UNEXPECTED_NULL;
- }
- *number = primary_subgraph()->getOutputs().size();
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::output_size" << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::set_input_layout(uint32_t index, NNFW_LAYOUT layout)
-{
- try
- {
- if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
- layout != NNFW_LAYOUT_CHANNELS_LAST)
- {
- std::cerr << "Error during nnfw_session::set_input_layout, not supported layout" << std::endl;
- return NNFW_STATUS_ERROR;
- }
- _execution->setInputLayout(onert::ir::IOIndex(index), convertLayout(layout));
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::set_input_layout : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::set_output_layout(uint32_t index, NNFW_LAYOUT layout)
-{
- try
- {
- if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
- layout != NNFW_LAYOUT_CHANNELS_LAST)
- {
- std::cerr << "Error during nnfw_session::set_output_layout, not supported layout"
- << std::endl;
- return NNFW_STATUS_ERROR;
- }
- _execution->setOutputLayout(onert::ir::IOIndex(index), convertLayout(layout));
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::set_output_layout : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-static NNFW_TYPE datatype_to_nnfw_dtype(onert::ir::DataType dt)
-{
- using onert::ir::DataType;
- switch (dt)
- {
- case DataType::FLOAT32:
- return NNFW_TYPE_TENSOR_FLOAT32;
- case DataType::INT32:
- return NNFW_TYPE_TENSOR_INT32;
- case DataType::QUANT_UINT8_ASYMM:
- return NNFW_TYPE_TENSOR_QUANT8_ASYMM;
- case DataType::BOOL8:
- return NNFW_TYPE_TENSOR_BOOL;
- case DataType::UINT8:
- return NNFW_TYPE_TENSOR_UINT8;
- case DataType::INT64:
- return NNFW_TYPE_TENSOR_INT64;
- case DataType::UINT32:
- case DataType::QUANT_INT8_SYMM:
- default:
- throw std::runtime_error("Error: Model has type that runtime API does not support.");
- }
-}
-
-NNFW_STATUS nnfw_session::apply_tensorinfo(uint32_t index, nnfw_tensorinfo ti)
-{
- // sanity check
- {
- if (isStateInitialized())
- {
- std::cerr << "Error during set_input_tensorinfo : should be run after load_model"
- << std::endl;
- return NNFW_STATUS_INVALID_STATE;
- }
-
- if (ti.rank <= 0 || ti.rank > NNFW_MAX_RANK)
- {
- std::cerr << "unsupported rank: " << ti.rank << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- for (int32_t i = 0; i < ti.rank; ++i)
- {
- if (ti.dims[i] <= 0)
- {
- std::cerr << "dim must be positive integer but was " << ti.dims[i] << std::endl;
- return NNFW_STATUS_ERROR;
- }
- }
- }
-
- auto ind = primary_subgraph()->getInputs().at(index);
- auto &input = primary_subgraph()->operands().at(ind);
-
- onert::ir::Shape new_shape(ti.rank);
- for (int32_t i = 0; i < ti.rank; i++)
- new_shape.dim(i) = ti.dims[i];
-
- // if passed shape is same with the shape of model, do nothing
- if (input.info().shape() == new_shape)
- return NNFW_STATUS_NO_ERROR;
-
- if (!isStatePreparedOrFinishedRun())
- {
- // In this case, if we apply input shape in primary_subgraph, it will propagate after
- // compilation and excution
-
- // overwrite input shape with the shape from ti
- input.info().shape(new_shape);
- }
- else // when called after nnfw_session::prepare()
- {
- _execution->changeInputShape(onert::ir::IOIndex(index), new_shape);
- }
-
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::set_input_tensorinfo(uint32_t index, const nnfw_tensorinfo *ti)
-{
- nnfw_tensorinfo ti_copy = *ti;
- return apply_tensorinfo(index, ti_copy);
-}
-
-NNFW_STATUS nnfw_session::input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
-{
- if (isStateInitialized())
- return NNFW_STATUS_INVALID_STATE;
-
- try
- {
- if (ti == nullptr)
- {
- std::cerr << "Error during nnfw_session::input_tensorinfo, tensorinfo is null pointer."
- << std::endl;
- return NNFW_STATUS_UNEXPECTED_NULL;
- }
- if (index >= primary_subgraph()->getInputs().size())
- {
- std::cerr << "Error during nnfw_session::input_tensorinfo, index is out of range."
- << std::endl;
- return NNFW_STATUS_ERROR;
- }
- auto opidx = primary_subgraph()->getInputs().at(index);
- auto shape = primary_subgraph()->operands().at(opidx).shape();
- if (isStatePreparedOrFinishedRun())
- shape = _execution->getInputShape(onert::ir::IOIndex{index});
- ti->rank = shape.rank();
- for (int j = 0; j < ti->rank; ++j)
- {
- ti->dims[j] = shape.dim(j);
- }
- ti->dtype = datatype_to_nnfw_dtype(primary_subgraph()->operands().at(opidx).typeInfo().type());
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::input_tensorinfo : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::output_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
-{
- if (isStateInitialized())
- return NNFW_STATUS_INVALID_STATE;
-
- if (ti == nullptr)
- {
- std::cerr << "Error during nnfw_session::output_tensorinfo, tensorinfo is null pointer."
- << std::endl;
- return NNFW_STATUS_UNEXPECTED_NULL;
- }
-
- if (index >= primary_subgraph()->getOutputs().size())
- {
- std::cerr << "Error during nnfw_session::output_tensorinfo, index is out of range."
- << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- try
- {
- auto opidx = primary_subgraph()->getOutputs().at(index);
- auto shape = primary_subgraph()->operands().at(opidx).shape();
- // If it is called after `nnfw_run` then get the shape from Execution, not from the graph
- if (isStateFinishedRun())
- shape = _execution->getOutputShape(onert::ir::IOIndex{index});
- ti->rank = shape.rank();
- for (int j = 0; j < ti->rank; ++j)
- {
- ti->dims[j] = shape.dim(j);
- }
- ti->dtype = datatype_to_nnfw_dtype(primary_subgraph()->operands().at(opidx).typeInfo().type());
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::output_tensorinfo : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
-
- return NNFW_STATUS_NO_ERROR;
-}
-NNFW_STATUS nnfw_session::register_custom_operation(const std::string &id,
- nnfw_custom_eval eval_func)
-{
- _kernel_registry->registerKernel(id, eval_func);
- return NNFW_STATUS_NO_ERROR;
-}
-
-static std::string get_op_backend_string(std::string op)
-{
-#define MAP_MACRO(CircleName, OneRTName) {#CircleName, #OneRTName},
-
- static std::unordered_map<std::string, std::string> operation_map = {
-#include "OpMap.lst"
- };
-
-#undef MAP_MACRO
-
- auto n = operation_map.find(op);
-
- if (n == operation_map.end())
- {
- // this return value is handled by a caller to return error code
- return std::string("");
- }
- else
- {
- return n->second;
- }
-}
-
-NNFW_STATUS nnfw_session::set_available_backends(const char *backends)
-{
- if (!isStateModelLoaded())
- return NNFW_STATUS_INVALID_STATE;
-
- try
- {
- if (!backends)
- return NNFW_STATUS_UNEXPECTED_NULL;
- if (null_terminating(backends, MAX_BACKEND_NAME_LENGTH) == false)
- return NNFW_STATUS_ERROR;
-
- auto &options = _compiler->options();
-
- using namespace onert::util;
-
- options.backend_list = nnfw::misc::split(std::string{backends}, ';');
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::set_available_backends : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::set_op_backend(const char *op, const char *backend)
-{
- if (!isStateModelLoaded())
- return NNFW_STATUS_INVALID_STATE;
-
- try
- {
- if (!op || !backend)
- return NNFW_STATUS_UNEXPECTED_NULL;
- if (!null_terminating(op, MAX_OP_NAME_LENGTH) ||
- !null_terminating(backend, MAX_BACKEND_NAME_LENGTH))
- return NNFW_STATUS_ERROR;
-
- auto key = get_op_backend_string(op);
-
- if (key.empty())
- {
- return NNFW_STATUS_ERROR;
- }
-
- auto &opcode_to_backend = _compiler->options().manual_scheduler_options.opcode_to_backend;
- opcode_to_backend.emplace(onert::ir::toOpCode(key), backend);
- }
- catch (const std::exception &e)
- {
- std::cerr << "Error during nnfw_session::set_op_backend : " << e.what() << std::endl;
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-NNFW_STATUS nnfw_session::set_config(const char *key, const char *value)
-{
- if (!isStateModelLoaded())
- return NNFW_STATUS_INVALID_STATE;
-
- if (!key || !value)
- return NNFW_STATUS_UNEXPECTED_NULL;
-
- auto &options = _compiler->options();
-
- using namespace onert::util;
-
- const std::string skey = key;
-
- if (skey == config::TRACE_FILEPATH)
- {
- options.trace_filepath = value;
- }
- else if (skey == config::GRAPH_DOT_DUMP)
- {
- options.graph_dump_level = toInt(value);
- }
- else if (skey == config::OP_SEQ_MAX_NODE)
- {
- options.op_seq_max_node = toInt(value);
- }
- else if (skey == config::EXECUTOR)
- {
- options.executor = value;
- }
- else if (skey == config::OP_BACKEND_ALLOPS)
- {
- options.manual_scheduler_options.backend_for_all = value;
- }
- else if (skey == config::USE_SCHEDULER)
- {
- options.he_scheduler = toBool(value);
- }
- else if (skey == config::PROFILING_MODE)
- {
- options.he_profiling_mode = toBool(value);
- }
- else if (skey == config::DISABLE_COMPILE)
- {
- options.disable_compile = toBool(value);
- }
- else
- {
- return NNFW_STATUS_ERROR;
- }
- return NNFW_STATUS_NO_ERROR;
-}
-
-onert::ir::Graph *nnfw_session::primary_subgraph()
-{
- if (_subgraphs)
- {
- assert(!_execution);
- return _subgraphs->primary().get();
- }
- else
- {
- assert(_execution);
- // TODO Remove const_cast
- // We assumed the graph will not change after compilation, but shape could change
- return const_cast<onert::ir::Graph *>(&_execution->primary_subgraph());
- }
-}
-
-NNFW_STATUS nnfw_session::get_config(const char *key, char *value, size_t value_size)
-{
- if (!isStateModelLoaded())
- return NNFW_STATUS_INVALID_STATE;
-
- if (!key || !value)
- return NNFW_STATUS_UNEXPECTED_NULL;
-
- auto &options = _compiler->options();
-
- auto check_boundary = [](size_t dest_size, std::string &src) {
- if (dest_size < src.length() + 1 /* for '\0' */)
- {
- std::cerr << "buffer is small to copy config value." << std::endl;
- return false;
- }
- return true;
- };
-
- if (key == onert::util::config::BACKENDS)
- {
- if (options.backend_list.size() == 0)
- return NNFW_STATUS_NO_ERROR; // no setting backend is not an error of get_config_str()
-
- auto str = nnfw::misc::join(options.backend_list.begin(), options.backend_list.end(), ";");
-
- if (!check_boundary(value_size, str))
- return NNFW_STATUS_ERROR;
-
- strncpy(value, str.c_str(), value_size);
- }
- else if (key == onert::util::config::EXECUTOR)
- {
- if (!check_boundary(value_size, options.executor))
- return NNFW_STATUS_ERROR;
-
- strncpy(value, options.executor.c_str(), options.executor.length());
- }
- else
- {
- return NNFW_STATUS_ERROR;
- }
-
- return NNFW_STATUS_NO_ERROR;
-}
-
-bool nnfw_session::isStateInitialized()
-{
- if (_state == State::INITIALIZED)
- {
- assert(!_subgraphs);
- assert(!_compiler);
- assert(!_execution);
- return true;
- }
- else
- {
- return false;
- }
-}
-
-bool nnfw_session::isStateModelLoaded()
-{
- if (_state == State::MODEL_LOADED)
- {
- assert(_subgraphs);
- assert(_compiler);
- assert(!_execution);
- assert(!primary_subgraph()->isBuildingPhase());
- return true;
- }
- else
- {
- return false;
- }
-}
-
-bool nnfw_session::isStatePrepared()
-{
- if (_state == State::PREPARED)
- {
- assert(!_subgraphs);
- assert(_compiler);
- assert(_execution);
- assert(!primary_subgraph()->isBuildingPhase());
- return true;
- }
- else
- {
- return false;
- }
-}
-
-bool nnfw_session::isStateRunning()
-{
- if (_state == State::RUNNING)
- {
- assert(!_subgraphs);
- assert(_compiler);
- assert(_execution);
- assert(!primary_subgraph()->isBuildingPhase());
- return true;
- }
- return false;
-}
-
-bool nnfw_session::isStateFinishedRun()
-{
- if (_state == State::FINISHED_RUN)
- {
- assert(!_subgraphs);
- assert(_compiler);
- assert(_execution);
- assert(!primary_subgraph()->isBuildingPhase());
- return true;
- }
- else
- {
- return false;
- }
-}
-
-bool nnfw_session::isStatePreparedOrFinishedRun()
-{
- return isStatePrepared() || isStateFinishedRun();
-}
-
-NNFW_STATUS nnfw_session::input_tensorindex(const char *tensorname, uint32_t *index)
-{
- return getTensorIndexImpl(*primary_subgraph(), tensorname, index, true);
-}
-
-NNFW_STATUS nnfw_session::output_tensorindex(const char *tensorname, uint32_t *index)
-{
- return getTensorIndexImpl(*primary_subgraph(), tensorname, index, false);
-}
diff --git a/runtime/onert/api/src/nnfw_api_internal.h b/runtime/onert/api/src/nnfw_api_internal.h
deleted file mode 100644
index 604ba38b4..000000000
--- a/runtime/onert/api/src/nnfw_api_internal.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __API_NNFW_API_INTERNAL_H__
-#define __API_NNFW_API_INTERNAL_H__
-
-#include "nnfw.h"
-#include "nnfw_experimental.h"
-
-#include <util/GeneralConfigSource.h>
-
-#include <string>
-#include <memory>
-
-namespace onert
-{
-namespace frontend
-{
-namespace custom
-{
-class KernelRegistry;
-}
-} // namespace frontend
-namespace exec
-{
-class Execution;
-} // namespace exec
-namespace ir
-{
-class Graph;
-class Subgraphs;
-} // namespace ir
-namespace compiler
-{
-class Compiler;
-} // namespace compiler
-} // namespace onert
-
-struct nnfw_session
-{
-private:
- /**
- * @brief Enum class to express the session's state
- *
- * State transition diagram:
- *
- * +--------------+
- * | INITIALIZED |
- * +--------------+
- * |
- * | load_model
- * v
- * +--------------+
- * | MODEL_LOADED |
- * +--------------+
- * |
- * | prepare
- * v
- * +--------------+
- * | PREPARED | --------+
- * +--------------+ |
- * | |
- * | run |
- * v |
- * +--------------+ run |
- * | | -----+ |
- * +-----> | FINISHED_RUN | | | run_async
- * | | | <----+ |
- * | +--------------+ |
- * | | |
- * | await | run_async |
- * | v |
- * | +--------------+ |
- * +------ | RUNNING | <-------+
- * +--------------+
- */
- enum class State
- {
- INITIALIZED, //< Session is initialized and nothing has done to it
- MODEL_LOADED, //< Model is loaded
- PREPARED, //< Prepared(compiled) for execution
- RUNNING, //< Execution is in progress (only for asynchronous execution)
- FINISHED_RUN //< Executed at least once
- };
-
-public:
- nnfw_session();
- ~nnfw_session();
-
- NNFW_STATUS load_model_from_file(const char *package_file_path);
- NNFW_STATUS prepare();
- NNFW_STATUS run();
-
- NNFW_STATUS run_async();
- NNFW_STATUS await();
-
- NNFW_STATUS set_input(uint32_t index, NNFW_TYPE type, const void *buffer, size_t length);
- NNFW_STATUS set_output(uint32_t index, NNFW_TYPE type, void *buffer, size_t length);
-
- NNFW_STATUS input_size(uint32_t *number);
- NNFW_STATUS output_size(uint32_t *number);
-
- NNFW_STATUS set_input_layout(uint32_t index, NNFW_LAYOUT layout);
- NNFW_STATUS set_output_layout(uint32_t index, NNFW_LAYOUT layout);
-
- NNFW_STATUS apply_tensorinfo(uint32_t index, nnfw_tensorinfo ti); // Will be deprecated
- NNFW_STATUS set_input_tensorinfo(uint32_t index, const nnfw_tensorinfo *ti);
-
- NNFW_STATUS input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti);
- NNFW_STATUS output_tensorinfo(uint32_t index, nnfw_tensorinfo *ti);
-
- NNFW_STATUS set_available_backends(const char *backends);
- NNFW_STATUS set_op_backend(const char *op, const char *backend);
-
- //
- // Internal-only API
- //
-
- NNFW_STATUS set_config(const char *key, const char *value);
- NNFW_STATUS get_config(const char *key, char *value, size_t value_size);
- NNFW_STATUS load_circle_from_buffer(uint8_t *buffer, size_t size);
-
- //
- // Experimental API
- //
-
- NNFW_STATUS register_custom_operation(const std::string &id, nnfw_custom_eval eval_func);
- NNFW_STATUS input_tensorindex(const char *tensorname, uint32_t *index);
- NNFW_STATUS output_tensorindex(const char *tensorname, uint32_t *index);
-
-private:
- onert::ir::Graph *primary_subgraph();
- bool isStateInitialized();
- bool isStateModelLoaded();
- bool isStatePrepared();
- bool isStateRunning();
- bool isStateFinishedRun();
- bool isStatePreparedOrFinishedRun();
-
-private:
- State _state{State::INITIALIZED};
- std::shared_ptr<onert::ir::Subgraphs> _subgraphs;
- std::unique_ptr<onert::compiler::Compiler> _compiler;
- std::shared_ptr<onert::exec::Execution> _execution;
- std::shared_ptr<onert::frontend::custom::KernelRegistry> _kernel_registry;
-};
-
-#endif // __API_NNFW_API_INTERNAL_H__
diff --git a/runtime/onert/api/src/nnfw_debug.cc b/runtime/onert/api/src/nnfw_debug.cc
deleted file mode 100644
index b9f110390..000000000
--- a/runtime/onert/api/src/nnfw_debug.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "nnfw_api_internal.h"
-
-#include <util/ConfigSource.h>
-
-NNFW_STATUS nnfw_set_config(nnfw_session *session, const char *key, const char *value)
-{
- return session->set_config(key, value);
-}
-
-NNFW_STATUS nnfw_get_config(nnfw_session *session, const char *key, char *value, size_t value_size)
-{
- return session->get_config(key, value, value_size);
-}
diff --git a/runtime/onert/backend/CMakeLists.txt b/runtime/onert/backend/CMakeLists.txt
deleted file mode 100644
index 42d622aa8..000000000
--- a/runtime/onert/backend/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LIB_ONERT_BACKEND_ACL_COMMON onert_backend_acl_common)
-
-add_subdirectory(cpu)
-add_subdirectory(acl_cl)
-add_subdirectory(acl_neon)
-add_subdirectory(acl_common)
diff --git a/runtime/onert/backend/acl_cl/Backend.h b/runtime/onert/backend/acl_cl/Backend.h
deleted file mode 100644
index 5c5041378..000000000
--- a/runtime/onert/backend/acl_cl/Backend.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_BACKEND_H__
-#define __ONERT_BACKEND_ACL_CL_BACKEND_H__
-
-#include <memory>
-#include <backend/Backend.h>
-
-#include "Config.h"
-#include "ConstantInitializer.h"
-#include "KernelGenerator.h"
-#include "TensorManager.h"
-#include "Optimizer.h"
-#include "AclTensorRegistry.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class Backend : public ::onert::backend::Backend
-{
-public:
- Backend() : _config{std::make_shared<Config>()} {}
-
- std::shared_ptr<IConfig> config() const override { return _config; }
-
- std::unique_ptr<BackendContext> newContext(const ir::Graph &graph,
- const std::shared_ptr<custom::IKernelBuilder> &,
- bool is_linear_executor) const override
- {
- const auto &operands = graph.operands();
- const auto &operations = graph.operations();
- auto context = std::make_unique<BackendContext>(this, &graph);
- auto tm = createTensorManager(is_linear_executor);
- auto tr = std::make_shared<acl_common::AclTensorRegistry<TensorManager>>(tm);
- auto tb = std::make_shared<TensorBuilder>(operands, tm, tr);
- context->tensor_registry = tr;
- context->tensor_builder = tb;
- context->constant_initializer = std::make_shared<ConstantInitializer>(operands, tr);
- context->kernel_gen = std::make_shared<KernelGenerator>(operands, operations, tb, tr);
- context->tensor_register = nullptr;
- context->optimizer = std::make_shared<Optimizer>(context.get());
- return context;
- }
-
-private:
- std::shared_ptr<IConfig> _config;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_BACKEND_H__
diff --git a/runtime/onert/backend/acl_cl/CLTimer.h b/runtime/onert/backend/acl_cl/CLTimer.h
deleted file mode 100644
index 722dc68ef..000000000
--- a/runtime/onert/backend/acl_cl/CLTimer.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_CLTIMER_H__
-#define __ONERT_BACKEND_ACL_CL_CLTIMER_H__
-
-#include <util/ITimer.h>
-#include <arm_compute/core/CL/OpenCL.h>
-#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <chrono>
-#include <list>
-#include <sstream>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-/**
- * @brief Class to measure CL kernels execution time
- */
-class CLTimer : public util::ITimer
-{
-public:
- /**
- * @brief This function replaces CL function, which enqueues a command to execute a kernel
- * with a wrapper which remembers enqueued kernels
- */
- void handleBegin() override
- {
- _measured_events.clear();
-
- _origin_enqueue_function = arm_compute::CLSymbols::get().clEnqueueNDRangeKernel_ptr;
-
- auto _timer_enqueue_function = [this](cl_command_queue command_queue, cl_kernel kernel,
- cl_uint work_dim, const size_t *gwo, const size_t *gws,
- const size_t *lws, cl_uint num_events_in_wait_list,
- const cl_event *event_wait_list, cl_event *usr_event) {
- cl_event event;
- cl_int enqueue_res =
- this->_origin_enqueue_function(command_queue, kernel, work_dim, gwo, gws, lws,
- num_events_in_wait_list, event_wait_list, &event);
- this->_measured_events.emplace_back(event);
-
- // According to spec, if NULL was provided in usr_event - event shouldn't be returned
- if (usr_event != nullptr)
- {
- clRetainEvent(event);
- *usr_event = event;
- }
- return enqueue_res;
- };
- arm_compute::CLSymbols::get().clEnqueueNDRangeKernel_ptr = _timer_enqueue_function;
-
- // Set CL_QUEUE_PROFILING_ENABLE flag for the CL command-queue, if it isn't already set
- auto &cl_scheduler = arm_compute::CLScheduler::get();
- auto props = cl_scheduler.queue().getInfo<CL_QUEUE_PROPERTIES>();
- if ((props & CL_QUEUE_PROFILING_ENABLE) == 0)
- {
- cl_scheduler.set_queue(
- cl::CommandQueue(cl_scheduler.context(), props | CL_QUEUE_PROFILING_ENABLE));
- }
- };
-
- /**
- * @brief Get timer result by addition executed CL kernels durations
- */
- void handleEnd() override
- {
- _timer_res = 0;
- for (auto const &event : _measured_events)
- {
- cl_ulong start;
- cl_ulong end;
- event.getProfilingInfo(CL_PROFILING_COMMAND_START, &start);
- event.getProfilingInfo(CL_PROFILING_COMMAND_END, &end);
- _timer_res += (end - start) / 1000.f; // nanoseconds -> microseconds
- }
-
- // Restore origin CL enqueue function
- arm_compute::CLSymbols::get().clEnqueueNDRangeKernel_ptr = _origin_enqueue_function;
- };
-
-private:
- std::function<decltype(clEnqueueNDRangeKernel)> _origin_enqueue_function;
- std::list<::cl::Event> _measured_events;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_CLTIMER_H__
diff --git a/runtime/onert/backend/acl_cl/CMakeLists.txt b/runtime/onert/backend/acl_cl/CMakeLists.txt
deleted file mode 100644
index 6f91d9691..000000000
--- a/runtime/onert/backend/acl_cl/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Unsupported architecture
-nnfw_find_package(ARMCompute QUIET)
-if(NOT ARMCompute_FOUND)
- return()
-endif(NOT ARMCompute_FOUND)
-
-set(LIB_ONERT_BACKEND_ACL_CL onert_backend_acl_cl)
-
-file(GLOB_RECURSE SOURCES "*.cc")
-
-add_library(${LIB_ONERT_BACKEND_ACL_CL} SHARED ${SOURCES})
-
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_CL} PRIVATE ${LIB_ONERT_BACKEND_ACL_COMMON})
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_CL} PRIVATE nnfw_common)
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_CL} PRIVATE nnfw_coverage)
-
-set_target_properties(${LIB_ONERT_BACKEND_ACL_CL} PROPERTIES OUTPUT_NAME backend_acl_cl)
-
-install(TARGETS ${LIB_ONERT_BACKEND_ACL_CL} DESTINATION lib)
diff --git a/runtime/onert/backend/acl_cl/Config.cc b/runtime/onert/backend/acl_cl/Config.cc
deleted file mode 100644
index 8017bdb0b..000000000
--- a/runtime/onert/backend/acl_cl/Config.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// For CLKernelLibraryEx initialization
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/CLKernelLibraryEx.h"
-
-#include <util/ConfigSource.h>
-
-#include <arm_compute/runtime/CL/CLScheduler.h>
-
-#include "Config.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-bool Config::initialize()
-{
- if (!arm_compute::opencl_is_available())
- {
- return false;
- }
- arm_compute::CLScheduler::get().default_init();
- // NOTE CLKernelLibraryEx must use the same context as CLScheduler
- // It did not check whether another device is available.
- arm_compute::CLKernelLibraryEx::get().init(
- "./cl_kernels/", arm_compute::CLScheduler::get().context(), cl::Device::getDefault());
-
- return true;
-}
-
-ir::Layout Config::supportLayout(const ir::Operation &, ir::Layout frontend_layout)
-{
- const std::string acl_layout_str = util::getConfigString(util::config::ACL_LAYOUT);
- if (acl_layout_str == "NHWC")
- {
- return ir::Layout::NHWC;
- }
- else if (acl_layout_str == "NCHW")
- {
- return ir::Layout::NCHW;
- }
-
- return frontend_layout;
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_cl/Config.h b/runtime/onert/backend/acl_cl/Config.h
deleted file mode 100644
index f71e81b6a..000000000
--- a/runtime/onert/backend/acl_cl/Config.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_CONFIG_H__
-#define __ONERT_BACKEND_ACL_CL_CONFIG_H__
-
-#include "CLTimer.h"
-#include <memory>
-#include <backend/IConfig.h>
-#include <arm_compute/runtime/CL/CLScheduler.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class Config : public IConfig
-{
-public:
- std::string id() override { return "acl_cl"; }
- bool initialize() override;
- bool supportPermutation() override { return true; }
- ir::Layout supportLayout(const ir::Operation &node, ir::Layout frontend_layout) override;
- bool supportDynamicTensor() override { return false; }
- bool supportFP16() override { return true; }
- void sync() const override { arm_compute::CLScheduler::get().sync(); }
-
- std::unique_ptr<util::ITimer> timer() override { return std::make_unique<CLTimer>(); }
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_CONFIG_H__
diff --git a/runtime/onert/backend/acl_cl/ConstantInitializer.cc b/runtime/onert/backend/acl_cl/ConstantInitializer.cc
deleted file mode 100644
index b45b91058..000000000
--- a/runtime/onert/backend/acl_cl/ConstantInitializer.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <AclActivationBuilder.h>
-#include <AclFunction.h>
-#include <Convert.h>
-#include <Swizzle.h>
-
-#include "ConstantInitializer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg)
- : acl_common::AclConstantInitializer{operands, tensor_reg}
-{
- // DO NOTHING
-}
-
-void ConstantInitializer::visit(const ir::operation::EmbeddingLookup &node)
-{
- copyInputInitialize(node, ir::operation::EmbeddingLookup::LOOKUPS);
-}
-
-void ConstantInitializer::visit(const ir::operation::Gather &node)
-{
- copyInputInitialize(node, ir::operation::Gather::INDICES);
-}
-
-void ConstantInitializer::visit(const ir::operation::HashtableLookup &node)
-{
- copyInputInitialize(node, ir::operation::HashtableLookup::LOOKUPS);
- copyInputInitialize(node, ir::operation::HashtableLookup::KEYS);
-}
-
-void ConstantInitializer::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto &block_size_index = node.getInputs().at(ir::operation::SpaceToBatchND::BLOCK_SIZE);
- const auto &block_size_obj = _operands.at(block_size_index);
-
- if (block_size_obj.isConstant())
- {
- _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::ITensor &obj) {
- assert(model_obj.data());
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data()->base());
- assert(model_obj.shape().rank() == 1);
- obj.access([&](ITensor &tensor) {
- for (size_t i = 0; i < shape.num_elements(); ++i)
- {
- const int32_t value = base[shape.num_elements() - i - 1];
- int32_t *into = reinterpret_cast<int32_t *>(tensor.buffer() +
- tensor.calcOffset({static_cast<int32_t>(i)}));
- *into = value;
- }
- });
- };
- }
-
- const auto &paddings_index = node.getInputs().at(ir::operation::SpaceToBatchND::PADDINGS);
- const auto &paddings_obj = _operands.at(paddings_index);
- if (paddings_obj.isConstant())
- {
- _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::ITensor &obj) {
- assert(model_obj.data());
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data()->base());
- assert(model_obj.shape().rank() == 2);
- assert(obj.dimension(0) == 2);
- obj.access([&](ITensor &tensor) {
- for (auto i = 0; i < shape.dim(0); ++i)
- {
- for (auto j = 0; j < shape.dim(1); ++j)
- {
- const int32_t value = base[i * 2 + j];
- int32_t *into = reinterpret_cast<int32_t *>(
- tensor.buffer() + tensor.calcOffset({shape.dim(0) - i - 1, j}));
- *into = value;
- }
- }
- });
- };
- }
-}
-
-void ConstantInitializer::visit(const ir::operation::Reverse &node)
-{
- const auto &output_index = node.getOutputs().at(0);
-
- const auto &input_index = node.getInputs().at(ir::operation::Reverse::Input::INPUT);
- const auto &input_obj = _operands.at(input_index);
-
- const auto &axis_index = node.getInputs().at(ir::operation::Reverse::Input::AXIS);
- const auto &axis_obj = _operands.at(axis_index);
-
- const auto ifm_rank = input_obj.shape().rank();
- const auto frontend_layout = this->_current_op_seq_layout;
-
- auto output_tensor = this->_tensor_reg->getITensor(output_index);
- const auto backend_layout = output_tensor->layout();
-
- if (axis_obj.isConstant())
- {
- _init_map[axis_index] = [ifm_rank, frontend_layout, backend_layout](const ir::Operand &operand,
- backend::ITensor &obj) {
- assert(operand.data());
-
- const auto axis_value = *(reinterpret_cast<const int32_t *>(operand.data()->base()));
- int32_t axis_tmp = axis_value;
- if (axis_tmp < 0)
- {
- axis_tmp = axis_tmp + ifm_rank;
- }
-
- auto axis =
- acl_common::ToARMComputeAxis(ifm_rank, axis_tmp, frontend_layout, backend_layout).value();
-
- obj.access([&](ITensor &tensor) {
- int32_t *into = reinterpret_cast<int32_t *>(tensor.buffer());
- *into = (int32_t)axis;
- });
- };
- }
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_cl/ConstantInitializer.h b/runtime/onert/backend/acl_cl/ConstantInitializer.h
deleted file mode 100644
index 9f3acb461..000000000
--- a/runtime/onert/backend/acl_cl/ConstantInitializer.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_ACL_CL_CONSTANT_INITIALIZER_H__
-#define __ONERT_COMPILER_ACL_CL_CONSTANT_INITIALIZER_H__
-
-#include "AclConstantInitializer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class ConstantInitializer : public acl_common::AclConstantInitializer
-{
-public:
- ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg);
-
-public:
- using acl_common::AclConstantInitializer::visit;
- void visit(const ir::operation::EmbeddingLookup &) final;
- void visit(const ir::operation::Gather &) final;
- void visit(const ir::operation::HashtableLookup &) final;
- void visit(const ir::operation::SpaceToBatchND &) final;
- void visit(const ir::operation::Reverse &) final;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_COMPILER_ACL_CL_CONSTANT_INITIALIZER_H__
diff --git a/runtime/onert/backend/acl_cl/KernelGenerator.cc b/runtime/onert/backend/acl_cl/KernelGenerator.cc
deleted file mode 100644
index cc9afcaeb..000000000
--- a/runtime/onert/backend/acl_cl/KernelGenerator.cc
+++ /dev/null
@@ -1,1618 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "KernelGenerator.h"
-
-#include <arm_compute/runtime/CL/CLFunctions.h> // Include all ARM Compute CL functions
-#include <arm_compute/runtime/CL/CLFunctionsEx.h> // Include all ARM Compute EX CL functions
-
-#include <AclActivationBuilder.h>
-#include <AclFunction.h>
-#include <Convert.h>
-#include <Swizzle.h>
-
-#include "ir/Index.h"
-#include "ir/DataType.h"
-#include "ir/InternalType.h"
-#include "exec/NopFunction.h"
-#include "exec/FunctionSequence.h"
-#include "util/logging.h"
-#include "util/Utils.h"
-#include "AclKernelGen.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-using ::onert::backend::acl_common::asAclFunction;
-using ActivationBuilder = ::onert::backend::acl_common::AclActivationBuilder<
- ::arm_compute::ICLTensor, ::arm_compute::CLActivationLayer, acl_common::AclFunction>;
-
-KernelGenerator::KernelGenerator(
- const ir::Operands &operands_ctx, const ir::Operations &operations_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<acl_common::AclTensorRegistry<TensorManager>> &tensor_reg)
- : _ctx(operands_ctx), _operations_ctx(operations_ctx), _tensor_builder(tensor_builder),
- _tensor_reg(tensor_reg), _current_op_seq_layout(ir::Layout::UNKNOWN)
-{
- // DO NOTHING
-}
-
-void KernelGenerator::visit(const ir::OpSequence &op_seq)
-{
- // TODO Move this to IKernelGenerator
- // (all derivatives have the same implementation for this)
- assert(!_return_fn_seq);
- _return_fn_seq = std::make_unique<exec::FunctionSequence>();
- _return_fn_seq->enableDynamicShapeInferer(false);
-
- _current_op_seq_layout = op_seq.getLayout();
- for (const auto &operation_idx : op_seq.operations())
- {
- const auto &node = _operations_ctx.at(operation_idx);
- node.accept(*this);
- _return_fn_seq->append(releaseFunction());
- }
-}
-
-void KernelGenerator::visit(const ir::operation::BatchToSpaceND &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::BatchToSpaceND::Input::INPUT)};
- const auto block_size_index{
- node.getInputs().at(ir::operation::BatchToSpaceND::Input::BLOCK_SIZE)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto block_size_tensor = _tensor_reg->getAclTensor(block_size_index);
-
- assert(_ctx.at(block_size_index).data());
-
- auto fn = acl_common::generateLayer<arm_compute::CLBatchToSpaceLayer>(
- ifm_tensor->handle(), block_size_tensor->handle(), ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::BinaryArithmetic &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::RHS)};
-
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);
-
- const auto act_info = acl_common::asActivationLayerInfo(activation);
-
- std::unique_ptr<arm_compute::IFunction> fn;
- switch (node.param().arithmetic_type)
- {
- case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
- {
- fn = acl_common::generateLayer<arm_compute::CLArithmeticAddition>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(),
- arm_compute::ConvertPolicy::SATURATE, act_info);
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
- {
- fn = acl_common::generateLayer<arm_compute::CLArithmeticSubtraction>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(),
- arm_compute::ConvertPolicy::SATURATE, act_info);
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
- {
- fn = acl_common::generateLayer<arm_compute::CLPixelWiseMultiplication>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), 1.0, // scale
- arm_compute::ConvertPolicy::SATURATE, arm_compute::RoundingPolicy::TO_NEAREST_EVEN,
- act_info);
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::DIV:
- {
- fn = acl_common::generateLayer<arm_compute::CLArithmeticDivision>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), act_info);
- break;
- }
- default:
- assert(false && "The BinaryArithmetic operation supports only binary arithmetic operations");
- break;
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Conv2D &node)
-{
- using ir::operation::Conv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
- const auto &ker_shape = _ctx.at(ker_index).shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
-
- const auto stride = node.param().stride;
- const auto padding = ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride,
- ker_width, ker_height);
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
- auto bias_tensor = _tensor_reg->getAclTensor(bias_index);
-
- const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
- const auto act_info = acl_common::asActivationLayerInfo(activation);
-
- auto fn = acl_common::generateLayer<arm_compute::CLConvolutionLayer>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), ifm_tensor->handle(),
- ker_tensor->handle(), bias_tensor->handle(), ofm_tensor->handle(), conv_info,
- ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
-{
- using ir::operation::DepthwiseConv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(DepthwiseConv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(DepthwiseConv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(DepthwiseConv2D::Input::BIAS)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- // Kernel format is [1, kernel_height, kernel_width, depth_out].
- const auto &ker_shape = _ctx.at(ker_index).shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
-
- const auto stride = node.param().stride;
- const auto padding = ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride,
- ker_width, ker_height);
- const auto multiplier = node.param().multiplier;
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
- auto bias_tensor = _tensor_reg->getAclTensor(bias_index);
-
- const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
- const auto act_info = acl_common::asActivationLayerInfo(activation);
-
- {
- auto fn = acl_common::generateLayer<arm_compute::CLDepthwiseConvolutionLayer>(
- ifm_tensor->handle(), ker_tensor->handle(), bias_tensor->handle(), ofm_tensor->handle(),
- conv_info, multiplier, act_info);
-
- _return_fn = asAclFunction(std::move(fn));
- }
-}
-
-void KernelGenerator::visit(const ir::operation::Concat &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- std::vector<ir::OperandIndex> input_indexes;
-
- for (const auto &input : node.getInputs())
- input_indexes.emplace_back(input);
-
- const auto axis = node.param().axis;
-
- // Concat elimination check
- bool eliminated = _tensor_builder->areSubTensorsOf(ofm_index, node.getInputs());
- if (eliminated)
- {
- // If concat eliminated, return a NOP IFunction
- VERBOSE(acl_cl_KernelGenerator_Concat) << "Concat eliminated" << std::endl;
- _return_fn = std::make_unique<exec::NopFunction>();
- return;
- }
-
- auto output_tensor = _tensor_reg->getAclTensor(ofm_index);
- std::vector<::arm_compute::ICLTensor *> input_tensors;
- for (auto &ifm_ind : input_indexes)
- input_tensors.emplace_back(_tensor_reg->getAclTensor(ifm_ind)->handle());
-
- std::unique_ptr<::arm_compute::IFunction> fn;
- if (input_indexes.size() < 2)
- {
- fn = acl_common::generateLayer<arm_compute::CLCopy>(input_tensors.at(0),
- output_tensor->handle());
- }
- else
- {
- const auto rank = _ctx.at(ofm_index).shape().rank();
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = output_tensor->layout();
- const auto fixed_axis =
- acl_common::ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value();
- fn = acl_common::generateLayer<::arm_compute::CLConcatenateLayer>(
- input_tensors, output_tensor->handle(), fixed_axis);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::FullyConnected &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- const auto activation = node.param().activation;
-
- auto fn = acl_common::kernelGenFullyConnected<acl_common::AclFunction, ::arm_compute::ICLTensor,
- ::arm_compute::CLFullyConnectedReshapingLayer>(
- node, _ctx, _tensor_builder, _tensor_reg, _current_op_seq_layout);
- _return_fn = std::make_unique<exec::FunctionSequence>(
- std::move(fn), ActivationBuilder::generate(activation, output_tensor->handle()));
-}
-
-void KernelGenerator::visit(const ir::operation::Reduce &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
- const auto axes_index{node.getInputs().at(ir::operation::Reduce::Input::AXES)};
- const auto keep_dims{node.param().keep_dims};
- const auto reduce_type = node.param().reduce_type;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- // Convert to ACL axes taking into account negative values and possible duplicates.
- const auto &axes = _ctx.at(axes_index);
- const auto input_rank = _ctx.at(input_index).shape().rank();
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = input_tensor->layout();
-
- std::unique_ptr<arm_compute::IFunction> fn;
- if (reduce_type == ir::operation::Reduce::ReduceType::MEAN)
- {
- const auto acl_axes =
- acl_common::asCoordinates(axes, input_rank, frontend_layout, backend_layout);
- fn = acl_common::generateLayer<arm_compute::CLReduceMean>(input_tensor->handle(), acl_axes,
- keep_dims, output_tensor->handle());
- }
- else
- {
- const auto acl_axes = acl_common::asSet(axes, input_rank, frontend_layout, backend_layout);
-
- fn = acl_common::generateLayer<arm_compute::CLReduceOperation>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
- output_tensor->handle(), acl_axes, keep_dims, acl_common::convertReduceType(reduce_type));
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Reshape &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- // NOTE This operation must not be changed the layout from frontend to backend
- // So, PermutationOperationPass makes layouts of frontend and backend the same.
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = output_tensor->layout();
- assert((_ctx.at(input_index).shape().rank() < 4 && _ctx.at(output_index).shape().rank() < 4) ||
- frontend_layout == backend_layout);
- UNUSED_RELEASE(frontend_layout);
- UNUSED_RELEASE(backend_layout);
-
- auto fn = acl_common::generateLayer<arm_compute::CLReshapeLayer>(input_tensor->handle(),
- output_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Squeeze &node)
-{
- // Squeeze is identical to reshape except that it has an optional dimensions input.
- // In addition, optional dims_index is ignored since output tensor already has squeezed shape
- // by freezer and toco
- // TODO Support multi-layout for frontend and backend
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
- const auto dims{node.param().dims};
- const auto ndim{node.param().ndim};
- (void)dims;
- (void)ndim;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
- auto fn = acl_common::generateLayer<arm_compute::CLReshapeLayer>(input_tensor->handle(),
- output_tensor->handle());
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Softmax &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Softmax::Input::INPUT)};
-
- const auto beta = node.param().beta;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLSoftmaxLayer>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
- output_tensor->handle(), beta);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Slice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Slice::Input::INPUT)};
- const auto begins_index{node.getInputs().at(ir::operation::Slice::Input::BEGINS)};
- const auto sizes_index{node.getInputs().at(ir::operation::Slice::Input::SIZES)};
-
- auto outputData_tensor = _tensor_reg->getAclTensor(output_index);
- auto inputData_tensor = _tensor_reg->getAclTensor(input_index);
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = inputData_tensor->layout();
-
- // Set initializers for indices data such as order of inputData
- int input_rank = _ctx.at(input_index).shape().rank();
- std::vector<int32_t> starts;
- std::vector<int32_t> ends;
- starts.resize(input_rank, 0);
- ends.resize(input_rank, 0);
- {
- assert(_ctx.at(begins_index).data());
- assert(_ctx.at(sizes_index).data());
- auto beginData_base = _ctx.at(begins_index).data()->base();
- auto sizeData_base = _ctx.at(sizes_index).data()->base();
- const int beginData_size = _ctx.at(begins_index).shape().num_elements();
- const int sizeData_size = _ctx.at(sizes_index).shape().num_elements();
-
- using ir::DataType;
-
- UNUSED_RELEASE(beginData_size);
- UNUSED_RELEASE(sizeData_size);
-
- assert(_ctx.at(begins_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(sizes_index).typeInfo().type() == DataType::INT32);
- assert(beginData_size == input_rank);
- assert(sizeData_size == input_rank);
-
- assert(beginData_base != nullptr);
- for (int n = 0; n < input_rank; ++n)
- {
- auto axis = ::onert::backend::acl_common::ToARMComputeAxis(input_rank, n, frontend_layout,
- backend_layout)
- .value();
-
- int32_t begin_value = *(reinterpret_cast<const int32_t *>(beginData_base) + n);
- starts[axis] = begin_value;
-
- int32_t size_value = *(reinterpret_cast<const int32_t *>(sizeData_base) + n);
- ends[axis] = begin_value + size_value;
- }
- }
-
- ::arm_compute::Coordinates starts_set;
- ::arm_compute::Coordinates ends_set;
-
- for (size_t i = 0; i < starts.size(); ++i)
- {
- starts_set.set(i, starts[i]);
- ends_set.set(i, ends[i]);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::CLSlice>(
- inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::StridedSlice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
- const auto starts_index{node.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
- const auto ends_index{node.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
- const auto strides_index{node.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
-
- auto outputData_tensor = _tensor_reg->getAclTensor(output_index);
- auto inputData_tensor = _tensor_reg->getAclTensor(input_index);
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = inputData_tensor->layout();
-
- // Set initializers for indices data such as order of inputData
- int input_rank = _ctx.at(input_index).shape().rank();
- std::vector<int32_t> starts;
- std::vector<int32_t> ends;
- std::vector<int32_t> strides;
- starts.resize(input_rank, 0);
- ends.resize(input_rank, 0);
- strides.resize(input_rank, 0);
- {
- assert(_ctx.at(starts_index).data());
- assert(_ctx.at(ends_index).data());
- assert(_ctx.at(strides_index).data());
- auto startData_base = _ctx.at(starts_index).data()->base();
- auto endData_base = _ctx.at(ends_index).data()->base();
- auto stridesData_base = _ctx.at(strides_index).data()->base();
- const int startData_size = _ctx.at(starts_index).shape().num_elements();
- const int endData_size = _ctx.at(ends_index).shape().num_elements();
- const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
-
- using ir::DataType;
-
- UNUSED_RELEASE(startData_size);
- UNUSED_RELEASE(endData_size);
- UNUSED_RELEASE(stridesData_size);
-
- assert(_ctx.at(starts_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(ends_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(strides_index).typeInfo().type() == DataType::INT32);
- assert(startData_size == input_rank);
- assert(endData_size == input_rank);
- assert(stridesData_size == input_rank);
-
- assert(startData_base != nullptr);
- for (int n = 0; n < input_rank; ++n)
- {
- auto axis = ::onert::backend::acl_common::ToARMComputeAxis(input_rank, n, frontend_layout,
- backend_layout)
- .value();
-
- int32_t start_value = *(reinterpret_cast<const int32_t *>(startData_base) + n);
- starts[axis] = start_value;
-
- int32_t end_value = *(reinterpret_cast<const int32_t *>(endData_base) + n);
- ends[axis] = end_value;
-
- int32_t strides_value = *(reinterpret_cast<const int32_t *>(stridesData_base) + n);
- strides[axis] = strides_value;
- }
- }
-
- // Set mask bits such as order of inputData
- const auto begin_mask = acl_common::ReorderBits<int32_t>(node.param().begin_mask, input_rank,
- frontend_layout, backend_layout);
- const auto end_mask = acl_common::ReorderBits<int32_t>(node.param().end_mask, input_rank,
- frontend_layout, backend_layout);
- const auto shrink_axis_mask = acl_common::ReorderBits<int32_t>(
- node.param().shrink_axis_mask, input_rank, frontend_layout, backend_layout);
-
- ::arm_compute::Coordinates starts_set;
- ::arm_compute::Coordinates ends_set;
- ::arm_compute::BiStrides strides_set;
-
- for (size_t i = 0; i < starts.size(); ++i)
- {
- starts_set.set(i, starts[i]);
- ends_set.set(i, ends[i]);
- strides_set.set(i, strides[i]);
- }
-
- // Disable applied dim_correction
- if (inputData_tensor->num_dimensions() != inputData_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(inputData_tensor);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::CLStridedSlice>(
- inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set, strides_set,
- begin_mask, end_mask, shrink_axis_mask);
-
- // Revert disabling applied dim_correction
- if (inputData_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(inputData_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Transpose &node)
-{
- const auto ofm_idx{node.getOutputs().at(0)};
- const auto ifm_idx{node.getInputs().at(ir::operation::Transpose::Input::INPUT)};
- const auto perm_idx{node.getInputs().at(ir::operation::Transpose::Input::PERMUTATION)};
-
- const auto rank = _ctx.at(ifm_idx).shape().rank();
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_idx);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_idx);
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = ifm_tensor->layout();
-
- const auto &perms = _ctx.at(perm_idx);
- std::vector<int32_t> pv;
- if (perms.shape() == ir::Shape{0})
- {
- pv.resize(rank);
- std::iota(pv.begin(), pv.end(), 0);
- std::reverse(pv.begin(), pv.end());
- }
- else
- {
- pv = _ctx.at(perm_idx).asVector<int32_t>();
- }
-
- std::unique_ptr<arm_compute::IFunction> fn;
- if (rank == 1)
- {
- fn = acl_common::generateLayer<arm_compute::CLCopy>(ifm_tensor->handle(), ofm_tensor->handle());
- }
- else if (rank == 2)
- {
- assert(pv.size() == 2 && pv.at(0) == 1 && pv.at(1) == 0);
- fn = acl_common::generateLayer<arm_compute::CLTranspose>(ifm_tensor->handle(),
- ofm_tensor->handle());
- }
- else
- {
- auto backend_pv =
- acl_common::getARMComputePermutationVector(rank, pv, frontend_layout, backend_layout);
-
- fn = acl_common::generateLayer<arm_compute::CLPermute>(ifm_tensor->handle(),
- ofm_tensor->handle(), backend_pv);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseActivation &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ElementwiseActivation::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- const ::arm_compute::ActivationLayerInfo act_info = acl_common::asActivationLayerInfo(
- node.param().op_type, node.param().alpha, node.param().beta);
-
- auto fn = acl_common::generateLayer<arm_compute::CLActivationLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), act_info);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseBinary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::RHS)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);
-
- std::unique_ptr<arm_compute::IFunction> fn;
- switch (node.param().op_type)
- {
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND:
- {
- fn = acl_common::generateLayer<arm_compute::CLBinaryLogicalOp>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle(),
- arm_compute::BinaryLogicalOperation::AND);
- break;
- }
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR:
- {
- fn = acl_common::generateLayer<arm_compute::CLBitwiseOr>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX:
- {
- fn = acl_common::generateLayer<arm_compute::CLElementwiseMax>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN:
- {
- fn = acl_common::generateLayer<arm_compute::CLElementwiseMin>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
- break;
- }
- default:
- {
- std::string err_msg("acl_cl KernelGenerator : " + node.name() +
- "is not elementwise-binary operations");
- assert(false && err_msg.c_str());
- break;
- }
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseUnary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- std::unique_ptr<arm_compute::IFunction> fn;
- switch (node.param().op_type)
- {
- case ir::operation::ElementwiseUnary::Type::ABS:
- {
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::ABS};
-
- fn = acl_common::generateLayer<arm_compute::CLActivationLayer>(
- input_tensor->handle(), output_tensor->handle(), act_info);
- break;
- }
- case ir::operation::ElementwiseUnary::Type::CAST:
- {
- if (input_tensor->data_type() == output_tensor->data_type())
- {
- fn = acl_common::generateLayer<arm_compute::CLCopy>(input_tensor->handle(),
- output_tensor->handle());
- }
- else if (_ctx.at(input_index).typeInfo().type() == ir::DataType::BOOL8)
- {
- fn = acl_common::generateLayer<arm_compute::CLCastBool>(input_tensor->handle(),
- output_tensor->handle());
- }
- else
- {
- // TODO Support converting float to int32 as round down
- fn = acl_common::generateLayer<arm_compute::CLCast>(
- input_tensor->handle(), output_tensor->handle(), arm_compute::ConvertPolicy::SATURATE);
- }
- break;
- }
- case ir::operation::ElementwiseUnary::Type::DEQUANTIZE:
- {
- fn = acl_common::generateLayer<arm_compute::CLDequantizationLayer>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::EXP:
- {
- fn = acl_common::generateLayer<arm_compute::CLExpLayer>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::FLOOR:
- {
- fn = acl_common::generateLayer<arm_compute::CLFloor>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::LOGICAL_NOT:
- {
- fn = acl_common::generateLayer<arm_compute::CLBitwiseNot>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::NEG:
- {
- fn = acl_common::generateLayer<arm_compute::CLNeg>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::RSQRT:
- {
- fn = acl_common::generateLayer<arm_compute::CLRsqrtLayer>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::SQRT:
- {
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::SQRT};
-
- fn = acl_common::generateLayer<arm_compute::CLActivationLayer>(
- input_tensor->handle(), output_tensor->handle(), act_info);
- break;
- }
- default:
- {
- throw std::runtime_error("acl_cl KernelGenerator : " + node.name() + "is not supported yet");
- break;
- }
- }
-
- auto acl_fn = asAclFunction(std::move(fn));
-
- _return_fn = std::move(acl_fn);
-}
-
-void KernelGenerator::visit(const ir::operation::ExpandDims &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLReshapeLayer>(input_tensor->handle(),
- output_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::InstanceNorm &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::InstanceNorm::Input::INPUT)};
- const auto gamma_index{node.getInputs().at(ir::operation::InstanceNorm::Input::GAMMA)};
- const auto beta_index{node.getInputs().at(ir::operation::InstanceNorm::Input::BETA)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto gamma_tensor = _tensor_reg->getAclTensor(gamma_index);
- auto beta_tensor = _tensor_reg->getAclTensor(beta_index);
- auto epsilon = node.param().epsilon;
- auto activation = node.param().activation;
-
- auto fn = acl_common::generateLayer<arm_compute::CLInstanceNormalizationLayerEx>(
- ifm_tensor->handle(), ofm_tensor->handle(), gamma_tensor->handle(), beta_tensor->handle(),
- epsilon);
-
- _return_fn = std::make_unique<exec::FunctionSequence>(
- asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle()));
-}
-
-void KernelGenerator::visit(const ir::operation::LSTM &node)
-{
- _return_fn = acl_common::kernelGenLSTM<acl_common::AclFunction, ::arm_compute::ICLTensor,
- ::arm_compute::CLLSTMLayer>(node, _ctx, _tensor_reg);
-}
-
-void KernelGenerator::visit(const ir::operation::Comparison &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input0_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT0)};
- const auto input1_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT1)};
-
- const auto comparison_type = node.param().comparison_type;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input0_tensor = _tensor_reg->getAclTensor(input0_index);
- auto input1_tensor = _tensor_reg->getAclTensor(input1_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLComparison>(
- input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle(),
- (arm_compute::ComparisonOperation)comparison_type);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::OneHot &node)
-{
- const auto output_idx{node.getOutputs().at(0)};
- const auto indices_idx{node.getInputs().at(ir::operation::OneHot::Input::INDICES)};
- const auto depth_idx{node.getInputs().at(ir::operation::OneHot::Input::DEPTH)};
- const auto onvalue_idx{node.getInputs().at(ir::operation::OneHot::Input::ON_VALUE)};
- const auto offvalue_idx{node.getInputs().at(ir::operation::OneHot::Input::OFF_VALUE)};
- const auto depth = _ctx.at(depth_idx).asScalar<int32_t>();
- assert(depth > 0);
-
- auto output_tensor = _tensor_reg->getAclTensor(output_idx);
- auto indices_tensor = _tensor_reg->getAclTensor(indices_idx);
- auto onvalue_tensor = _tensor_reg->getAclTensor(onvalue_idx);
-
- const size_t output_rank = _ctx.at(output_idx).shape().rank();
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = output_tensor->layout();
- int32_t axis = node.param().axis == -1 ? output_rank - 1 : node.param().axis;
- axis = acl_common::ToARMComputeAxis(output_rank, axis, frontend_layout, backend_layout).value();
-
- if (output_tensor->num_dimensions() != output_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and output_tensor is applied dim_correction
- acl_common::disableDimCorrection(output_tensor);
- }
-
- std::unique_ptr<::arm_compute::IFunction> fn;
- const auto &offvalue = _ctx.at(offvalue_idx);
- if (offvalue.isConstant())
- {
- fn = acl_common::generateLayer<arm_compute::CLOneHot>(
- indices_tensor->handle(), onvalue_tensor->handle(), output_tensor->handle(),
- acl_common::asPixelValue(offvalue), static_cast<uint32_t>(depth), axis);
- }
- else
- {
- auto offvalue_tensor = _tensor_reg->getAclTensor(offvalue_idx);
- fn = acl_common::generateLayer<arm_compute::CLOneHot>(
- indices_tensor->handle(), onvalue_tensor->handle(), offvalue_tensor->handle(),
- output_tensor->handle(), static_cast<uint32_t>(depth), axis);
- }
-
- if (output_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(output_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Pack &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- auto axis{node.param().axis};
-
- const auto output_rank = _ctx.at(output_index).shape().rank();
-
- std::vector<ir::OperandIndex> input_indexes;
- for (const auto &input_index : node.getInputs())
- input_indexes.emplace_back(input_index);
-
- auto output = _tensor_reg->getAclTensor(output_index)->handle();
- std::vector<arm_compute::ICLTensor *> inputs;
- for (const auto &input_index : input_indexes)
- inputs.emplace_back(_tensor_reg->getAclTensor(input_index)->handle());
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = _tensor_reg->getAclTensor(output_index)->layout();
-
- if (axis < 0)
- axis += output_rank;
- axis = acl_common::ToARMComputeAxis(output_rank, axis, frontend_layout, backend_layout).value();
-
- // Disable applied dim_correction
- for (const auto &input_index : input_indexes)
- {
- const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
- if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(input_tensor);
- }
- }
-
- auto fn = acl_common::generateLayer<arm_compute::CLStackLayer>(inputs, axis, output);
-
- // Revert disabling applied dim_correction
- for (const auto &input_index : input_indexes)
- {
- const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
- if (input_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(input_tensor);
- }
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Pool2D &node)
-{
- auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::CLPoolingLayer>(
- node, _ctx, _tensor_reg, _current_op_seq_layout,
- acl_common::convertPoolType(node.param().op_type));
-
- const auto ofm_index{node.getOutputs().at(0)};
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- const auto activation = node.param().activation;
- _return_fn = std::make_unique<exec::FunctionSequence>(
- asAclFunction(std::move(raw_fn)),
- ActivationBuilder::generate(activation, ofm_tensor->handle()));
-}
-
-void KernelGenerator::visit(const ir::operation::Permute &node)
-{
- const auto ofm_idx{node.getOutputs().at(0)};
- const auto ifm_idx{node.getInputs().at(0)};
- const auto permute_type = node.getPermuteType();
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_idx);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_idx);
- const auto rank = _ctx.at(ofm_idx).shape().rank();
- assert(_ctx.at(ifm_idx).shape().rank() == _ctx.at(ofm_idx).shape().rank());
-
- std::unique_ptr<::arm_compute::IFunction> fn;
- arm_compute::PermutationVector pv;
- if (permute_type == ir::operation::Permute::Type::NCHW_TO_NHWC && rank == 4)
- {
- // WHCN -> CWHN
- pv = arm_compute::PermutationVector{2, 0, 1};
-
- fn = acl_common::generateLayer<arm_compute::CLPermute>(ifm_tensor->handle(),
- ofm_tensor->handle(), pv);
- }
- else if (permute_type == ir::operation::Permute::Type::NHWC_TO_NCHW && rank == 4)
- {
- // CWHN -> WHCN
- pv = arm_compute::PermutationVector{1, 2, 0};
-
- fn = acl_common::generateLayer<::arm_compute::CLPermute>(ifm_tensor->handle(),
- ofm_tensor->handle(), pv);
- }
- else
- {
- fn = acl_common::generateLayer<arm_compute::CLCopy>(ifm_tensor->handle(), ofm_tensor->handle());
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ResizeBilinear &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLScale>(
- ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::InterpolationPolicy::BILINEAR,
- ::arm_compute::BorderMode::REPLICATE, ::arm_compute::PixelValue(0.f),
- ::arm_compute::SamplingPolicy::TOP_LEFT);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ResizeNearestNeighbor &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ResizeNearestNeighbor::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLScale>(
- ifm_tensor->handle(), ofm_tensor->handle(),
- ::arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR, ::arm_compute::BorderMode::REPLICATE,
- ::arm_compute::PixelValue(0.f), ::arm_compute::SamplingPolicy::TOP_LEFT);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::RNN &node)
-{
- const auto output_index{node.getOutputs().at(ir::operation::RNN::Output::OUTPUT)};
- const auto hidden_state_out_index{
- node.getOutputs().at(ir::operation::RNN::Output::HIDDEN_STATE_OUT)};
-
- const auto input_index{node.getInputs().at(ir::operation::RNN::Input::INPUT)};
- const auto weights_index{node.getInputs().at(ir::operation::RNN::Input::WEIGHTS)};
- const auto recurrent_weights_index{
- node.getInputs().at(ir::operation::RNN::Input::RECURRENT_WEIGHTS)};
- const auto bias_index{node.getInputs().at(ir::operation::RNN::Input::BIAS)};
- const auto hidden_state_in_index{node.getInputs().at(ir::operation::RNN::Input::HIDDEN_STATE_IN)};
-
- const auto activation = node.param().activation;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto hidden_state_out_tensor = _tensor_reg->getAclTensor(hidden_state_out_index);
-
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
- auto weights_tensor = _tensor_reg->getAclTensor(weights_index);
- auto recurrent_weights_tensor = _tensor_reg->getAclTensor(recurrent_weights_index);
- auto bias_tensor = _tensor_reg->getAclTensor(bias_index);
- auto hidden_state_in_tensor = _tensor_reg->getAclTensor(hidden_state_in_index);
- auto act_info = ::onert::backend::acl_common::asActivationLayerInfo(activation);
-
- auto copy_layer = acl_common::generateLayer<arm_compute::CLCopy>(
- hidden_state_in_tensor->handle(), hidden_state_out_tensor->handle());
- _return_fn = asAclFunction(std::move(copy_layer));
-
- auto fn = acl_common::generateLayer<arm_compute::CLRNNLayer>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
- weights_tensor->handle(), recurrent_weights_tensor->handle(), bias_tensor->handle(),
- hidden_state_out_tensor->handle(), output_tensor->handle(), act_info);
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
- const auto block_size_index{
- node.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
- const auto paddings_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto block_size_tensor = _tensor_reg->getAclTensor(block_size_index);
- auto paddings_tensor = _tensor_reg->getAclTensor(paddings_index);
-
- assert(_ctx.at(block_size_index).data());
- assert(_ctx.at(paddings_index).data());
-
- auto fn = acl_common::generateLayer<arm_compute::CLSpaceToBatchLayer>(
- ifm_tensor->handle(), block_size_tensor->handle(), paddings_tensor->handle(),
- ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::SpaceToDepth &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::SpaceToDepth::Input::INPUT)};
-
- auto block_size = node.param().block_size;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLSpaceToDepthLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), block_size);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::EmbeddingLookup &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lookups_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::LOOKUPS)};
- const auto values_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::VALUES)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto lookups_tensor = _tensor_reg->getAclTensor(lookups_index);
- auto values_tensor = _tensor_reg->getAclTensor(values_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLEmbeddingLookup>(
- values_tensor->handle(), output_tensor->handle(), lookups_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::L2Normalization &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::L2Normalization::Input::INPUT)};
-
- // {CL|Neon}L2Normalization performs the reduction only along dimension 0
- // L2 Normalization always performs the reduction along the depth axis
- // Thus, we repurpose {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by
- // choosing normalization parameters as below
-
- const auto &ifm_shape = _ctx.at(ifm_index).shape();
- // TODO Support optional constant dimension that normalization would be performed on
- const auto normalization_axis = _ctx.at(ifm_index).shape().rank() - 1;
- int32_t radius =
- 2 * ifm_shape.dim(normalization_axis) + 1; // normSize = depth(last dimension) * 2 + 1
- float alpha = 1.0f; // In the implementation to make alpha_ become 1
- float beta = 0.5f; // pow(reduction, -0.5) = 1 / sqrt(reduction)
- float bias = 0.0f; // Don't offset the reduction.
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- const auto norm_info = ::arm_compute::NormalizationLayerInfo(::arm_compute::NormType::CROSS_MAP,
- radius, alpha, beta, bias, false);
-
- auto fn = acl_common::generateLayer<arm_compute::CLNormalizationLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), norm_info);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::HashtableLookup &node)
-{
- const auto output_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::OUTPUT)};
- const auto hits_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::HITS)};
-
- const auto lookups_index{node.getInputs().at(ir::operation::HashtableLookup::Input::LOOKUPS)};
- const auto keys_index{node.getInputs().at(ir::operation::HashtableLookup::Input::KEYS)};
- const auto values_index{node.getInputs().at(ir::operation::HashtableLookup::Input::VALUES)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto hits_tensor = _tensor_reg->getAclTensor(hits_index);
-
- auto lookups_tensor = _tensor_reg->getAclTensor(lookups_index);
- auto keys_tensor = _tensor_reg->getAclTensor(keys_index);
- auto values_tensor = _tensor_reg->getAclTensor(values_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLHashtableLookup>(
- lookups_tensor->handle(), keys_tensor->handle(), values_tensor->handle(),
- output_tensor->handle(), hits_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::PReLU &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::PReLU::Input::INPUT)};
- const auto alpha_index{node.getInputs().at(ir::operation::PReLU::Input::ALPHA)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto alpha_tensor = _tensor_reg->getAclTensor(alpha_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLPReluLayer>(
- ifm_tensor->handle(), alpha_tensor->handle(), ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::TransposeConv &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ker_index{node.getInputs().at(ir::operation::TransposeConv::Input::KERNEL)};
- const auto ifm_index{node.getInputs().at(ir::operation::TransposeConv::Input::INPUT)};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ker_shape = _ctx.at(ker_index).shape().asFeature(_current_op_seq_layout);
-
- const auto stride = node.param().stride;
-
- assert((node.param().padding.type == ir::PaddingType::SAME) ||
- (node.param().padding.type == ir::PaddingType::VALID));
- auto padding = ir::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride,
- ker_shape.W, ker_shape.H);
- uint32_t invalid_horizontal = 0;
- uint32_t invalid_vertical = 0;
- if (node.param().padding.type == ir::PaddingType::VALID)
- {
- invalid_horizontal =
- ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1);
- invalid_vertical = ofm_shape.H - (1 + (ifm_shape.H - 1) * stride.vertical) - (ker_shape.H - 1);
- }
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
-
- const auto tconv_info = acl_common::asPadStrideInfo(padding, stride);
-
- auto fn = acl_common::generateLayer<arm_compute::CLTransposeConvLayer>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), ifm_tensor->handle(),
- ker_tensor->handle(), nullptr, ofm_tensor->handle(), tconv_info, invalid_horizontal,
- invalid_vertical);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::SquaredDifference &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLElementwiseSquaredDiff>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::TopKV2 &node)
-{
- const auto outputValues_index{node.getOutputs().at(ir::operation::TopKV2::Output::OUTPUT_VALUES)};
- const auto outputIndices_index{
- node.getOutputs().at(ir::operation::TopKV2::Output::OUTPUT_INDICES)};
-
- const auto inputData_index{node.getInputs().at(ir::operation::TopKV2::Input::INPUT)};
-
- // Currently, we only support the vector input.
- assert(_ctx.at(inputData_index).shape().rank() == 1 ||
- _ctx.at(inputData_index).shape().rank() == 2);
-
- const auto k = node.param().k;
-
- auto values_tensor = _tensor_reg->getAclTensor(outputValues_index);
- auto indices_tensor = _tensor_reg->getAclTensor(outputIndices_index);
- auto input_tensor = _tensor_reg->getAclTensor(inputData_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLTopKV2>(
- input_tensor->handle(), k, values_tensor->handle(), indices_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Gather &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- const auto ifm_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)};
- const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)};
-
- const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
- const auto axis_raw = node.param().axis;
- const auto axis_value = (axis_raw < 0 ? (ifm_rank + axis_raw) : axis_raw);
- const int axis = ::onert::backend::acl_common::ToARMComputeAxis(ifm_rank, axis_value).value();
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto indices_tensor = _tensor_reg->getAclTensor(indices_index);
-
- // NOTE The frontend layout and backend layout must be the same for this operation.
- // If not the same, we have to add a stage(?) to perform permutation of output tensor. It
- // is not not efficient even if it works well. If so, it would be better to set the
- // layout of these backend tensors to the same layout.
- // There is also one thing we have to think about. This operation depends on the layout of
- // a model. For example, if a model in NHWC has this operation as output rank == 4, indices
- // rank == 2 and axis == 2, this operation should work as the axis W and C, but the axis W
- // and C are not sequential in NCHW. So the backend in NCHW cannot handle this case.
- const auto backend_layout = ofm_tensor->layout();
- UNUSED_RELEASE(backend_layout);
- assert(backend_layout == ifm_tensor->layout());
- assert(backend_layout == indices_tensor->layout());
- assert(ifm_rank < 4 || _current_op_seq_layout == backend_layout);
-
- // input is n-D, indices k-D, output is (n + k - 1)-D
- size_t n = ifm_rank;
- assert(n == ifm_tensor->num_dimensions());
- size_t k = _ctx.at(indices_index).shape().rank();
- assert(k == indices_tensor->num_dimensions());
-
- // Disable applied dim_correction
- if (n != ifm_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and ifm tensor is applied dim_correction
- acl_common::disableDimCorrection(ifm_tensor);
- }
- if (k != indices_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and indices tensor is applied dim_correction
- acl_common::disableDimCorrection(indices_tensor);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::CLGatherEx>(
- ifm_tensor->handle(), indices_tensor->handle(), ofm_tensor->handle(), axis);
-
- // Revert disabling applied dim_correction
- if (ifm_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(ifm_tensor);
- }
- if (indices_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(indices_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ArgMax &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ArgMax::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::ArgMax::Input::AXIS)};
-
- auto ifm_shape = _ctx.at(ifm_index).shape();
- auto ofm_shape = _ctx.at(ofm_index).shape();
-
- assert((ifm_shape.rank() - 1) == ofm_shape.rank());
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
- auto frontend_layout = _current_op_seq_layout;
- auto backend_layout = ifm_tensor->layout();
-
- int axis_value = _ctx.at(axis_index).asScalar<int32_t>();
- if (axis_value < 0)
- {
- axis_value += ifm_rank;
- }
-
- auto acl_axis =
- acl_common::ToARMComputeAxis(ifm_rank, axis_value, frontend_layout, backend_layout).value();
-
- auto fn = acl_common::generateLayer<arm_compute::CLArgMinMaxLayerEx>(
- ifm_tensor->handle(), acl_axis, ofm_tensor->handle(),
- ::arm_compute::ReductionOperation::ARG_IDX_MAX);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::LocalResponseNormalization &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{
- node.getInputs().at(ir::operation::LocalResponseNormalization::Input::INPUT)};
-
- auto radius = node.param().radius;
- auto alpha = node.param().alpha;
- auto beta = node.param().beta;
- auto bias = node.param().bias;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- const auto norm_info = ::arm_compute::NormalizationLayerInfo(
- ::arm_compute::NormType::CROSS_MAP, radius * 2 + 1, alpha, beta, bias, false);
-
- auto fn = acl_common::generateLayer<arm_compute::CLNormalizationLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), norm_info);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::DepthToSpace &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::DepthToSpace::Input::INPUT)};
-
- auto block_size = node.param().block_size;
- assert(block_size > 0);
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLDepthToSpaceLayer>(
- input_tensor->handle(), output_tensor->handle(), block_size);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Split &node)
-{
- const auto ifm_index{node.getInputs().at(ir::operation::Split::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::Split::Input::AXIS)};
-
- assert(node.param().num_splits == static_cast<int>(node.getOutputs().size()));
- if (!_ctx.at(axis_index).isConstant())
- {
- throw std::runtime_error("Non-constant axis_index NYI for acl_cl backend");
- }
-
- const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
- std::vector<ir::OperandIndex> output_indexes;
- for (const auto &output : node.getOutputs())
- output_indexes.emplace_back(output);
-
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- std::vector<arm_compute::ICLTensor *> output_tensors;
- for (const auto &ofm_ind : output_indexes)
- output_tensors.emplace_back(_tensor_reg->getAclTensor(ofm_ind)->handle());
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = ifm_tensor->layout();
- auto axis = _ctx.at(axis_index).asScalar<int32_t>();
- if (axis < 0)
- axis += ifm_rank;
- axis = acl_common::ToARMComputeAxis(ifm_rank, axis, frontend_layout, backend_layout).value();
-
- auto fn =
- acl_common::generateLayer<arm_compute::CLSplit>(ifm_tensor->handle(), output_tensors, axis);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::SplitV &node)
-{
- const auto ifm_index{node.getInputs().at(ir::operation::SplitV::Input::INPUT)};
- const auto size_split_index{node.getInputs().at(ir::operation::SplitV::Input::SIZE_SPLITS)};
- const auto split_dim_index{node.getInputs().at(ir::operation::SplitV::Input::SPLIT_DIM)};
-
- assert(node.param().num_splits == static_cast<int>(node.getOutputs().size()));
-
- const size_t ifm_rank = _ctx.at(ifm_index).shape().rank();
- std::vector<ir::OperandIndex> output_indexes;
- for (const auto &output : node.getOutputs())
- output_indexes.emplace_back(output);
-
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto size_split_tensor = _tensor_reg->getAclTensor(size_split_index);
-
- std::vector<arm_compute::ICLTensor *> output_tensors;
- for (const auto &ofm_ind : output_indexes)
- output_tensors.emplace_back(_tensor_reg->getAclTensor(ofm_ind)->handle());
-
- auto fn = std::make_unique<arm_compute::CLSplitVEx>();
- const auto &split_dim_op = _ctx.at(split_dim_index);
- if (split_dim_op.isConstant())
- {
- int32_t split_dim = split_dim_op.asScalar<int32_t>();
- uint32_t split_dim_revised = (split_dim < 0) ? (split_dim + ifm_rank) : split_dim;
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = ifm_tensor->layout();
-
- if (ifm_tensor->num_dimensions() != ifm_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and ifm tensor is applied dim_correction
- acl_common::disableDimCorrection(ifm_tensor);
- }
-
- split_dim_revised =
- acl_common::ToARMComputeAxis(ifm_rank, split_dim_revised, frontend_layout, backend_layout)
- .value();
- fn->configure(ifm_tensor->handle(), size_split_tensor->handle(), split_dim_revised,
- output_tensors, node.param().num_splits);
-
- if (ifm_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(ifm_tensor);
- }
- }
- else
- {
- throw std::runtime_error("Non-constant split_dim NYI for acl_cl backend");
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Unpack &node)
-{
- const auto input_index{node.getInputs().at(ir::operation::Unpack::Input::INPUT)};
- auto axis{node.param().axis};
-
- const auto input_rank = _ctx.at(input_index).shape().rank();
-
- std::vector<ir::OperandIndex> output_indexes;
- for (const auto &output_index : node.getOutputs())
- output_indexes.emplace_back(output_index);
-
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
- std::vector<arm_compute::ICLTensor *> outputs;
- for (const auto &output_index : output_indexes)
- outputs.emplace_back(_tensor_reg->getAclTensor(output_index)->handle());
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = _tensor_reg->getAclTensor(input_index)->layout();
- if (axis < 0)
- axis += input_rank;
- axis = acl_common::ToARMComputeAxis(input_rank, axis, frontend_layout, backend_layout).value();
-
- // Disable applied dim_correction
- if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(input_tensor);
- }
-
- auto fn =
- acl_common::generateLayer<arm_compute::CLUnstack>(input_tensor->handle(), outputs, axis);
-
- // Revert disabling applied dim_correction
- if (input_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(input_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Pad &node)
-{
- const auto input_index{node.getInputs().at(ir::operation::Pad::Input::INPUT)};
- const auto pad_index{node.getInputs().at(ir::operation::Pad::Input::PAD)};
- const auto output_index{node.getOutputs().at(0)};
- assert(_ctx.at(pad_index).data());
-
- auto rank = _ctx.at(input_index).shape().rank();
- auto pad_base = _ctx.at(pad_index).data()->base();
-
- auto input_type = _ctx.at(input_index).typeInfo();
- auto data_type = acl_common::asDataType(input_type.type());
- auto quant_info = ::arm_compute::QuantizationInfo(input_type.scale(), input_type.offset());
- const auto pixel_value = ::arm_compute::PixelValue(0, data_type, quant_info);
-
- auto input = _tensor_reg->getAclTensor(input_index)->handle();
- auto output = _tensor_reg->getAclTensor(output_index)->handle();
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = _tensor_reg->getAclTensor(input_index)->layout();
-
- ::arm_compute::PaddingList padding_list;
- padding_list.resize(rank);
- for (int32_t n = 0; n < rank; ++n)
- {
- const int32_t *from = reinterpret_cast<const int32_t *>(pad_base) + (n * 2);
-
- const auto axis =
- acl_common::ToARMComputeAxis(rank, n, frontend_layout, backend_layout).value();
- padding_list[axis] = ::arm_compute::PaddingInfo{from[0], from[1]};
- }
-
- // Disable applied dim_correction
- const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
- if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(input_tensor);
- }
-
- auto fn =
- acl_common::generateLayer<arm_compute::CLPadLayer>(input, output, padding_list, pixel_value);
-
- // NOTE Do not revert disabling applied dim_correction for 4D.
- // It would produce a mistach of result by incorrect offset_first_element in
- // ICLKernel::add_tensor_argument<3>().
- // We have to disable applied dim_correction and not to revert enabling for the kernel that slices
- // 4D to 3D because slicing arm_compute::Window can causes incorrect offset_first_element if the
- // used tensor is 4D and the tensor's high dimention is 1
- if (input_tensor->num_dimensions() < 4 && input_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(input_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ConvertFp32ToFp16 &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ConvertFp32ToFp16::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLDepthConvertLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::ConvertPolicy::SATURATE, 0);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ConvertFp16ToFp32 &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ConvertFp16ToFp32::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- auto fn = acl_common::generateLayer<arm_compute::CLDepthConvertLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::ConvertPolicy::SATURATE, 0);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Reverse &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::Reverse::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::Reverse::Input::AXIS)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto axis_tensor = _tensor_reg->getAclTensor(axis_index);
-
- // WORKAROUND: acl-cl backend only allow U32 type for axis
- // ConstantInitializer will resolve S32 type to U32 type
- if (_ctx.at(axis_index).isConstant() &&
- (axis_tensor->handle()->info()->data_type() == arm_compute::DataType::S32))
- {
- axis_tensor->handle()->info()->set_data_type(arm_compute::DataType::U32);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::CLReverse>(
- ifm_tensor->handle(), ofm_tensor->handle(), axis_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_cl/KernelGenerator.h b/runtime/onert/backend/acl_cl/KernelGenerator.h
deleted file mode 100644
index e8a922677..000000000
--- a/runtime/onert/backend/acl_cl/KernelGenerator.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_KERNEL_GENERATOR_H__
-#define __ONERT_BACKEND_ACL_CL_KERNEL_GENERATOR_H__
-
-#include <backend/IKernelGenerator.h>
-
-#include "ir/Operands.h"
-#include "TensorBuilder.h"
-#include "AclTensorRegistry.h"
-#include "TensorManager.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class KernelGenerator : public IKernelGenerator
-{
-public:
- KernelGenerator(const ir::Operands &operands_ctx, const ir::Operations &operations_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<acl_common::AclTensorRegistry<TensorManager>> &_tensor_reg);
-
- void visit(const ir::OpSequence &) override;
- void visit(const ir::operation::BatchToSpaceND &) override;
- void visit(const ir::operation::BinaryArithmetic &) override;
- void visit(const ir::operation::Conv2D &) override;
- void visit(const ir::operation::DepthwiseConv2D &) override;
- void visit(const ir::operation::Concat &) override;
- void visit(const ir::operation::FullyConnected &) override;
- void visit(const ir::operation::Reduce &) override;
- void visit(const ir::operation::Reshape &) override;
- void visit(const ir::operation::Squeeze &) override;
- void visit(const ir::operation::Softmax &) override;
- void visit(const ir::operation::Slice &) override;
- void visit(const ir::operation::StridedSlice &) override;
- void visit(const ir::operation::Transpose &) override;
- void visit(const ir::operation::ElementwiseActivation &) override;
- void visit(const ir::operation::ElementwiseBinary &) override;
- void visit(const ir::operation::ElementwiseUnary &) override;
- void visit(const ir::operation::ExpandDims &) override;
- void visit(const ir::operation::InstanceNorm &) override;
- void visit(const ir::operation::Comparison &) override;
- void visit(const ir::operation::LSTM &) override;
- void visit(const ir::operation::OneHot &) override;
- void visit(const ir::operation::Pack &) override;
- void visit(const ir::operation::Pool2D &) override;
- void visit(const ir::operation::Permute &) override;
- void visit(const ir::operation::ResizeBilinear &) override;
- void visit(const ir::operation::ResizeNearestNeighbor &) override;
- void visit(const ir::operation::RNN &) override;
- void visit(const ir::operation::SpaceToBatchND &) override;
- void visit(const ir::operation::SpaceToDepth &) override;
- void visit(const ir::operation::EmbeddingLookup &) override;
- void visit(const ir::operation::L2Normalization &) override;
- void visit(const ir::operation::HashtableLookup &) override;
- void visit(const ir::operation::PReLU &) override;
- void visit(const ir::operation::TransposeConv &) override;
- void visit(const ir::operation::SquaredDifference &) override;
- void visit(const ir::operation::TopKV2 &) override;
- void visit(const ir::operation::Gather &) override;
- void visit(const ir::operation::ArgMax &) override;
- void visit(const ir::operation::LocalResponseNormalization &) override;
- void visit(const ir::operation::DepthToSpace &) override;
- void visit(const ir::operation::Split &) override;
- void visit(const ir::operation::SplitV &) override;
- void visit(const ir::operation::Unpack &) override;
- void visit(const ir::operation::Pad &) override;
- void visit(const ir::operation::ConvertFp32ToFp16 &) override;
- void visit(const ir::operation::ConvertFp16ToFp32 &) override;
- void visit(const ir::operation::Reverse &) override;
-
-private:
- const ir::Operands &_ctx;
- const ir::Operations &_operations_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
- std::shared_ptr<acl_common::AclTensorRegistry<TensorManager>> _tensor_reg;
- ir::Layout _current_op_seq_layout;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_KERNEL_GENERATOR_H__
diff --git a/runtime/onert/backend/acl_cl/Optimizer.cc b/runtime/onert/backend/acl_cl/Optimizer.cc
deleted file mode 100644
index 9134d3fb8..000000000
--- a/runtime/onert/backend/acl_cl/Optimizer.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Optimizer.h"
-
-#include "ParentInfo.h"
-
-#include <cassert>
-#include <compiler/LoweredGraph.h>
-#include <util/logging.h>
-#include "AclSubTensorAnalyzer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-Optimizer::Optimizer(BackendContext *context)
- : _context{context},
- _tensor_builder{std::dynamic_pointer_cast<TensorBuilder>(context->tensor_builder)}
-{
- assert(context);
-}
-
-void Optimizer::optimize()
-{
- // Concat elimination (build subtensor info)
- {
- acl_common::AclSubTensorAnalyzer sa{*_context->graph()};
- for (auto op_info : _context->operation_list())
- {
- auto &op = _context->graph()->operations().at(op_info.index);
- sa.setLayout(op_info.layout);
- op.accept(sa);
- }
-
- _tensor_builder->parent_map(sa.releaseParentMap());
- }
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_cl/Optimizer.h b/runtime/onert/backend/acl_cl/Optimizer.h
deleted file mode 100644
index 18d38ec1b..000000000
--- a/runtime/onert/backend/acl_cl/Optimizer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_OPTIMIZER_H__
-#define __ONERT_BACKEND_ACL_CL_OPTIMIZER_H__
-
-#include <backend/IOptimizer.h>
-#include <backend/BackendContext.h>
-#include "TensorBuilder.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class Optimizer : public IOptimizer
-{
-public:
- Optimizer(BackendContext *context);
-
- void optimize() override;
-
-private:
- BackendContext *_context;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_OPTIMIZER_H__
diff --git a/runtime/onert/backend/acl_cl/TensorBuilder.h b/runtime/onert/backend/acl_cl/TensorBuilder.h
deleted file mode 100644
index 91502d39a..000000000
--- a/runtime/onert/backend/acl_cl/TensorBuilder.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_TENSOR_BUILDER_H__
-#define __ONERT_BACKEND_ACL_CL_TENSOR_BUILDER_H__
-
-#include <AclTensorBuilder.h>
-
-#include "operand/CLTensor.h"
-#include "operand/CLSubTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-using TensorBuilder =
- acl_common::AclTensorBuilder<operand::ICLTensor, operand::CLTensor, operand::CLSubTensor>;
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_TENSOR_BUILDER_H__
diff --git a/runtime/onert/backend/acl_cl/TensorManager.h b/runtime/onert/backend/acl_cl/TensorManager.h
deleted file mode 100644
index ab295dbec..000000000
--- a/runtime/onert/backend/acl_cl/TensorManager.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_TENSOR_MANAGER_H__
-#define __ONERT_BACKEND_ACL_CL_TENSOR_MANAGER_H__
-
-#include <arm_compute/runtime/CL/CLBufferAllocator.h>
-#include <arm_compute/runtime/PoolManager.h>
-#include <arm_compute/runtime/BlobLifetimeManager.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-#include <arm_compute/runtime/MemoryGroup.h>
-
-#include <AclMemoryManager.h>
-#include <AclLinearMemoryManager.h>
-#include <AclInternalBufferManager.h>
-#include <AclTensorManager.h>
-
-#include "operand/CLTensor.h"
-#include "operand/CLSubTensor.h"
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-using MemoryManager =
- acl_common::AclMemoryManager<operand::ICLTensor, operand::CLTensor, operand::CLSubTensor>;
-
-using LinearMemoryManager = acl_common::AclLinearMemoryManager<
- operand::ICLTensor, operand::CLTensor, operand::CLSubTensor,
- ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
- ::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator,
- ::arm_compute::MemoryGroup>;
-
-using InternalBufferManager = acl_common::AclInternalBufferManager<
- ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
- ::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator>;
-
-using TensorManager =
- acl_common::AclTensorManager<operand::ICLTensor, operand::CLTensor, operand::CLSubTensor>;
-
-inline TensorManager *createTensorManager(bool is_linear_executor)
-{
- if (is_linear_executor)
- {
- VERBOSE(acl_cl_createTensorManager) << "AclTensorManager as Linear" << std::endl;
- return new TensorManager(new MemoryManager(), new LinearMemoryManager(),
- new InternalBufferManager());
- }
- else
- {
- VERBOSE(acl_cl_createTensorManager) << "AclTensorManager" << std::endl;
- return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
- }
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_TENSOR_MANAGER_H__
diff --git a/runtime/onert/backend/acl_cl/acl_cl.cc b/runtime/onert/backend/acl_cl/acl_cl.cc
deleted file mode 100644
index 88378b13a..000000000
--- a/runtime/onert/backend/acl_cl/acl_cl.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <util/logging.h>
-
-#include "Backend.h"
-
-extern "C" {
-onert::backend::Backend *onert_backend_create()
-{
- VERBOSE(onert_backend_create) << "'acl_cl' loaded\n";
- return new onert::backend::acl_cl::Backend;
-}
-
-void onert_backend_destroy(onert::backend::Backend *backend)
-{
- VERBOSE(onert_backend_create) << "'acl_cl' unloaded\n";
- delete backend;
-}
-}
diff --git a/runtime/onert/backend/acl_cl/operand/CLSubTensor.cc b/runtime/onert/backend/acl_cl/operand/CLSubTensor.cc
deleted file mode 100644
index 234229787..000000000
--- a/runtime/onert/backend/acl_cl/operand/CLSubTensor.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "CLSubTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-CLSubTensor::CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, size_t rank, bool extend_parent)
- : _cl_sub_tensor(std::make_shared<arm_compute::CLSubTensor>(parent->handle(), tensor_shape,
- coords, extend_parent)),
- _rank{rank}
-{
- // DO NOTHING
-}
-
-const arm_compute::CLSubTensor *CLSubTensor::handle() const { return _cl_sub_tensor.get(); }
-
-arm_compute::CLSubTensor *CLSubTensor::handle() { return _cl_sub_tensor.get(); }
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_cl/operand/CLSubTensor.h b/runtime/onert/backend/acl_cl/operand/CLSubTensor.h
deleted file mode 100644
index fedc17fc2..000000000
--- a/runtime/onert/backend/acl_cl/operand/CLSubTensor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
-#define __ONERT_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
-
-#include <arm_compute/runtime/CL/CLSubTensor.h>
-#include "ICLTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-class CLSubTensor : public ICLTensor
-{
-public:
- CLSubTensor() = delete;
-
-public:
- CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, size_t rank, bool extend_parent = false);
-
-public:
- size_t num_dimensions() const final { return _rank; }
-
-public:
- const arm_compute::CLSubTensor *handle() const override;
- arm_compute::CLSubTensor *handle() override;
-
-public:
- // This method is used to prevent the use of memcpy for SubTensor
- bool has_padding() const override { return true; }
-
-private:
- std::shared_ptr<arm_compute::CLSubTensor> _cl_sub_tensor;
- size_t _rank;
-};
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
diff --git a/runtime/onert/backend/acl_cl/operand/CLTensor.cc b/runtime/onert/backend/acl_cl/operand/CLTensor.cc
deleted file mode 100644
index f37edff51..000000000
--- a/runtime/onert/backend/acl_cl/operand/CLTensor.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "CLTensor.h"
-
-#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <arm_compute/runtime/CL/CLMemory.h>
-#include <arm_compute/runtime/CL/CLMemoryRegion.h>
-
-#include <Convert.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-CLTensor::CLTensor(const arm_compute::TensorInfo &info, size_t rank, size_t num_uses)
- : _cl_tensor(std::make_shared<arm_compute::CLTensor>()), _rank{rank}, _num_uses{num_uses}
-{
- allocator()->init(info);
-}
-
-const arm_compute::CLTensor *CLTensor::handle() const { return _cl_tensor.get(); }
-
-arm_compute::CLTensor *CLTensor::handle() { return _cl_tensor.get(); }
-
-arm_compute::CLTensorAllocator *CLTensor::allocator() { return _cl_tensor->allocator(); }
-
-void CLTensor::setBuffer(void *host_ptr)
-{
- // Constructs a Buffer on a user-supplied memory
- auto buffer = cl::Buffer(arm_compute::CLScheduler::get().context(),
- CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE, info()->total_size(), host_ptr);
- // import memory
- allocator()->import_memory(buffer);
-}
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_cl/operand/CLTensor.h b/runtime/onert/backend/acl_cl/operand/CLTensor.h
deleted file mode 100644
index c92208803..000000000
--- a/runtime/onert/backend/acl_cl/operand/CLTensor.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
-#define __ONERT_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
-
-#include <arm_compute/core/TensorInfo.h>
-#include <arm_compute/runtime/CL/CLTensor.h>
-#include <arm_compute/runtime/CL/CLScheduler.h>
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "ICLTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-class CLTensor : public ICLTensor
-{
-public:
- CLTensor() = delete;
-
-public:
- CLTensor(const arm_compute::TensorInfo &info, size_t rank, size_t num_uses);
-
-public:
- size_t num_dimensions() const final { return _rank; }
-
-public:
- const arm_compute::CLTensor *handle() const override;
- arm_compute::CLTensor *handle() override;
- size_t num_uses() const { return _num_uses; }
-
-public:
- arm_compute::CLTensorAllocator *allocator();
- /** Set given buffer as the buffer of the tensor
- *
- * @note Ownership of the memory is not transferred to this object.
- * Thus management (allocate/free) should be done by the client.
- *
- * @param[in] host_ptr Storage to be used.
- */
- void setBuffer(void *host_ptr);
-
-private:
- std::shared_ptr<arm_compute::CLTensor> _cl_tensor;
- size_t _rank;
- size_t _num_uses;
-};
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
diff --git a/runtime/onert/backend/acl_cl/operand/ICLTensor.cc b/runtime/onert/backend/acl_cl/operand/ICLTensor.cc
deleted file mode 100644
index b400ef9cf..000000000
--- a/runtime/onert/backend/acl_cl/operand/ICLTensor.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ICLTensor.h"
-
-#include <arm_compute/runtime/CL/CLScheduler.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-void ICLTensor::access(const std::function<void(ITensor &tensor)> &fn)
-{
- auto &queue = ::arm_compute::CLScheduler::get().queue();
-
- // This is an optional input
- if (total_size() == 0)
- return;
-
- map(queue);
- fn(*this);
- unmap(queue);
-}
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_cl/operand/ICLTensor.h b/runtime/onert/backend/acl_cl/operand/ICLTensor.h
deleted file mode 100644
index 5427000f9..000000000
--- a/runtime/onert/backend/acl_cl/operand/ICLTensor.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
-#define __ONERT_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
-
-#include <arm_compute/core/CL/ICLTensor.h>
-
-#include <IACLTensor.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-class ICLTensor : public acl_common::IACLTensor
-{
-public:
- const arm_compute::ICLTensor *handle() const override = 0;
- arm_compute::ICLTensor *handle() override = 0;
-
-public:
- void access(const std::function<void(ITensor &tensor)> &fn) final;
-
-private:
- void map(cl::CommandQueue &q, bool blocking = true) { return handle()->map(q, blocking); }
- void unmap(cl::CommandQueue &q) { return handle()->unmap(q); }
-};
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
diff --git a/runtime/onert/backend/acl_common/AclActivationBuilder.h b/runtime/onert/backend/acl_common/AclActivationBuilder.h
deleted file mode 100644
index bfdea6ea0..000000000
--- a/runtime/onert/backend/acl_common/AclActivationBuilder.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_ACL_ACTIVATION_BUILDER_H__
-#define __ONERT_BACKEND_ACL_COMMON_ACL_ACTIVATION_BUILDER_H__
-
-#include <memory>
-
-#include <ir/InternalType.h>
-#include <exec/IFunction.h>
-#include <exec/NopFunction.h>
-
-#include "Convert.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-template <typename T_Tensor, typename T_ActivationLayer, typename T_ExecFunction>
-class AclActivationBuilder
-{
-private:
- static std::unique_ptr<exec::IFunction> generateReLU(T_Tensor *ifm_alloc);
- static std::unique_ptr<exec::IFunction> generateReLU1(T_Tensor *ifm_alloc);
- static std::unique_ptr<exec::IFunction> generateReLU6(T_Tensor *ifm_alloc);
-
-public:
- static std::unique_ptr<exec::IFunction> generate(ir::Activation code, T_Tensor *ifm_alloc);
-};
-
-template <typename T_Tensor, typename T_ActivationLayer, typename T_ExecFunction>
-std::unique_ptr<exec::IFunction>
-AclActivationBuilder<T_Tensor, T_ActivationLayer, T_ExecFunction>::generateReLU(T_Tensor *ifm_alloc)
-{
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
-
- auto fn = std::make_unique<T_ActivationLayer>();
-
- fn->configure(ifm_alloc, nullptr, act_info);
-
- return asFunction<T_ExecFunction>(std::move(fn));
-}
-
-template <typename T_Tensor, typename T_ActivationLayer, typename T_ExecFunction>
-std::unique_ptr<exec::IFunction>
-AclActivationBuilder<T_Tensor, T_ActivationLayer, T_ExecFunction>::generateReLU1(
- T_Tensor *ifm_alloc)
-{
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
-
- auto fn = std::make_unique<T_ActivationLayer>();
-
- fn->configure(ifm_alloc, nullptr, act_info);
-
- return asFunction<T_ExecFunction>(std::move(fn));
-}
-
-template <typename T_Tensor, typename T_ActivationLayer, typename T_ExecFunction>
-std::unique_ptr<exec::IFunction>
-AclActivationBuilder<T_Tensor, T_ActivationLayer, T_ExecFunction>::generateReLU6(
- T_Tensor *ifm_alloc)
-{
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
-
- auto fn = std::make_unique<T_ActivationLayer>();
-
- fn->configure(ifm_alloc, nullptr, act_info);
-
- return asFunction<T_ExecFunction>(std::move(fn));
-}
-
-template <typename T_Tensor, typename T_ActivationLayer, typename T_ExecFunction>
-std::unique_ptr<exec::IFunction>
-AclActivationBuilder<T_Tensor, T_ActivationLayer, T_ExecFunction>::generate(ir::Activation code,
- T_Tensor *ifm_alloc)
-{
- switch (code)
- {
- case ir::Activation::NONE:
- {
- return std::make_unique<exec::NopFunction>();
- }
- case ir::Activation::RELU:
- {
- return generateReLU(ifm_alloc);
- }
- case ir::Activation::RELU1:
- {
- return generateReLU1(ifm_alloc);
- }
- case ir::Activation::RELU6:
- {
- return generateReLU6(ifm_alloc);
- }
- default:
- {
- throw std::runtime_error("Not supported, yet");
- }
- }
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_ACL_ACTIVATION_BUILDER_H__
diff --git a/runtime/onert/backend/acl_common/AclConstantInitializer.cc b/runtime/onert/backend/acl_common/AclConstantInitializer.cc
deleted file mode 100644
index 6ad5b7b69..000000000
--- a/runtime/onert/backend/acl_common/AclConstantInitializer.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "AclConstantInitializer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-AclConstantInitializer::AclConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg)
- : IConstantInitializer{operands}, _tensor_reg{tensor_reg}
-{
- // DO NOTHING
-}
-
-void AclConstantInitializer::copyInputInitialize(const ir::Operation &node, uint32_t index)
-{
- assert(node.getInputs().size() > index);
-
- const auto &input_index = node.getInputs().at(index);
- const auto &input_obj = _operands.at(input_index);
- registerCopyInitializer(input_index, input_obj);
-}
-
-void AclConstantInitializer::permuteInputInitialize(const ir::Operation &node, uint32_t index)
-{
- assert(node.getInputs().size() > index);
-
- const auto &input_index = node.getInputs().at(index);
- const auto &input_obj = _operands.at(input_index);
- registerPermuteInitializer(input_index, input_obj);
-}
-
-void AclConstantInitializer::visit(const ir::operation::BatchToSpaceND &node)
-{
- const auto &block_size_index = node.getInputs().at(ir::operation::BatchToSpaceND::BLOCK_SIZE);
- const auto &block_size_obj = _operands.at(block_size_index);
-
- if (block_size_obj.isConstant())
- {
- _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::ITensor &obj) {
- assert(model_obj.data());
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data()->base());
- assert(model_obj.shape().rank() == 1);
- obj.access([&](ITensor &tensor) {
- for (size_t i = 0; i < shape.num_elements(); ++i)
- {
- const int32_t value = base[shape.num_elements() - i - 1];
- int32_t *into = reinterpret_cast<int32_t *>(tensor.buffer() +
- tensor.calcOffset({static_cast<int32_t>(i)}));
- *into = value;
- }
- });
- };
- }
-}
-
-void AclConstantInitializer::visit(const ir::operation::Conv2D &node)
-{
- permuteInputInitialize(node, ir::operation::Conv2D::KERNEL);
- copyInputInitialize(node, ir::operation::Conv2D::BIAS);
-}
-
-void AclConstantInitializer::visit(const ir::operation::DepthwiseConv2D &node)
-{
- permuteInputInitialize(node, ir::operation::DepthwiseConv2D::KERNEL);
- copyInputInitialize(node, ir::operation::DepthwiseConv2D::BIAS);
-}
-
-void AclConstantInitializer::visit(const ir::operation::FullyConnected &node)
-{
- copyInputInitialize(node, ir::operation::FullyConnected::WEIGHT);
- copyInputInitialize(node, ir::operation::FullyConnected::BIAS);
-}
-
-void AclConstantInitializer::visit(const ir::operation::LSTM &node)
-{
- copyInputInitialize(node, ir::operation::LSTM::INPUT_TO_INPUT_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::INPUT_TO_FORGET_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::INPUT_TO_CELL_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::INPUT_TO_OUTPUT_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::RECURRENT_TO_INPUT_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::RECURRENT_TO_FORGET_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::RECURRENT_TO_CELL_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::RECURRENT_TO_OUTPUT_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::CELL_TO_INPUT_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::CELL_TO_FORGET_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::CELL_TO_OUTPUT_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::INPUT_GATE_BIAS);
- copyInputInitialize(node, ir::operation::LSTM::FORGET_GATE_BIAS);
- copyInputInitialize(node, ir::operation::LSTM::OUTPUT_GATE_BIAS);
- copyInputInitialize(node, ir::operation::LSTM::PROJECTION_WEIGHTS);
- copyInputInitialize(node, ir::operation::LSTM::PROJECTION_BIAS);
-}
-
-void AclConstantInitializer::visit(const ir::operation::RNN &node)
-{
- copyInputInitialize(node, ir::operation::RNN::WEIGHTS);
- copyInputInitialize(node, ir::operation::RNN::RECURRENT_WEIGHTS);
- copyInputInitialize(node, ir::operation::RNN::BIAS);
-}
-
-void AclConstantInitializer::visit(const ir::operation::TransposeConv &node)
-{
- permuteInputInitialize(node, ir::operation::TransposeConv::KERNEL);
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_common/AclConstantInitializer.h b/runtime/onert/backend/acl_common/AclConstantInitializer.h
deleted file mode 100644
index 52f4c54cf..000000000
--- a/runtime/onert/backend/acl_common/AclConstantInitializer.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_ACL_COMMON_ACLCONSTANT_INITIALIZER_H__
-#define __ONERT_COMPILER_ACL_COMMON_ACLCONSTANT_INITIALIZER_H__
-
-#include <backend/IConstantInitializer.h>
-#include <ir/Operands.h>
-#include "AclTensorRegistry.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-class AclConstantInitializer : public IConstantInitializer
-{
-public:
- AclConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg);
-
-public:
- void visit(const ir::operation::BatchToSpaceND &) override;
- void visit(const ir::operation::Conv2D &) override;
- void visit(const ir::operation::DepthwiseConv2D &) override;
- void visit(const ir::operation::FullyConnected &) override;
- void visit(const ir::operation::LSTM &) override;
- void visit(const ir::operation::RNN &) override;
- void visit(const ir::operation::TransposeConv &) override;
-
-protected:
- void copyInputInitialize(const ir::Operation &node, uint32_t index);
- void permuteInputInitialize(const ir::Operation &node, uint32_t index);
-
-private:
- std::shared_ptr<ITensorRegistry> tensor_registry() const final { return _tensor_reg; }
-
-protected:
- std::shared_ptr<ITensorRegistry> _tensor_reg;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_COMPILER_ACL_COMMON_ACLCONSTANT_INITIALIZER_H__
diff --git a/runtime/onert/backend/acl_common/AclFunction.h b/runtime/onert/backend/acl_common/AclFunction.h
deleted file mode 100644
index 94b65863a..000000000
--- a/runtime/onert/backend/acl_common/AclFunction.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_KERNEL_ACL_FUNCTION_H__
-#define __ONERT_BACKEND_ACL_COMMON_KERNEL_ACL_FUNCTION_H__
-
-#include <exec/IFunction.h>
-#include <arm_compute/runtime/IFunction.h>
-#include <memory>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-class AclFunction : public ::onert::exec::IFunction
-{
-public:
- AclFunction() = delete;
-
-public:
- AclFunction(std::unique_ptr<::arm_compute::IFunction> &&func) : _func(std::move(func))
- {
- // DO NOTHING
- }
-
-public:
- void run() override { _func->run(); }
- void prepare() override { _func->prepare(); }
-
-private:
- std::unique_ptr<::arm_compute::IFunction> _func;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_KERNEL_ACL_FUNCTION_H__
diff --git a/runtime/onert/backend/acl_common/AclInternalBufferManager.h b/runtime/onert/backend/acl_common/AclInternalBufferManager.h
deleted file mode 100644
index f893bb44b..000000000
--- a/runtime/onert/backend/acl_common/AclInternalBufferManager.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_INTERNAL_BUFFER_MANAGER_H__
-#define __ONERT_BACKEND_ACL_COMMON_INTERNAL_BUFFER_MANAGER_H__
-
-#include <arm_compute/runtime/IMemoryManager.h>
-#include <cassert>
-#include <memory>
-#include <backend/IMemoryManager.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-// NOTE. If any backend can use something like InternalBufferManager,
-// this interface can be moved to core/include/backend/
-/**
- * @brief Interface for InternalBufferManager which has ::arm_compute::IMemoryManager pointer
- */
-struct IInternalBufferManager : public backend::IMemoryManager
-{
- virtual ~IInternalBufferManager() = default;
-
- /**
- * @brief Get shared_ptr of ::arm_compute::IMemoryManager
- */
- virtual std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void) = 0;
-};
-
-/**
- * @brief class for InternalBufferManager which has ::arm_compute::IMemoryManager pointer
- */
-template <typename T_MemoryManager, typename T_PoolManager, typename T_LifetimeManager,
- typename T_Allocator>
-class AclInternalBufferManager : public IInternalBufferManager
-{
-public:
- AclInternalBufferManager() : _allocator{nullptr}
- {
- std::shared_ptr<T_LifetimeManager> lifetime_mgr = std::make_shared<T_LifetimeManager>();
- std::shared_ptr<T_PoolManager> pool_mgr = std::make_shared<T_PoolManager>();
-
- _internal_manager = std::make_shared<T_MemoryManager>(lifetime_mgr, pool_mgr);
- assert(_internal_manager);
- }
-
- virtual ~AclInternalBufferManager() = default;
-
- /**
- * @brief Allocate the internal buffer manager on acl
- */
- void allocate(void) override
- {
- _allocator = std::make_shared<T_Allocator>();
- _internal_manager->populate(*_allocator, 1);
- }
-
- /**
- * @brief Deallocate the internal buffer manager on acl
- */
- void deallocate(void) override { _internal_manager->clear(); }
-
- /**
- * @brief Get shared_ptr of ::arm_compute::IMemoryManager
- */
- std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void) override
- {
- return _internal_manager;
- }
-
-private:
- std::shared_ptr<T_Allocator> _allocator;
- std::shared_ptr<T_MemoryManager> _internal_manager;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_INTERNAL_BUFFER_MANAGER_H__
diff --git a/runtime/onert/backend/acl_common/AclKernelGen.h b/runtime/onert/backend/acl_common/AclKernelGen.h
deleted file mode 100644
index 257bbd3b4..000000000
--- a/runtime/onert/backend/acl_common/AclKernelGen.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_ACL_KERNEL_GEN_H_
-#define __ONERT_BACKEND_ACL_COMMON_ACL_KERNEL_GEN_H_
-
-#include <exec/IFunction.h>
-#include <ir/Operands.h>
-
-#include <ir/operation/LSTM.h>
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-void enableDimCorrection(IACLTensor *tensor)
-{
- size_t input_rank = tensor->num_dimensions();
- const_cast<arm_compute::TensorShape &>(tensor->info()->tensor_shape())
- .set(input_rank - 1, tensor->info()->dimension(input_rank - 1), true);
-}
-
-void disableDimCorrection(IACLTensor *tensor)
-{
- size_t input_rank = tensor->num_dimensions();
- const_cast<arm_compute::TensorShape &>(tensor->info()->tensor_shape())
- .set(input_rank - 1, tensor->info()->dimension(input_rank - 1), false);
-}
-
-template <typename Layer, typename... Args>
-std::unique_ptr<arm_compute::IFunction> generateLayer(Args &&... args)
-{
- auto l = std::make_unique<Layer>();
-
- l->configure(std::forward<Args>(args)...);
-
- return l;
-}
-
-template <typename Layer, typename... Args>
-std::unique_ptr<arm_compute::IFunction>
-generateLayer(std::shared_ptr<arm_compute::IMemoryManager> memory_manager, Args &&... args)
-{
- auto l = std::make_unique<Layer>(memory_manager);
-
- l->configure(std::forward<Args>(args)...);
-
- return l;
-}
-
-template <typename T_FunctionWrapper, typename T_Tensor, typename T_ACLLayer,
- typename T_TensorRegistry>
-std::unique_ptr<exec::IFunction> kernelGenLSTM(const ir::operation::LSTM &node,
- const ir::Operands &operands,
- const std::shared_ptr<T_TensorRegistry> &tensor_reg)
-{
- // TODO Support dynamic rnn
- // TODO Fix subtle error in the case of non-CIFG, non-peephole and No Projection.
- const auto scratch_buffer_index{
- node.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)};
- const auto output_state_out_index{
- node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)};
- const auto cell_state_out_index{
- node.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)};
- const auto output_index{node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)};
-
- const auto input_index{node.getInputs().at(ir::operation::LSTM::Input::INPUT)};
- const auto input_to_input_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)}; // optional
- const auto input_to_forget_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_FORGET_WEIGHTS)};
- const auto input_to_cell_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_CELL_WEIGHTS)};
- const auto input_to_output_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)};
- const auto recurrent_to_input_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)}; // optional
- const auto recurrent_to_forget_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_FORGET_WEIGHTS)};
- const auto recurrent_to_cell_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_CELL_WEIGHTS)};
- const auto recurrent_to_output_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)};
- const auto cell_to_input_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_INPUT_WEIGHTS)}; // optional
- const auto cell_to_forget_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_FORGET_WEIGHTS)}; // optional
- const auto cell_to_output_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_OUTPUT_WEIGHTS)}; // optional
- const auto input_gate_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_GATE_BIAS)};
- const auto forget_gate_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::FORGET_GATE_BIAS)};
- const auto cell_bias_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_BIAS)};
- const auto output_gate_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_GATE_BIAS)};
- const auto projection_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_WEIGHTS)}; // optional
- const auto projection_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_BIAS)}; // optional
- const auto output_state_in_index{
- node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_STATE_IN)};
- const auto cell_state_in_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_STATE_IN)};
- const auto cell_threshold = node.param().cell_threshold;
- const auto projection_threshold = node.param().projection_threshold;
-
- bool has_input_to_input_weights = operands.at(input_to_input_weights_index).shape().dim(0) != 0 &&
- operands.at(input_to_input_weights_index).shape().dim(1) != 0;
- bool has_recurrent_to_input_weights =
- operands.at(recurrent_to_input_weights_index).shape().dim(0) != 0 &&
- operands.at(recurrent_to_input_weights_index).shape().dim(1) != 0;
- bool has_cell_to_forget_weights = operands.at(cell_to_forget_weights_index).shape().dim(0) != 0;
- bool has_cell_to_output_weights = operands.at(cell_to_output_weights_index).shape().dim(0) != 0;
- bool has_projection_weights = operands.at(projection_weights_index).shape().dim(0) != 0 &&
- operands.at(projection_weights_index).shape().dim(1) != 0;
- bool has_projection_bias = operands.at(projection_bias_index).shape().dim(0);
-
- // NOTE The input_to_input_weights and the recurrent_to_input_weights do not exist in CIFG.
- // true: no CIFG
- // false: CIFG
- // NOTE The cell_to_input_weights does not exist in non-peephole although regular LSTM(non-CIFG).
- bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
-
- // NOTE The cell_to_forget_weights and the cell_to_output_weights exist in peephole.
- // But the cell_to_input_weights does not exist in regular CIFG although peephole.
- // true: peephole
- // false: no peephole
- bool has_peephole_param = has_cell_to_forget_weights && has_cell_to_output_weights;
-
- // NOTE Although the projection weights has data the projection bias may not have data.
- bool has_projection_param = has_projection_weights;
-
- const auto activation = node.param().activation;
- const auto cell_clip = cell_threshold;
- const auto projection_clip = projection_threshold;
- assert(cell_clip >= 0.f && projection_clip >= 0.f);
-
- auto scratch_buffer_tensor = tensor_reg->getAclTensor(scratch_buffer_index);
- auto output_state_out_tensor = tensor_reg->getAclTensor(output_state_out_index);
- auto cell_state_out_tensor = tensor_reg->getAclTensor(cell_state_out_index);
- auto output_tensor = tensor_reg->getAclTensor(output_index);
-
- auto input_tensor = tensor_reg->getAclTensor(input_index);
-
- auto input_to_forget_weights_tensor = tensor_reg->getAclTensor(input_to_forget_weights_index);
- auto input_to_cell_weights_tensor = tensor_reg->getAclTensor(input_to_cell_weights_index);
- auto input_to_output_weights_tensor = tensor_reg->getAclTensor(input_to_output_weights_index);
- auto recurrent_to_forget_weights_tensor =
- tensor_reg->getAclTensor(recurrent_to_forget_weights_index);
- auto recurrent_to_cell_weights_tensor = tensor_reg->getAclTensor(recurrent_to_cell_weights_index);
- auto recurrent_to_output_weights_tensor =
- tensor_reg->getAclTensor(recurrent_to_output_weights_index);
-
- auto forget_gate_bias_tensor = tensor_reg->getAclTensor(forget_gate_bias_index);
- auto cell_bias_tensor = tensor_reg->getAclTensor(cell_bias_index);
- auto output_gate_bias_tensor = tensor_reg->getAclTensor(output_gate_bias_index);
- auto output_state_in_tensor = tensor_reg->getAclTensor(output_state_in_index);
- auto cell_state_in_tensor = tensor_reg->getAclTensor(cell_state_in_index);
-
- auto act_info = asActivationLayerInfo(activation);
-
- ::arm_compute::LSTMParams<T_Tensor> lstm_params{};
- if (has_cifg_param)
- {
- auto input_to_input_weights_tensor =
- tensor_reg->getAclTensor(input_to_input_weights_index); // optional
- auto recurrent_to_input_weights_tensor =
- tensor_reg->getAclTensor(recurrent_to_input_weights_index); // optional
- auto cell_to_input_weights_handle =
- has_peephole_param ? tensor_reg->getAclTensor(cell_to_input_weights_index)->handle()
- : nullptr; // optional (non-cifg && peephole)
- auto input_gate_bias_tensor = tensor_reg->getAclTensor(input_gate_bias_index); // optional
- lstm_params.set_cifg_params(input_to_input_weights_tensor->handle(),
- recurrent_to_input_weights_tensor->handle(),
- cell_to_input_weights_handle, input_gate_bias_tensor->handle());
- }
- if (has_peephole_param)
- {
- auto cell_to_forget_weights_tensor =
- tensor_reg->getAclTensor(cell_to_forget_weights_index); // optional
- auto cell_to_output_weights_tensor =
- tensor_reg->getAclTensor(cell_to_output_weights_index); // optional
- lstm_params.set_peephole_params(cell_to_forget_weights_tensor->handle(),
- cell_to_output_weights_tensor->handle());
- }
- if (has_projection_param)
- {
- auto projection_weights_tensor = tensor_reg->getAclTensor(projection_weights_index); // optional
- auto projection_bias_handle = has_projection_bias
- ? tensor_reg->getAclTensor(projection_bias_index)->handle()
- : nullptr; // optional
- lstm_params.set_projection_params(projection_weights_tensor->handle(), projection_bias_handle);
- }
-
- auto fn = generateLayer<T_ACLLayer>(
- input_tensor->handle(), input_to_forget_weights_tensor->handle(),
- input_to_cell_weights_tensor->handle(), input_to_output_weights_tensor->handle(),
- recurrent_to_forget_weights_tensor->handle(), recurrent_to_cell_weights_tensor->handle(),
- recurrent_to_output_weights_tensor->handle(), forget_gate_bias_tensor->handle(),
- cell_bias_tensor->handle(), output_gate_bias_tensor->handle(),
- output_state_in_tensor->handle(), cell_state_in_tensor->handle(),
- scratch_buffer_tensor->handle(), output_state_out_tensor->handle(),
- cell_state_out_tensor->handle(), output_tensor->handle(), lstm_params, act_info, cell_clip,
- projection_clip);
-
- return std::make_unique<T_FunctionWrapper>(std::move(fn));
-}
-
-template <typename T_FunctionWrapper, typename T_Tensor, typename T_ACLLayer,
- typename T_TensorBuilder, typename T_TensorRegistry>
-std::unique_ptr<exec::IFunction>
-kernelGenFullyConnected(const ir::operation::FullyConnected &node, const ir::Operands &operands,
- const std::shared_ptr<T_TensorBuilder> &tensor_builder,
- const std::shared_ptr<T_TensorRegistry> &tensor_reg, ir::Layout layout)
-{
- using ir::operation::FullyConnected;
-
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)};
- const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)};
- const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
-
- const auto input_rank = operands.at(input_index).shape().rank();
-
- const auto output_size =
- operands.at(output_index).shape().dim(operands.at(output_index).shape().rank() - 1);
- UNUSED_RELEASE(output_size);
- assert(operands.at(bias_index).shape().dim(0) == output_size);
- assert(operands.at(weight_index).shape().dim(0) == output_size);
- const auto batch_size =
- operands.at(output_index).shape().dim(operands.at(output_index).shape().rank() - 2);
- const auto input_size =
- operands.at(weight_index).shape().dim(operands.at(weight_index).shape().rank() - 1);
-
- // Check for reshaping input's shape into rank-2
- bool needs_reshape = false;
- ir::Shape reshape(2);
- if (input_rank == 3 || input_rank == 4)
- {
- const auto &ifm_shape = operands.at(input_index).shape();
- auto feature_size = 1;
- for (int i = 0; i < ifm_shape.rank(); ++i)
- {
- feature_size *= ifm_shape.dim(i);
- }
-
- UNUSED_RELEASE(feature_size);
- assert(feature_size == batch_size * input_size);
-
- // for reshaping
- needs_reshape = true;
- reshape.dim(0) = batch_size; /* H */
- reshape.dim(1) = input_size; /* W */
- }
-
- auto output_tensor = tensor_reg->getAclTensor(output_index);
- const auto input_tensor = tensor_reg->getAclTensor(input_index);
- const auto weight_tensor = tensor_reg->getAclTensor(weight_index);
- const auto bias_tensor = tensor_reg->getAclTensor(bias_index);
- const auto frontend_layout = layout;
- const auto acl_layout = output_tensor->handle()->info()->data_layout();
-
- typename T_ACLLayer::KernelType kernel_type = T_ACLLayer::KernelType::GENERAL;
- if (operands.at(weight_index).isConstant())
- {
- kernel_type = T_ACLLayer::KernelType::PREPROCESSED_WEIGHTS;
- assert(operands.at(weight_index).data());
- }
-
- auto fn = generateLayer<T_ACLLayer>(
- tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
- weight_tensor->handle(), bias_tensor->handle(), output_tensor->handle(), needs_reshape,
- asTensorShape(reshape, frontend_layout, asRuntimeLayout(acl_layout)), kernel_type);
-
- return std::make_unique<T_FunctionWrapper>(std::move(fn));
-}
-
-template <typename T_ACLLayer, typename T_PoolOp, typename T_AclTensorRegistry>
-std::unique_ptr<::arm_compute::IFunction>
-kernelGenPool2D(const T_PoolOp &node, const ir::Operands &operands,
- const std::shared_ptr<T_AclTensorRegistry> &tensor_reg, ir::Layout layout,
- ::arm_compute::PoolingType pooling_type)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(0)};
-
- const auto ofm_shape = operands.at(ofm_index).shape().asFeature(layout);
- const auto ifm_shape = operands.at(ifm_index).shape().asFeature(layout);
-
- const auto kh = node.param().kh;
- const auto kw = node.param().kw;
- const auto stride = node.param().stride;
- const auto padding =
- ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh);
-
- VERBOSE(Pool2DParam) << "IFM_H: " << ifm_shape.H << std::endl;
- VERBOSE(Pool2DParam) << "IFM_W: " << ifm_shape.W << std::endl;
- VERBOSE(Pool2DParam) << "OFM_H: " << ofm_shape.H << std::endl;
- VERBOSE(Pool2DParam) << "OFM_W: " << ofm_shape.W << std::endl;
- VERBOSE(Pool2DParam) << "KER_H: " << kh << std::endl;
- VERBOSE(Pool2DParam) << "KER_W: " << kw << std::endl;
- VERBOSE(Pool2DParam) << "STRIDE_H: " << stride.vertical << std::endl;
- VERBOSE(Pool2DParam) << "STRIDE_W: " << stride.horizontal << std::endl;
- VERBOSE(Pool2DParam) << "PAD(T): " << padding.top << std::endl;
- VERBOSE(Pool2DParam) << "PAD(B): " << padding.bottom << std::endl;
- VERBOSE(Pool2DParam) << "PAD(L): " << padding.left << std::endl;
- VERBOSE(Pool2DParam) << "PAD(R): " << padding.right << std::endl;
-
- auto ofm_tensor = tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = tensor_reg->getAclTensor(ifm_index);
-
- ::arm_compute::PoolingLayerInfo info{
- pooling_type, ::arm_compute::Size2D{kw, kh}, ifm_tensor->info()->data_layout(),
- asPadStrideInfo(padding, stride), true /* exclude_padding */};
-
- auto fn = generateLayer<T_ACLLayer>(ifm_tensor->handle(), ofm_tensor->handle(), info);
-
- return fn;
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_ACL_KERNEL_GEN_H_
diff --git a/runtime/onert/backend/acl_common/AclLinearMemoryManager.h b/runtime/onert/backend/acl_common/AclLinearMemoryManager.h
deleted file mode 100644
index 09f25e7a8..000000000
--- a/runtime/onert/backend/acl_common/AclLinearMemoryManager.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_LINEAR_MEMORY_MANAGER_H__
-#define __ONERT_BACKEND_ACL_COMMON_LINEAR_MEMORY_MANAGER_H__
-
-#include <cassert>
-
-#include "AclMemoryManager.h"
-#include "ir/OperandIndexMap.h"
-#include "util/logging.h"
-
-namespace
-{
-
-template <typename T_MemoryManager, typename T_PoolManager, typename T_LifetimeManager>
-std::shared_ptr<T_MemoryManager> createMemoryManager()
-{
- std::shared_ptr<T_LifetimeManager> lifetime_mgr = std::make_shared<T_LifetimeManager>();
- std::shared_ptr<T_PoolManager> pool_mgr = std::make_shared<T_PoolManager>();
-
- std::shared_ptr<T_MemoryManager> mem_mgr =
- std::make_shared<T_MemoryManager>(lifetime_mgr, pool_mgr);
- return mem_mgr;
-}
-
-} // namespace
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_MemoryManager,
- typename T_PoolManager, typename T_LifetimeManager, typename T_Allocator,
- typename T_MemoryGroup>
-class AclLinearMemoryManager : public AclMemoryManager<T_ITensor, T_Tensor, T_SubTensor>
-{
-public:
- AclLinearMemoryManager()
- : _allocator{nullptr},
- _io_manager{createMemoryManager<T_MemoryManager, T_PoolManager, T_LifetimeManager>()},
- _io_group{std::make_shared<T_MemoryGroup>(_io_manager)}
- {
- // DO NOTHING
- }
-
- virtual ~AclLinearMemoryManager() = default;
-
- void allocate(void) override
- {
- _allocator = std::make_shared<T_Allocator>();
- _io_manager->populate(*_allocator, 1);
- _io_group->acquire();
- }
-
- void deallocate(void) override
- {
- _io_group->release();
- _io_manager->clear();
- }
-
- void startLifetime(const ir::OperandIndex &ind) override
- {
- auto &tensors = this->tensors();
- assert(tensors.find(ind) != tensors.end());
-
- auto tensor = tensors[ind];
- assert(tensor->handle());
-
- _io_group->manage(tensor->handle());
- }
-
- void finishLifetime(const ir::OperandIndex &ind) override
- {
- auto &tensors = this->tensors();
- assert(tensors.find(ind) != tensors.end());
-
- auto tensor = tensors[ind];
- assert(tensor->allocator());
-
- tensor->allocator()->allocate();
- }
-
-private:
- std::shared_ptr<T_Allocator> _allocator;
- std::shared_ptr<T_MemoryManager> _io_manager;
- std::shared_ptr<T_MemoryGroup> _io_group;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_LINEAR_MEMORY_MANAGER_H__
diff --git a/runtime/onert/backend/acl_common/AclMemoryManager.h b/runtime/onert/backend/acl_common/AclMemoryManager.h
deleted file mode 100644
index eefcec130..000000000
--- a/runtime/onert/backend/acl_common/AclMemoryManager.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_MEMORY_MANAGER_H__
-#define __ONERT_BACKEND_ACL_COMMON_MEMORY_MANAGER_H__
-
-#include <arm_compute/core/Types.h>
-#include <arm_compute/runtime/IMemoryManager.h>
-#include <cassert>
-
-#include "backend/IMemoryManager.h"
-#include "ir/OperandIndexMap.h"
-#include "Convert.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-class AclMemoryManager : public backend::IMemoryManager
-{
-public:
- AclMemoryManager()
- {
- // DO NOTHING
- }
-
- virtual ~AclMemoryManager() = default;
-
- void allocate(void) override
- {
- for (const auto &tensor_entry : _tensors)
- {
- auto tensor = tensor_entry.second;
- tensor->allocator()->allocate();
- }
- }
-
- void deallocate(void) override
- {
- for (const auto &tensor_entry : _tensors)
- {
- auto tensor = tensor_entry.second;
- tensor->allocator()->free();
- }
- }
-
- virtual void startLifetime(const ir::OperandIndex &) { /* DO NOTHING */}
- virtual void finishLifetime(const ir::OperandIndex &) { /* DO NOTHING */}
-
- void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank,
- size_t num_uses)
- {
- auto tensor = std::make_shared<T_Tensor>(info, rank, num_uses);
- _tensors[ind] = tensor;
- }
-
- void buildSubtensor(std::shared_ptr<T_ITensor> parent_tensor, const ir::OperandIndex &child_ind,
- const ::arm_compute::TensorShape &shape,
- const ::arm_compute::Coordinates &coordinates, size_t rank,
- bool extent_parent)
- {
- auto subtensor =
- std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates, rank, extent_parent);
- _subtensors[child_ind] = subtensor;
- }
-
- ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &tensors(void) { return _tensors; }
-
- ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &subtensors(void) { return _subtensors; }
-
-private:
- ir::OperandIndexMap<std::shared_ptr<T_Tensor>> _tensors;
- ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> _subtensors;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_MEMORY_MANAGER_H__
diff --git a/runtime/onert/backend/acl_common/AclSubTensorAnalyzer.h b/runtime/onert/backend/acl_common/AclSubTensorAnalyzer.h
deleted file mode 100644
index beec95718..000000000
--- a/runtime/onert/backend/acl_common/AclSubTensorAnalyzer.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_ACL_SUB_TENSOR_ANALYZER_H__
-#define __ONERT_BACKEND_ACL_COMMON_ACL_SUB_TENSOR_ANALYZER_H__
-
-#include <ir/OperationVisitor.h>
-#include <ir/Graph.h>
-#include "ParentInfo.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-/**
- * @brief Class to analyze tensor subsumption
- */
-class AclSubTensorAnalyzer : public ir::OperationVisitor
-{
-public:
- /**
- * @brief Construct a new SubTensorAnalyzer object
- * @param[in] ctx Graph operand set
- */
- AclSubTensorAnalyzer(const ir::Graph &graph) : _graph{graph}
- {
- // DO NOTHING
- }
-
-public:
- void setLayout(ir::Layout layout) { _current_op_layout = layout; }
-
- void visit(const ir::operation::Concat &node) override
- {
- // If operator is concat, fill subsumption info
- int32_t axis_raw = node.param().axis;
-
- const auto &output_index = node.getOutputs().at(0);
- const auto &inputs = node.getInputs();
-
- int32_t axis_point = 0;
- const auto rank = _graph.operands().at(output_index).shape().rank();
- int32_t axis = axis_raw < 0 ? (axis_raw + rank) : axis_raw;
- assert(rank > axis);
-
- for (const auto &ind : inputs)
- {
- /**
- * NOTE Not support below cases.
- * 1. concat's input is a constant.
- * 2. concat's input is a input of model.
- * 3. concat's input already becomes a subtensor of another concat.
- */
- if (_graph.operands().at(ind).isConstant() || _graph.getInputs().contains(ind) ||
- _parent_map.find(ind) != _parent_map.end())
- {
- return;
- }
- }
-
- for (const auto &input_index : inputs)
- {
- auto input_shape = _graph.operands().at(input_index).shape();
- assert(rank == input_shape.rank());
-
- ir::Coordinates coordinate_info{};
- for (int i = 0; i < rank; i++)
- {
- coordinate_info.set(i, 0);
- }
- coordinate_info.set(axis, axis_point);
-
- _parent_map.emplace(
- input_index, acl_common::ParentInfo{output_index, _current_op_layout, coordinate_info});
-
- axis_point += input_shape.dim(axis);
- }
- }
-
- std::unordered_map<ir::OperandIndex, ParentInfo> &&releaseParentMap()
- {
- return std::move(_parent_map);
- }
-
-private:
- const ir::Graph &_graph;
- std::unordered_map<ir::OperandIndex, ParentInfo> _parent_map;
- ir::Layout _current_op_layout{ir::Layout::UNKNOWN};
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_ACL_SUB_TENSOR_ANALYZER_H__
diff --git a/runtime/onert/backend/acl_common/AclTensorBuilder.h b/runtime/onert/backend/acl_common/AclTensorBuilder.h
deleted file mode 100644
index bb7abc95d..000000000
--- a/runtime/onert/backend/acl_common/AclTensorBuilder.h
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__
-#define __ONERT_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__
-
-#include <memory>
-#include <queue>
-
-#include <arm_compute/core/Types.h>
-#include <backend/ITensorBuilder.h>
-#include "ir/OperandIndexMap.h"
-#include <ir/Operands.h>
-#include "AclTensorManager.h"
-#include "AclTensorRegistry.h"
-#include <memory>
-#include "ParentInfo.h"
-#include <util/Utils.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-enum class UsesType
-{
- FIRST,
- LAST
-};
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-class AclTensorBuilder : public ITensorBuilder
-{
-public:
- using T_AclTensorManager = AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>;
-
- AclTensorBuilder(const ir::Operands &operands, T_AclTensorManager *tensor_mgr,
- const std::shared_ptr<AclTensorRegistry<T_AclTensorManager>> &tensor_reg);
-
- /**
- * @brief Register tensor information to allocate on ACL-CL backend
- * @param[in] ind Operand index
- * @param[in] info Tensor information
- * @param[in] layout Tensor data layout
- */
- void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
- ir::Layout backend_layout) override;
-
- void notifyFirstUse(const ir::OperandIndex &) override;
- void notifyLastUse(const ir::OperandIndex &) override;
-
- bool isRegistered(const ir::OperandIndex &) const override;
-
- void prepare(void) override;
- void allocate() override;
- void postFunctionPrepare() override;
-
- T_AclTensorManager *acl_tensor_manager(void) { return _tensor_mgr.get(); }
-
- void setUsesCount(const ir::OperandIndex &index, size_t num_uses)
- {
- assert(_uses_count_map.find(index) != _uses_count_map.end() ? _uses_count_map[index] == num_uses
- : true);
- _uses_count_map[index] = num_uses;
- }
-
- void parent_map(std::unordered_map<ir::OperandIndex, ParentInfo> &&parent_map)
- {
- _parent_map = std::move(parent_map);
- }
-
- bool areSubTensorsOf(const ir::OperandIndex &parent, const ir::OperandIndexSequence &seq);
-
- /**
- * @brief Check child tensor is allocated as subtensor of parent tensor
- * @param[in] parent Index of parent
- * @param[in] child Index of child
- * @return @c true if child is allocated as subtensor of parent, otherwise @c false
- */
- bool isSubTensorOf(const ir::OperandIndex &parent, const ir::OperandIndex &child);
-
-private:
- void buildTensors(void);
- ir::OperandIndex findRootParent(ir::OperandIndex index);
-
-private:
- const ir::Operands &_operands;
- ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
- ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
- ir::OperandIndexMap<size_t> _uses_count_map;
-
- std::unique_ptr<T_AclTensorManager> _tensor_mgr;
- std::shared_ptr<AclTensorRegistry<T_AclTensorManager>> _tensor_reg;
-
- // for linear executor
- std::vector<std::pair<UsesType, ir::OperandIndex>> _lifetime_seq;
-
- // Extra info for concat elimination
- ir::OperandIndexMap<ParentInfo> _parent_map;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#include <cassert>
-#include <stack>
-
-#include "Convert.h"
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::AclTensorBuilder(
- const ir::Operands &operands, T_AclTensorManager *tensor_mgr,
- const std::shared_ptr<AclTensorRegistry<T_AclTensorManager>> &tensor_reg)
- : _operands{operands}, _tensor_mgr{tensor_mgr}, _tensor_reg{tensor_reg}
-{
- assert(_tensor_mgr);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::registerTensorInfo(
- const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout)
-{
- assert(_tensor_mgr->constTensors().size() == 0);
- assert(_tensor_mgr->nonconstTensors().size() == 0);
-
- _uses_count_map[ind] = _operands.at(ind).getUses().size();
-
- if (_parent_map.count(ind) == 0)
- {
- // Normal Tensors
- _tensor_info_map.emplace(ind, info);
- _tensor_layout_map.insert({ind, backend_layout});
- }
- else
- {
- // SubTensors
- assert(!info.isConstant() && "Subtensors of constants are not supported yet.");
-
- // Update offset info and emplace
- auto &parent_info = _parent_map[ind];
- const auto &obj = _operands.at(ind);
- auto parent_index = parent_info.parent;
- auto &offset = parent_info.coordinates;
- auto frontend_layout = parent_info.frontend_layout;
-
- assert(obj.shape().rank() <= ir::Shape::MAX_RANK);
- auto shape = obj.shape();
- if (_operands.at(parent_index).shape().rank() >= 4 && frontend_layout == ir::Layout::NHWC &&
- backend_layout == ir::Layout::NCHW)
- {
- // Permutation changing layout beyond 4-D is not supported yet
- const auto parent_rank = _operands.at(parent_index).shape().rank();
- assert(parent_rank == 4);
- shape.extendRank(parent_rank);
- offset = {offset[0], offset[3], offset[1], offset[2]};
- }
- else if (_operands.at(parent_index).shape().rank() >= 4 &&
- frontend_layout == ir::Layout::NHWC && backend_layout == ir::Layout::NCHW)
- {
- // Permutation changing layout beyond 4-D is not supported yet
- const auto parent_rank = _operands.at(parent_index).shape().rank();
- assert(parent_rank == 4);
- shape.extendRank(parent_rank);
- offset = {offset[0], offset[2], offset[3], offset[1]};
- }
- auto new_shape = permuteShape(shape, frontend_layout, backend_layout);
- auto oi = ir::OperandInfo::createStaticInfo(new_shape, obj.typeInfo());
- _tensor_info_map.emplace(ind, oi);
- }
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::notifyFirstUse(const ir::OperandIndex &ind)
-{
- _lifetime_seq.emplace_back(UsesType::FIRST, ind);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::notifyLastUse(const ir::OperandIndex &ind)
-{
- _lifetime_seq.emplace_back(UsesType::LAST, ind);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-bool AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::isRegistered(
- const ir::OperandIndex &ind) const
-{
- return _tensor_info_map.find(ind) != _tensor_info_map.end();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::prepare(void)
-{
- buildTensors();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::allocate(void)
-{
- // Update lifetime sequence to apply subtensor optimization
-
- std::unordered_map<ir::OperandIndex, ir::OperandIndex> root_map;
- std::function<ir::OperandIndex &(ir::OperandIndex)> find_root =
- [&](ir::OperandIndex ind) -> ir::OperandIndex & {
- ir::OperandIndex &ret = root_map[ind];
-
- // We know the root parent value already
- if (ret.valid())
- return ret;
-
- auto itr = _parent_map.find(ind);
- if (itr == _parent_map.end())
- {
- // If there is no parent, let's store the value of itself
- return ret = ind;
- }
- else
- {
- return ret = find_root(itr->second.parent);
- }
- };
-
- ir::OperandIndexMap<bool> first_use_check;
- ir::OperandIndexMap<bool> last_use_check;
- std::map<size_t, std::pair<UsesType, ir::OperandIndex>> lifetime_map;
- for (size_t i = 0; i < _lifetime_seq.size(); i++)
- {
- auto &entry = _lifetime_seq[i];
- if (entry.first != UsesType::FIRST)
- continue;
- auto root_ind = find_root(entry.second);
- if (first_use_check[root_ind])
- continue;
- first_use_check[root_ind] = true;
- lifetime_map[i] = {UsesType::FIRST, root_ind};
- }
-
- for (int i = _lifetime_seq.size() - 1; i >= 0; i--)
- {
- auto &entry = _lifetime_seq[i];
- if (entry.first != UsesType::LAST)
- continue;
- auto root_ind = find_root(entry.second);
- if (last_use_check[root_ind])
- continue;
- last_use_check[root_ind] = true;
- lifetime_map[i] = {UsesType::LAST, root_ind};
- }
-
- for (auto &entry : lifetime_map)
- {
- auto &use = entry.second;
- auto use_type = use.first;
- auto use_index = use.second;
- assert(use_index.valid());
- if (use_type == UsesType::FIRST)
- _tensor_mgr->startLifetime(use_index);
- else
- _tensor_mgr->finishLifetime(use_index);
- }
-
- _tensor_mgr->allocateConsts();
-
- // TODO Since `_parent_map` is filled for all Concat nodes even if the node this backend uses
- // After refactoring BackendContext we can uncomment this
- // assert(_tensor_info_map.size() ==
- // _tensor_mgr->nonconstTensors().size() + num of constants of _tensor_info_map +
- // _parent_map.size());
- _tensor_mgr->allocateNonconsts();
-
- _tensor_mgr->allocateInternalBufferManager();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::postFunctionPrepare(void)
-{
- _tensor_mgr->tryDeallocConstants();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::buildTensors(void)
-{
- assert(_tensor_mgr->constTensors().size() == 0);
- assert(_tensor_mgr->nonconstTensors().size() == 0);
-
- // Normal tensors
- for (auto &entry : _tensor_info_map)
- {
- auto ind = entry.first;
- if (_parent_map.count(ind) > 0)
- continue;
-
- const auto &info = entry.second;
- const auto &backend_layout = _tensor_layout_map[ind];
- auto tensor_info =
- asTensorInfo(info.shape(), info.typeInfo(), ir::Layout::UNKNOWN, backend_layout, true);
- _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), info.isConstant(),
- _uses_count_map[ind]);
- }
-
- // Subtensors
- assert(_tensor_mgr->nonconstSubtensors().size() == 0);
- // TODO Iterate `_parent_map` instead, once the optimizer bug is fixed
- // `Optimizer` iterates the entire OpSequences, so there is a bug if iterating _parent_map
- for (auto &entry : _tensor_info_map)
- {
- auto ind = entry.first;
- if (_parent_map.count(ind) == 0)
- continue;
-
- // To make subtensor, parent tensor must be made first
- // For this condition, use stack
- // 1) Push one subtensor index to stack (iterate subtensors)
- // 2) If tensor at stack top is already made, pop and go to 4)
- // 3) If tensor pushed at 1) is not made, check parent tensor
- // 3-1) If parent tensor is already made, we can make child tensor
- // Make child tensor and pop, go to 4)
- // 3-2) If parent tensor is not made, we can't make child tensor yet
- // Push parent tensor index to stack and return to 4)
- // 4) If stack is empty, return to 1), else return to 2)
- auto &subtensors = _tensor_mgr->nonconstSubtensors();
-
- std::stack<ir::OperandIndex> stack;
- stack.push(ind);
-
- while (!stack.empty())
- {
- const auto current = stack.top();
- const auto &tensor_info = _tensor_info_map.at(current);
- const auto &parent_info = _parent_map.at(current);
-
- // Already generated SubTensor
- if (subtensors.find(current) != subtensors.end())
- {
- stack.pop();
- continue;
- }
-
- auto parent = parent_info.parent;
- std::shared_ptr<T_ITensor> parent_tensor = _tensor_mgr->findTensorAsParent(parent);
- if (!parent_tensor)
- {
- // Cannot find allocated parent tensor: allocate parent first
- assert(_parent_map.count(parent) > 0);
- stack.push(parent);
- continue;
- }
- assert(parent_tensor != nullptr);
-
- // Child's type should be same with parent
- assert(tensor_info.typeInfo().offset() ==
- parent_tensor->info()->quantization_info().uniform().offset);
- assert(tensor_info.typeInfo().scale() ==
- parent_tensor->info()->quantization_info().uniform().scale);
- assert(tensor_info.typeInfo().type() == parent_tensor->data_type());
-
- // NOTE SubTensor's layout must be the same with layout of parent tensor
- const auto &root_parent = findRootParent(parent);
- const auto &backend_layout = _tensor_layout_map[root_parent];
-
- auto shape = asTensorShape(tensor_info.shape(), ir::Layout::UNKNOWN, backend_layout, true);
- ::arm_compute::Coordinates coordinates =
- asTensorCoordinate(parent_info.coordinates, ir::Layout::UNKNOWN, backend_layout);
- _tensor_mgr->buildSubtensor(parent, current, shape, coordinates, tensor_info.shape().rank(),
- true);
- stack.pop();
- }
- }
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-bool AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::areSubTensorsOf(
- const ir::OperandIndex &parent, const ir::OperandIndexSequence &seq)
-{
- for (auto &cand : seq)
- {
- if (!isSubTensorOf(parent, cand))
- {
- return false;
- }
- }
- return true;
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-bool AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::isSubTensorOf(
- const ir::OperandIndex &parent, const ir::OperandIndex &child)
-{
- auto itr = _parent_map.find(child);
- if (itr == _parent_map.end())
- {
- return false;
- }
-
- return itr->second.parent == parent;
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-ir::OperandIndex
-AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::findRootParent(ir::OperandIndex ind)
-{
- if (_parent_map.find(ind) == _parent_map.end())
- return ind;
-
- auto parent_ind = _parent_map.at(ind).parent;
- return findRootParent(parent_ind);
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__
diff --git a/runtime/onert/backend/acl_common/AclTensorManager.h b/runtime/onert/backend/acl_common/AclTensorManager.h
deleted file mode 100644
index b999a39a9..000000000
--- a/runtime/onert/backend/acl_common/AclTensorManager.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
-#define __ONERT_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
-
-#include <arm_compute/runtime/IMemoryManager.h>
-
-#include "backend/ITensorManager.h"
-#include "AclMemoryManager.h"
-#include "AclInternalBufferManager.h"
-#include "ir/OperandIndexMap.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-class AclTensorManager : public backend::ITensorManager
-{
-public:
- using T_AclMemoryManager = AclMemoryManager<T_ITensor, T_Tensor, T_SubTensor>;
-
- AclTensorManager(T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
- IInternalBufferManager *inter_mgr);
-
- virtual ~AclTensorManager() = default;
-
- void allocateConsts(void);
- void allocateNonconsts(void);
- void deallocateConsts(void);
- void deallocateNonconsts(void);
-
- void allocateInternalBufferManager(void);
- void deallocateInternalBufferManager(void);
-
- void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank,
- bool as_const, size_t num_uses);
- void buildSubtensor(const ir::OperandIndex &parent, const ir::OperandIndex &child,
- const ::arm_compute::TensorShape &shape,
- const ::arm_compute::Coordinates &coordinates, size_t rank,
- bool extent_parent);
-
- std::shared_ptr<T_ITensor> findTensorAsParent(const ir::OperandIndex &ind);
-
- void startLifetime(const ir::OperandIndex &ind);
- void finishLifetime(const ir::OperandIndex &ind);
-
- std::shared_ptr<T_ITensor> at(const ir::OperandIndex &ind);
-
- ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
- ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &nonconstTensors(void);
- ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &nonconstSubtensors(void);
-
- std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void);
-
- void iterate(const std::function<void(const ir::OperandIndex &)> &fn);
-
- void tryDeallocConstants(void);
-
-private:
- std::unique_ptr<T_AclMemoryManager> _const_mgr;
- std::unique_ptr<T_AclMemoryManager> _nonconst_mgr;
- std::unique_ptr<IInternalBufferManager> _inter_mgr;
- ir::OperandIndexMap<T_AclMemoryManager &> _ind_to_mgr;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#include <cassert>
-#include "util/logging.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::AclTensorManager(
- T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
- IInternalBufferManager *inter_mgr)
- : _const_mgr{const_mgr}, _nonconst_mgr{nonconst_mgr}, _inter_mgr{inter_mgr}
-{
- // DO NOTHING
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::allocateConsts(void)
-{
- _const_mgr->allocate();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::allocateNonconsts(void)
-{
- _nonconst_mgr->allocate();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::deallocateConsts(void)
-{
- _const_mgr->deallocate();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::deallocateNonconsts(void)
-{
- _nonconst_mgr->deallocate();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::allocateInternalBufferManager(void)
-{
- _inter_mgr->allocate();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::deallocateInternalBufferManager(void)
-{
- _inter_mgr->deallocate();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::buildTensor(
- const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, bool as_const,
- size_t num_uses)
-{
- assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
- if (as_const)
- {
- _const_mgr->buildTensor(ind, info, rank, num_uses);
- _ind_to_mgr.insert({ind, *_const_mgr});
- }
- else
- {
- _nonconst_mgr->buildTensor(ind, info, rank, num_uses);
- _ind_to_mgr.insert({ind, *_nonconst_mgr});
- }
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::buildSubtensor(
- const ir::OperandIndex &parent, const ir::OperandIndex &child,
- const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates,
- size_t rank, bool extent_parent)
-{
- assert(_ind_to_mgr.find(child) == _ind_to_mgr.end());
- std::shared_ptr<T_ITensor> parent_tensor = findTensorAsParent(parent);
- assert(parent_tensor);
- _nonconst_mgr->buildSubtensor(parent_tensor, child, shape, coordinates, rank, extent_parent);
- _ind_to_mgr.insert({child, *_nonconst_mgr});
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-std::shared_ptr<T_ITensor>
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::findTensorAsParent(const ir::OperandIndex &ind)
-{
-
- auto &tensors = _nonconst_mgr->tensors();
- auto &subtensors = _nonconst_mgr->subtensors();
- if (tensors.find(ind) != tensors.end())
- {
- // Parent is allocated as tensor
- return tensors[ind];
- }
- else if (subtensors.find(ind) != subtensors.end())
- {
- // Parent is allocated as subtensor
- return subtensors[ind];
- }
- else
- {
- return nullptr;
- }
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::startLifetime(const ir::OperandIndex &ind)
-{
- assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
- _ind_to_mgr.at(ind).startLifetime(ind);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::finishLifetime(const ir::OperandIndex &ind)
-{
- assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
- _ind_to_mgr.at(ind).finishLifetime(ind);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-std::shared_ptr<T_ITensor>
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::at(const ir::OperandIndex &ind)
-{
- if (_ind_to_mgr.find(ind) == _ind_to_mgr.end())
- return nullptr;
-
- auto &tensors = _ind_to_mgr.at(ind).tensors();
- if (tensors.find(ind) != tensors.end())
- {
- return tensors.at(ind);
- }
- else
- {
- return _ind_to_mgr.at(ind).subtensors().at(ind);
- }
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::constTensors(void)
-{
- return _const_mgr->tensors();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::nonconstTensors(void)
-{
- return _nonconst_mgr->tensors();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::nonconstSubtensors(void)
-{
- return _nonconst_mgr->subtensors();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-std::shared_ptr<::arm_compute::IMemoryManager>
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::internal_buffer_manager(void)
-{
- return _inter_mgr->internal_buffer_manager();
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::iterate(
- const std::function<void(const ir::OperandIndex &)> &fn)
-{
- for (auto it : _nonconst_mgr->tensors())
- fn(it.first);
-
- for (auto it : _nonconst_mgr->subtensors())
- fn(it.first);
-
- for (auto it : _const_mgr->tensors())
- fn(it.first);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::tryDeallocConstants(void)
-{
- auto &tensors = _const_mgr->tensors();
-
- for (auto it = tensors.begin(); it != tensors.end();)
- {
- const auto &ind = it->first;
- auto tensor = it->second;
- // NOTE The condition "tensor->num_uses() < 2" is used to prevent deallocating a constant tensor
- // used in several nodes.
- if (tensor->handle() && !tensor->handle()->is_used() && tensor->num_uses() < 2)
- {
- VERBOSE(AclTensorManager) << "Tensor #" << ind.value()
- << " will be deallocated as an unused constant tensor" << std::endl;
- tensor->allocator()->free();
- tensor.reset();
- it = tensors.erase(it);
- }
- else
- {
- ++it;
- }
- }
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
diff --git a/runtime/onert/backend/acl_common/AclTensorRegistry.h b/runtime/onert/backend/acl_common/AclTensorRegistry.h
deleted file mode 100644
index 02d66db99..000000000
--- a/runtime/onert/backend/acl_common/AclTensorRegistry.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_ACL_TENSOR_REGISTRY_H__
-#define __ONERT_BACKEND_ACL_COMMON_ACL_TENSOR_REGISTRY_H__
-
-#include "backend/ITensorRegistry.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-/**
- * @brief Tensor registry class for acl backends
- *
- * This is implemented as a wrapper of AclTensorManager.
- */
-template <typename T_AclTensorManager> class AclTensorRegistry : public ITensorRegistry
-{
-public:
- AclTensorRegistry(T_AclTensorManager *tensor_mgr) : _tensor_mgr{tensor_mgr} {}
-
- ITensor *getITensor(const ir::OperandIndex &ind) override { return _tensor_mgr->at(ind).get(); }
-
- ITensor *getNativeITensor(const ir::OperandIndex &ind) override { return getITensor(ind); }
-
- auto getAclTensor(const ir::OperandIndex &ind) { return _tensor_mgr->at(ind).get(); }
-
-private:
- T_AclTensorManager *_tensor_mgr;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_ACL_TENSOR_REGISTRY_H__
diff --git a/runtime/onert/backend/acl_common/CMakeLists.txt b/runtime/onert/backend/acl_common/CMakeLists.txt
deleted file mode 100644
index d3ae5acf7..000000000
--- a/runtime/onert/backend/acl_common/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Unsupported architecture
-nnfw_find_package(ARMCompute QUIET)
-if(NOT ARMCompute_FOUND)
- return()
-endif(NOT ARMCompute_FOUND)
-
-file(GLOB SOURCES "*.cc")
-
-add_library(${LIB_ONERT_BACKEND_ACL_COMMON} STATIC ${SOURCES})
-
-target_include_directories(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC onert_core)
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC arm_compute arm_compute_ex)
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC nnfw_lib_misc)
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PRIVATE nnfw_common)
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PRIVATE nnfw_coverage)
-
-set_target_properties(${LIB_ONERT_BACKEND_ACL_COMMON} PROPERTIES POSITION_INDEPENDENT_CODE ON)
-set_target_properties(${LIB_ONERT_BACKEND_ACL_COMMON} PROPERTIES OUTPUT_NAME backend_acl_common)
diff --git a/runtime/onert/backend/acl_common/Convert.cc b/runtime/onert/backend/acl_common/Convert.cc
deleted file mode 100644
index 6ef6a2dc3..000000000
--- a/runtime/onert/backend/acl_common/Convert.cc
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Convert.h"
-
-#include "Swizzle.h"
-#include "ir/DataType.h"
-#include "ir/operation/ElementwiseActivation.h"
-#include <memory>
-
-namespace
-{
-
-::arm_compute::DataLayout asDataLayout(onert::ir::Layout layout)
-{
- switch (layout)
- {
- case onert::ir::Layout::NHWC:
- return ::arm_compute::DataLayout::NHWC;
- case onert::ir::Layout::NCHW:
- return ::arm_compute::DataLayout::NCHW;
- default:
- return ::arm_compute::DataLayout::UNKNOWN;
- }
-}
-
-} // namespace
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout,
- ir::Layout backend_layout, bool apply_dim_correction)
-{
- // If shape's rank is 0, the tensor is a scalar
- // Sometimes, some ACL kernel can use a scalar as tensor. But ACL does not allocate buffer for
- // tensor having rank as 0.
- const auto tensor_shape = shape.rank() == 0 ? ir::Shape{1} : shape;
-
- const uint32_t rank = tensor_shape.rank();
-
- ::arm_compute::TensorShape res{};
-
- res.set_num_dimensions(rank);
-
- for (uint32_t axis = 0; axis < rank; ++axis)
- {
- // NOTE In some cases, in incorrect dimensions is required.
- // For example, intput_size is 1 in LSTM. The input-to-input weights([num_units, input_size]) of
- // LSTM is used as the weight of the FullyConnected.
- // The FullyConnected's weight must be greater or equal than 2-dimensions.
- // However, if the dimension correction is applied to input_to_input_weights with input_size
- // equal to 1, it will be changed to 1-D.
- // So input_to_input_weights is not used by the weight of FullyConnected.
- res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(),
- tensor_shape.dim(axis), apply_dim_correction);
- }
-
- return res;
-}
-
-::arm_compute::Coordinates asTensorCoordinate(const ir::Coordinates &coord,
- ir::Layout frontend_layout, ir::Layout backend_layout)
-{
- const uint32_t rank = coord.size();
-
- ::arm_compute::Coordinates res{};
-
- res.set_num_dimensions(rank);
-
- for (uint32_t axis = 0; axis < rank; ++axis)
- {
- res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(), coord[axis]);
- }
-
- return res;
-}
-
-::arm_compute::DataType asDataType(const ir::DataType type)
-{
- switch (type)
- {
- case ir::DataType::FLOAT32:
- return ::arm_compute::DataType::F32;
- case ir::DataType::INT32:
- return ::arm_compute::DataType::S32;
- case ir::DataType::UINT32:
- return ::arm_compute::DataType::U32;
- case ir::DataType::QUANT_UINT8_ASYMM:
- return ::arm_compute::DataType::QASYMM8;
- case ir::DataType::BOOL8:
- case ir::DataType::UINT8:
- return ::arm_compute::DataType::U8;
- case ir::DataType::QUANT_INT8_SYMM:
- return ::arm_compute::DataType::S8;
- case ir::DataType::FLOAT16:
- return ::arm_compute::DataType::F16;
- case ir::DataType::INT64:
- return ::arm_compute::DataType::S64;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset)
-{
- return ::arm_compute::QuantizationInfo(scale, offset);
-}
-
-::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
- ir::Layout frontend_layout, ir::Layout backend_layout,
- bool apply_dim_correction)
-{
- ::arm_compute::TensorInfo info(
- asTensorShape(shape, frontend_layout, backend_layout, apply_dim_correction), 1,
- asDataType(typeInfo.type()), asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
- info.set_data_layout(asDataLayout(backend_layout));
- return info;
-}
-
-::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
- const ir::Stride &stride)
-{
- return ::arm_compute::PadStrideInfo{stride.horizontal,
- stride.vertical,
- padding.left,
- padding.right,
- padding.top,
- padding.bottom,
- ::arm_compute::DimensionRoundingType::FLOOR};
-}
-
-::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
-{
- switch (act_code)
- {
- case ir::Activation::NONE:
- return ::arm_compute::ActivationLayerInfo{};
- case ir::Activation::RELU:
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
- case ir::Activation::RELU1:
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
- case ir::Activation::RELU6:
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
- // Cases for activation of LSTM.
- case ir::Activation::TANH:
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
- case ir::Activation::SIGMOID:
- // NOTE The sigmoid function is a special case of the Logistic function when L=1, k=1, x0=0.
- // TODO In ACL and nnapi sepc, currently, Logistic's L always is 1, k always is 1, x0 always
- // 0(always sigmoid) regardless of values of the parameter.
- // If ACL support non-sigmoid logistic, should fix param values.
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.0f, 0.0f};
- default:
- throw std::runtime_error{"Not supported, yet"};
- break;
- }
-}
-
-::arm_compute::ActivationLayerInfo
-asActivationLayerInfo(const ir::operation::ElementwiseActivation::Type op_type, float alpha,
- float beta)
-{
- switch (op_type)
- {
- case ir::operation::ElementwiseActivation::Type::RELU:
- if (beta == 0.f)
- {
- if (alpha == ir::operation::ElementwiseActivation::infinity)
- {
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
- }
- else
- {
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, alpha};
- }
- }
- else
- {
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, alpha, beta};
- }
- case ir::operation::ElementwiseActivation::Type::TANH:
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, alpha, beta};
- case ir::operation::ElementwiseActivation::Type::LOGISTIC:
- // NOTE The sigmoid function is a special case of the Logistic function when L=1, k=1, x0=0.
- // TODO In ACL and nnapi sepc, currently, Logistic's L always is 1, k always is 1, x0 always
- // 0(always sigmoid) regardless of values of the parameter.
- // If ACL support non-sigmoid logistic, should fix param values.
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC};
- case ir::operation::ElementwiseActivation::Type::LEAKY_RELU:
- return ::arm_compute::ActivationLayerInfo{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::LEAKY_RELU, alpha};
- default:
- throw std::runtime_error{"Not supported, yet"};
- break;
- }
-}
-
-arm_compute::Coordinates asCoordinates(const ir::Operand &operand, int32_t rank,
- ir::Layout frontend_layout, ir::Layout backend_layout)
-{
- std::set<uint32_t> axes = asSet(operand, rank, frontend_layout, backend_layout);
-
- arm_compute::Coordinates reduce_axes;
- for (const int32_t axis : axes)
- {
- reduce_axes.set(reduce_axes.num_dimensions(), axis);
- }
-
- return reduce_axes;
-}
-
-std::set<uint32_t> asSet(const ir::Operand &operand, int32_t rank, ir::Layout frontend_layout,
- ir::Layout backend_layout)
-{
- std::set<std::uint32_t> axes;
-
- for (size_t i = 0; i < operand.shape().num_elements(); ++i)
- {
- int32_t axis = 0;
- switch (operand.typeInfo().type())
- {
- case ir::DataType::INT32:
- axis = reinterpret_cast<const int32_t *>(operand.data()->base())[i];
- break;
- case ir::DataType::INT64:
- axis = reinterpret_cast<const int64_t *>(operand.data()->base())[i];
- break;
- default:
- throw std::runtime_error("acl_common::asSet: Not supported data type");
- }
- if (axis < 0)
- axis += rank;
- axes.insert(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value());
- }
-
- return axes;
-}
-
-std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer)
-{
- return std::make_unique<AclFunction>(std::move(layer));
-}
-
-ir::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout)
-{
- switch (data_layout)
- {
- case ::arm_compute::DataLayout::NHWC:
- return ir::Layout::NHWC;
- case ::arm_compute::DataLayout::NCHW:
- return ir::Layout::NCHW;
- default:
- return ir::Layout::UNKNOWN;
- }
-}
-
-ir::DataType asRuntimeDataType(::arm_compute::DataType data_type)
-{
- switch (data_type)
- {
- case ::arm_compute::DataType::F32:
- return ir::DataType::FLOAT32;
- case ::arm_compute::DataType::S32:
- return ir::DataType::INT32;
- case ::arm_compute::DataType::U32:
- return ir::DataType::UINT32;
- case ::arm_compute::DataType::QASYMM8:
- return ir::DataType::QUANT_UINT8_ASYMM;
- case ::arm_compute::DataType::U8:
- return ir::DataType::UINT8;
- case ::arm_compute::DataType::QSYMM8:
- return ir::DataType::QUANT_INT8_SYMM;
- case ::arm_compute::DataType::F16:
- return ir::DataType::FLOAT16;
- case ::arm_compute::DataType::S64:
- return ir::DataType::INT64;
- default:
- throw std::runtime_error{"Not supported, yet"};
- break;
- }
-}
-
-arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir)
-{
- switch (pool_type_ir)
- {
- case ir::operation::Pool2D::PoolType::AVG:
- return arm_compute::PoolingType::AVG;
- case ir::operation::Pool2D::PoolType::L2:
- return arm_compute::PoolingType::L2;
- case ir::operation::Pool2D::PoolType::MAX:
- return arm_compute::PoolingType::MAX;
- default:
- throw std::runtime_error("convertPoolType: Not supported operation yet");
- }
-}
-
-arm_compute::ReduceOperation convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir)
-{
- switch (reduce_type_ir)
- {
- case ir::operation::Reduce::ReduceType::MAX:
- return arm_compute::ReduceOperation::MAX;
- case ir::operation::Reduce::ReduceType::MIN:
- return arm_compute::ReduceOperation::MIN;
- case ir::operation::Reduce::ReduceType::SUM:
- return arm_compute::ReduceOperation::SUM;
- default:
- throw std::runtime_error("convertReduceType: Not supported operation yet");
- }
-}
-
-arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
-{
- assert(operand.isConstant());
- assert(operand.shape().num_elements() == 1);
- switch (operand.typeInfo().type())
- {
- case ir::DataType::INT32:
- return arm_compute::PixelValue(operand.asScalar<int32_t>());
- case ir::DataType::INT64:
- return arm_compute::PixelValue(operand.asScalar<int64_t>());
- case ir::DataType::UINT32:
- return arm_compute::PixelValue(operand.asScalar<uint64_t>());
- case ir::DataType::UINT8:
- return arm_compute::PixelValue(operand.asScalar<uint8_t>());
- case ir::DataType::FLOAT32:
- return arm_compute::PixelValue(operand.asScalar<float>());
- default:
- throw std::runtime_error("asPixelValue : Not supported datatype yet");
- }
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_common/Convert.h b/runtime/onert/backend/acl_common/Convert.h
deleted file mode 100644
index 0b36df102..000000000
--- a/runtime/onert/backend/acl_common/Convert.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_CONVERT_H__
-#define __ONERT_BACKEND_ACL_COMMON_CONVERT_H__
-
-#include <arm_compute/core/PixelValue.h>
-#include <arm_compute/core/TensorInfo.h>
-#include <arm_compute/core/SubTensorInfo.h>
-#include <arm_compute/core/TensorShape.h>
-#include <arm_compute/core/TypesEx.h>
-
-#include "ir/Layout.h"
-#include "ir/InternalType.h"
-#include "ir/Operand.h"
-#include "ir/operation/Pool2D.h"
-#include "ir/operation/Reduce.h"
-#include "ir/operation/ElementwiseActivation.h"
-#include "ir/Shape.h"
-#include "ir/TypeInfo.h"
-#include "ir/Coordinates.h"
-#include "ir/Padding.h"
-#include "misc/feature/Shape.h"
-#include "misc/kernel/Shape.h"
-
-#include "AclFunction.h"
-
-#include <set>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout,
- ir::Layout backend_layout,
- bool apply_dim_correction = true);
-::arm_compute::Coordinates asTensorCoordinate(const ir::Coordinates &coord,
- ir::Layout frontend_layout,
- ir::Layout backend_layout);
-::arm_compute::DataType asDataType(ir::DataType type);
-::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
- ir::Layout frontend_layout, ir::Layout backend_layout,
- bool apply_dim_correction = true);
-
-::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
- const ir::Stride &stride);
-
-::arm_compute::ActivationLayerInfo asActivationLayerInfo(ir::Activation act_code);
-::arm_compute::ActivationLayerInfo
-asActivationLayerInfo(const ir::operation::ElementwiseActivation::Type op_type, float alpha,
- float beta);
-
-arm_compute::Coordinates asCoordinates(const ir::Operand &operand, int32_t rank,
- ir::Layout frontend_layout, ir::Layout backend_layout);
-
-std::set<uint32_t> asSet(const ir::Operand &operand, int32_t rank, ir::Layout frontend_layout,
- ir::Layout backend_layout);
-
-std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer);
-
-template <typename T_Function>
-std::unique_ptr<T_Function> asFunction(std::unique_ptr<::arm_compute::IFunction> &&fn)
-{
- return std::make_unique<T_Function>(std::move(fn));
-}
-
-ir::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout);
-ir::DataType asRuntimeDataType(::arm_compute::DataType data_type);
-
-arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir);
-arm_compute::ReduceOperation convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir);
-
-arm_compute::PixelValue asPixelValue(const ir::Operand &operand);
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_CONVERT_H__
diff --git a/runtime/onert/backend/acl_common/IACLTensor.cc b/runtime/onert/backend/acl_common/IACLTensor.cc
deleted file mode 100644
index 70988bd11..000000000
--- a/runtime/onert/backend/acl_common/IACLTensor.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "IACLTensor.h"
-#include "Convert.h"
-#include "Swizzle.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-size_t IACLTensor::dimension(size_t index) const
-{
- // Assume that the front is higher dimensional.
- // i.g. N: 0, C: 1, H: 2, W: 3 for NCHW layout
- // NOTE This tensor must not be applied dim correction
- auto rank = num_dimensions();
- rank = rank == 0 ? 1 : rank;
- assert(rank > index);
- const ARMComputeAxis reversed{(static_cast<uint32_t>(rank - index) - 1)};
- return info()->dimension(reversed.value());
-}
-
-size_t IACLTensor::calcOffset(const ir::Coordinates &coords) const
-{
- auto rank = num_dimensions();
- rank = rank == 0 ? 1 : rank;
- assert(rank == coords.size());
-
- ::arm_compute::Coordinates acl_coords;
- for (uint32_t i = 0; i < rank; ++i)
- {
- const ARMComputeAxis reversed{static_cast<uint32_t>((rank - i) - 1)};
- acl_coords.set(reversed.value(), coords[i]);
- }
-
- return info()->offset_element_in_bytes(acl_coords);
-}
-
-ir::Layout IACLTensor::layout() const { return acl_common::asRuntimeLayout(info()->data_layout()); }
-
-ir::DataType IACLTensor::data_type() const
-{
- return acl_common::asRuntimeDataType(info()->data_type());
-}
-
-float IACLTensor::data_scale() const
-{
- // FIXME What if quantization info is non-uniform?
- return info()->quantization_info().uniform().scale;
-}
-
-int32_t IACLTensor::data_offset() const
-{
- // FIXME What if quantization info is non-uniform?
- return info()->quantization_info().uniform().offset;
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_common/IACLTensor.h b/runtime/onert/backend/acl_common/IACLTensor.h
deleted file mode 100644
index 3d1268940..000000000
--- a/runtime/onert/backend/acl_common/IACLTensor.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_I_ACL_TENSOR_H__
-#define __ONERT_BACKEND_ACL_COMMON_I_ACL_TENSOR_H__
-
-#include <backend/ITensor.h>
-#include <arm_compute/core/ITensor.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-/**
- * @brief Class representing Tensor for ACL
- * @todo Override is_dynamic() method. We don't support dynamic tensor for ACL yet as of Apr, 2020.
- * FYI, ACL ITensorInfo has is_dynamic() method, which seems currently not used.
- * Maybe for ACL, this method can be implemented using ITensorInfo::is_dynamic() in future.
- */
-class IACLTensor : public ITensor
-{
-public:
- IACLTensor() = default;
- IACLTensor(const IACLTensor &) = delete;
- IACLTensor &operator=(const IACLTensor &) = delete;
- IACLTensor(IACLTensor &&) = default;
- IACLTensor &operator=(IACLTensor &&) = default;
-
-public:
- uint8_t *buffer() const final { return handle()->buffer(); }
- size_t total_size() const final { return info()->total_size(); }
- size_t dimension(size_t index) const final;
- size_t calcOffset(const ir::Coordinates &coords) const final;
- ir::Layout layout() const final;
- ir::DataType data_type() const final;
- float data_scale() const override;
- int32_t data_offset() const override;
- bool has_padding() const override { return info()->has_padding(); }
- bool is_dynamic() const override { return false; }
-
-public:
- virtual const arm_compute::ITensor *handle() const = 0;
- virtual arm_compute::ITensor *handle() = 0;
-
- const arm_compute::ITensorInfo *info() const { return handle()->info(); }
- arm_compute::ITensorInfo *info() { return handle()->info(); }
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif //__ONERT_BACKEND_ACL_COMMON_I_ACL_TENSOR_H__
diff --git a/runtime/onert/backend/acl_common/ParentInfo.h b/runtime/onert/backend/acl_common/ParentInfo.h
deleted file mode 100644
index 708436327..000000000
--- a/runtime/onert/backend/acl_common/ParentInfo.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_PARENT_INFO_H__
-#define __ONERT_BACKEND_ACL_COMMON_PARENT_INFO_H__
-
-#include <ir/Index.h>
-#include <ir/Coordinates.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-/**
- * @brief Struct to represent parent operand in child operand
- */
-struct ParentInfo
-{
- ir::OperandIndex parent;
- ir::Layout frontend_layout;
- ir::Coordinates coordinates;
-};
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_PARENT_INFO_H__
diff --git a/runtime/onert/backend/acl_common/Swizzle.h b/runtime/onert/backend/acl_common/Swizzle.h
deleted file mode 100644
index e1c7f8041..000000000
--- a/runtime/onert/backend/acl_common/Swizzle.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_COMMON_SWIZZLE_H__
-#define __ONERT_BACKEND_ACL_COMMON_SWIZZLE_H__
-
-#include <cassert>
-#include <ir/Layout.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_common
-{
-
-class ARMComputeAxis
-{
-public:
- ARMComputeAxis() = default;
-
-public:
- explicit ARMComputeAxis(uint32_t value) : _value{value}
- {
- // DO NOTHING
- }
-
-public:
- uint32_t value(void) const { return _value; }
-
-private:
- uint32_t _value;
-};
-
-// Convert axis in acl order
-inline ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis,
- const ir::Layout org_layout = ir::Layout::UNKNOWN,
- const ir::Layout acl_layout = ir::Layout::UNKNOWN)
-{
- assert(rank > axis);
-
- const ARMComputeAxis reversed{(rank - axis) - 1};
-
- if (rank >= 4 && org_layout == ir::Layout::NHWC && acl_layout == ir::Layout::NCHW)
- {
- // NHWC -> WHCN
- // DEPTH
- if (0 == reversed.value())
- {
- return ARMComputeAxis{2};
- }
- // WIDTH
- if (1 == reversed.value())
- {
- return ARMComputeAxis{0};
- }
- // HEIGHT
- if (2 == reversed.value())
- {
- return ARMComputeAxis{1};
- }
- }
- if (rank >= 4 && org_layout == ir::Layout::NCHW && acl_layout == ir::Layout::NHWC)
- {
- // NCHW -> CWHN
- // WIDTH
- if (0 == reversed.value())
- {
- return ARMComputeAxis{1};
- }
- // HEIGHT
- if (1 == reversed.value())
- {
- return ARMComputeAxis{2};
- }
- // DEPTH
- if (2 == reversed.value())
- {
- return ARMComputeAxis{0};
- }
- }
-
- return reversed;
-}
-
-inline ::arm_compute::Coordinates
-getARMComputeAxises(uint32_t rank, const ir::Layout org_layout = ir::Layout::UNKNOWN,
- const ir::Layout acl_layout = ir::Layout::UNKNOWN)
-{
- ::arm_compute::Coordinates res{};
-
- res.set_num_dimensions(rank);
-
- for (uint32_t axis = 0; axis < rank; ++axis)
- {
- res.set(axis, ToARMComputeAxis(rank, axis, org_layout, acl_layout).value());
- }
-
- return res;
-}
-
-// Restructure runtime_permutationVector to ACL_permutationVector
-inline ::arm_compute::PermutationVector
-getARMComputePermutationVector(uint32_t rank, const std::vector<int32_t> runtime_pv,
- const ir::Layout org_layout = ir::Layout::UNKNOWN,
- const ir::Layout acl_layout = ir::Layout::UNKNOWN)
-{
- // rank upto 4 is supported
- assert(rank <= 4);
- assert(runtime_pv.size() > 0);
-
- int new_pv[4] = {0};
- ::arm_compute::Coordinates axises = getARMComputeAxises(rank, org_layout, acl_layout);
-
- for (uint32_t i = 0; i < rank; ++i)
- {
- new_pv[axises[i]] = ToARMComputeAxis(rank, runtime_pv[i], org_layout, acl_layout).value();
- }
-
- ::arm_compute::PermutationVector ACL_PV =
- ::arm_compute::PermutationVector{new_pv[0], new_pv[1], new_pv[2], new_pv[3]};
- ACL_PV.set_num_dimensions(rank);
-
- return ACL_PV;
-}
-
-template <typename T>
-inline T ReorderBits(T in, size_t numOfBits, const ir::Layout org_layout = ir::Layout::UNKNOWN,
- const ir::Layout acl_layout = ir::Layout::UNKNOWN)
-{
- assert(numOfBits > 0);
- T out = 0;
- for (int32_t i = numOfBits - 1; i >= 0; --i)
- {
- const uint32_t toShift =
- numOfBits - ToARMComputeAxis(numOfBits, i, org_layout, acl_layout).value() - 1;
- out += ((in & 1) << toShift);
- in >>= 1;
- }
- return out;
-}
-
-} // namespace acl_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_COMMON_SWIZZLE_H__
diff --git a/runtime/onert/backend/acl_neon/Backend.h b/runtime/onert/backend/acl_neon/Backend.h
deleted file mode 100644
index 35d6e4e8e..000000000
--- a/runtime/onert/backend/acl_neon/Backend.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_BACKEND_H__
-#define __ONERT_BACKEND_ACL_NEON_BACKEND_H__
-
-#include <memory>
-#include <backend/Backend.h>
-#include <ir/Operands.h>
-
-#include "Config.h"
-#include "ConstantInitializer.h"
-#include "KernelGenerator.h"
-#include "TensorManager.h"
-#include "Optimizer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-class Backend : public ::onert::backend::Backend
-{
-public:
- Backend() : _config{std::make_shared<Config>()} {}
-
- std::shared_ptr<IConfig> config() const override { return _config; }
-
- std::unique_ptr<BackendContext> newContext(const ir::Graph &graph,
- const std::shared_ptr<custom::IKernelBuilder> &,
- bool is_linear_executor) const override
- {
- const auto &operands = graph.operands();
- const auto &operations = graph.operations();
- auto context = std::make_unique<BackendContext>(this, &graph);
- auto tm = createTensorManager(is_linear_executor);
- auto tr = std::make_shared<acl_common::AclTensorRegistry<TensorManager>>(tm);
- auto tb = std::make_shared<TensorBuilder>(operands, tm, tr);
- context->tensor_registry = tr;
- context->tensor_builder = tb;
- context->constant_initializer = std::make_shared<ConstantInitializer>(operands, tr);
- context->kernel_gen = std::make_shared<KernelGenerator>(operands, operations, tb, tr);
- context->tensor_register = nullptr;
- context->optimizer = std::make_shared<Optimizer>(context.get());
- return context;
- }
-
-private:
- std::shared_ptr<IConfig> _config;
-};
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_BACKEND_H__
diff --git a/runtime/onert/backend/acl_neon/CMakeLists.txt b/runtime/onert/backend/acl_neon/CMakeLists.txt
deleted file mode 100644
index 03d4946e0..000000000
--- a/runtime/onert/backend/acl_neon/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Unsupported architecture
-nnfw_find_package(ARMCompute QUIET)
-if(NOT ARMCompute_FOUND)
- return()
-endif(NOT ARMCompute_FOUND)
-
-set(LIB_ONERT_BACKEND_ACL_NEON onert_backend_acl_neon)
-
-file(GLOB_RECURSE SOURCES "*.cc")
-
-add_library(${LIB_ONERT_BACKEND_ACL_NEON} SHARED ${SOURCES})
-
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_NEON} PRIVATE ${LIB_ONERT_BACKEND_ACL_COMMON})
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_NEON} PRIVATE nnfw_common)
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_NEON} PRIVATE nnfw_coverage)
-
-set_target_properties(${LIB_ONERT_BACKEND_ACL_NEON} PROPERTIES OUTPUT_NAME backend_acl_neon)
-
-install(TARGETS ${LIB_ONERT_BACKEND_ACL_NEON} DESTINATION lib)
diff --git a/runtime/onert/backend/acl_neon/Config.cc b/runtime/onert/backend/acl_neon/Config.cc
deleted file mode 100644
index 4e78efd2d..000000000
--- a/runtime/onert/backend/acl_neon/Config.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Config.h"
-
-#include <util/ConfigSource.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-bool Config::initialize() { return true; }
-
-ir::Layout Config::supportLayout(const ir::Operation &, ir::Layout frontend_layout)
-{
- const std::string acl_layout_str = util::getConfigString(util::config::ACL_LAYOUT);
- if (acl_layout_str == "NHWC")
- {
- return ir::Layout::NHWC;
- }
- else if (acl_layout_str == "NCHW")
- {
- return ir::Layout::NCHW;
- }
-
- return frontend_layout;
-}
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_neon/Config.h b/runtime/onert/backend/acl_neon/Config.h
deleted file mode 100644
index 089d9479a..000000000
--- a/runtime/onert/backend/acl_neon/Config.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_CONFIG_H__
-#define __ONERT_BACKEND_ACL_NEON_CONFIG_H__
-
-#include <backend/IConfig.h>
-#include <memory>
-#include <util/ITimer.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-class Config : public IConfig
-{
-public:
- std::string id() override { return "acl_neon"; }
- bool initialize() override;
- ir::Layout supportLayout(const ir::Operation &node, ir::Layout frontend_layout) override;
- bool supportPermutation() override { return true; }
- bool supportDynamicTensor() override { return false; }
- bool supportFP16() override { return false; }
-
- std::unique_ptr<util::ITimer> timer() override { return std::make_unique<util::CPUTimer>(); }
-};
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_CONFIG_H__
diff --git a/runtime/onert/backend/acl_neon/ConstantInitializer.cc b/runtime/onert/backend/acl_neon/ConstantInitializer.cc
deleted file mode 100644
index 79edb9ded..000000000
--- a/runtime/onert/backend/acl_neon/ConstantInitializer.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConstantInitializer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg)
- : acl_common::AclConstantInitializer{operands, tensor_reg}
-{
- // DO NOTHING
-}
-
-void ConstantInitializer::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto &block_size_index = node.getInputs().at(ir::operation::SpaceToBatchND::BLOCK_SIZE);
- const auto &block_size_obj = _operands.at(block_size_index);
-
- if (block_size_obj.isConstant())
- {
- _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::ITensor &obj) {
- assert(model_obj.data());
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data()->base());
- assert(model_obj.shape().rank() == 1);
- obj.access([&](ITensor &tensor) {
- for (size_t i = 0; i < shape.num_elements(); ++i)
- {
- const int32_t value = base[shape.num_elements() - i - 1];
- int32_t *into = reinterpret_cast<int32_t *>(tensor.buffer() +
- tensor.calcOffset({static_cast<int32_t>(i)}));
- *into = value;
- }
- });
- };
- }
-
- const auto &paddings_index = node.getInputs().at(ir::operation::SpaceToBatchND::PADDINGS);
- const auto &paddings_obj = _operands.at(paddings_index);
- if (paddings_obj.isConstant())
- {
- _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::ITensor &obj) {
- assert(model_obj.data());
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data()->base());
- assert(model_obj.shape().rank() == 2);
- assert(shape.dim(0) == 2);
- assert(shape.dim(1) == 2);
- obj.access([&](ITensor &tensor) {
- for (auto i = 0; i < shape.dim(0); ++i)
- {
- for (auto j = 0; j < shape.dim(1); ++j)
- {
- const int32_t value = base[i * 2 + j];
- int32_t *into = reinterpret_cast<int32_t *>(
- // The coordinates of NETensor are different from the coordiantes of CLTensor in
- // this operand.
- // NEON : {j, reversed i}
- // CL : {reversed i, j}
- tensor.buffer() + tensor.calcOffset({j, shape.dim(0) - i - 1}));
- *into = value;
- }
- }
- });
- };
- }
-}
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_neon/ConstantInitializer.h b/runtime/onert/backend/acl_neon/ConstantInitializer.h
deleted file mode 100644
index c7d71cdcf..000000000
--- a/runtime/onert/backend/acl_neon/ConstantInitializer.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_ACL_NEON_CONSTANT_INITIALIZER_H__
-#define __ONERT_COMPILER_ACL_NEON_CONSTANT_INITIALIZER_H__
-
-#include "AclConstantInitializer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-class ConstantInitializer : public acl_common::AclConstantInitializer
-{
-public:
- ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg);
-
-public:
- using acl_common::AclConstantInitializer::visit;
- void visit(const ir::operation::SpaceToBatchND &node) final;
-};
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_COMPILER_ACL_NEON_CONSTANT_INITIALIZER_H__
diff --git a/runtime/onert/backend/acl_neon/KernelGenerator.cc b/runtime/onert/backend/acl_neon/KernelGenerator.cc
deleted file mode 100644
index 598d043e7..000000000
--- a/runtime/onert/backend/acl_neon/KernelGenerator.cc
+++ /dev/null
@@ -1,1437 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "KernelGenerator.h"
-
-#include <arm_compute/runtime/NEON/NEFunctions.h> // Include all ARM Compute NEON functions
-#include <arm_compute/runtime/NEON/NEFunctionsEx.h> // Include all ARM Compute EX NEON functions
-
-#include <AclActivationBuilder.h>
-#include <AclFunction.h>
-#include <Convert.h>
-#include <Swizzle.h>
-
-#include "ir/Index.h"
-#include "ir/DataType.h"
-#include "ir/InternalType.h"
-#include "exec/NopFunction.h"
-#include "util/logging.h"
-#include "util/Utils.h"
-#include "AclKernelGen.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-using ::onert::backend::acl_common::asAclFunction;
-using ActivationBuilder = ::onert::backend::acl_common::AclActivationBuilder<
- ::arm_compute::ITensor, ::arm_compute::NEActivationLayer, acl_common::AclFunction>;
-
-KernelGenerator::KernelGenerator(
- const ir::Operands &operands_ctx, const ir::Operations &operations_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<acl_common::AclTensorRegistry<TensorManager>> &tensor_reg)
- : _ctx(operands_ctx), _operations_ctx(operations_ctx), _tensor_builder(tensor_builder),
- _tensor_reg(tensor_reg), _current_op_seq_layout(ir::Layout::UNKNOWN)
-{
- // DO NOTHING
-}
-
-void KernelGenerator::visit(const ir::OpSequence &op_seq)
-{
- // TODO Move this to IKernelGenerator
- // (all derivatives have the same implementation for this)
- assert(!_return_fn_seq);
- _return_fn_seq = std::make_unique<exec::FunctionSequence>();
- _return_fn_seq->enableDynamicShapeInferer(false);
-
- _current_op_seq_layout = op_seq.getLayout();
- for (const auto &operation_idx : op_seq.operations())
- {
- const auto &node = _operations_ctx.at(operation_idx);
- node.accept(*this);
- _return_fn_seq->append(releaseFunction());
- }
-}
-
-void KernelGenerator::visit(const ir::operation::ArgMax &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ArgMax::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::ArgMax::Input::AXIS)};
-
- const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto frontend_layout = _current_op_seq_layout;
- auto backend_layout = ifm_tensor->layout();
-
- int axis_value = _ctx.at(axis_index).asScalar<int32_t>();
- if (axis_value < 0)
- {
- axis_value += ifm_rank;
- }
- assert(axis_value >= 0 && axis_value < ifm_rank);
- const auto fixed_axis =
- acl_common::ToARMComputeAxis(ifm_rank, axis_value, frontend_layout, backend_layout).value();
-
- auto fn = acl_common::generateLayer<arm_compute::NEArgMinMaxLayer>(
- ifm_tensor->handle(), fixed_axis, ofm_tensor->handle(),
- arm_compute::ReductionOperation::ARG_IDX_MAX);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::BatchToSpaceND &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::BatchToSpaceND::Input::INPUT)};
- const auto block_size_index{
- node.getInputs().at(ir::operation::BatchToSpaceND::Input::BLOCK_SIZE)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto block_size_tensor = _tensor_reg->getAclTensor(block_size_index);
-
- assert(_ctx.at(block_size_index).data());
-
- auto fn = acl_common::generateLayer<arm_compute::NEBatchToSpaceLayer>(
- ifm_tensor->handle(), block_size_tensor->handle(), ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::BinaryArithmetic &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::RHS)};
-
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);
-
- std::unique_ptr<arm_compute::IFunction> fn;
- switch (node.param().arithmetic_type)
- {
- case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
- {
- fn = acl_common::generateLayer<arm_compute::NEArithmeticAddition>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(),
- arm_compute::ConvertPolicy::SATURATE);
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
- {
- fn = acl_common::generateLayer<arm_compute::NEArithmeticSubtraction>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(),
- arm_compute::ConvertPolicy::SATURATE);
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
- {
- // RoundingPolicy for scale:1.0 is only allowed RoundingPolicy::TO_ZERO
- fn = acl_common::generateLayer<arm_compute::NEPixelWiseMultiplication>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), 1.0, // scale
- arm_compute::ConvertPolicy::SATURATE, arm_compute::RoundingPolicy::TO_ZERO);
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::DIV:
- {
- fn = acl_common::generateLayer<arm_compute::NEElementwiseDivision>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle());
- break;
- }
- default:
- assert(false && "The BinaryArithmetic operation supports only binary arithmetic operations");
- break;
- }
- _return_fn = std::make_unique<exec::FunctionSequence>(
- asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle()));
-}
-
-void KernelGenerator::visit(const ir::operation::Conv2D &node)
-{
- using ir::operation::Conv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
- const auto &ker_shape = _ctx.at(ker_index).shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
-
- const auto stride = node.param().stride;
- const auto padding = ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride,
- ker_width, ker_height);
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
- auto bias_tensor = _tensor_reg->getAclTensor(bias_index);
-
- const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
- const auto act_info = acl_common::asActivationLayerInfo(activation);
-
- auto fn = acl_common::generateLayer<arm_compute::NEConvolutionLayer>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), ifm_tensor->handle(),
- ker_tensor->handle(), bias_tensor->handle(), ofm_tensor->handle(), conv_info,
- ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::DepthToSpace &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::DepthToSpace::Input::INPUT)};
-
- auto block_size = node.param().block_size;
- assert(block_size > 0);
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEDepthToSpaceLayer>(
- input_tensor->handle(), output_tensor->handle(), block_size);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
-{
- using ir::operation::DepthwiseConv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(DepthwiseConv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(DepthwiseConv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(DepthwiseConv2D::Input::BIAS)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- // Kernel format is [1, kernel_height, kernel_width, depth_out].
- const auto &ker_shape = _ctx.at(ker_index).shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
-
- const auto stride = node.param().stride;
- const auto padding = ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride,
- ker_width, ker_height);
- const auto multiplier = node.param().multiplier;
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
- auto bias_tensor = _tensor_reg->getAclTensor(bias_index);
-
- const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
- const auto act_info = acl_common::asActivationLayerInfo(activation);
-
- {
- auto fn = acl_common::generateLayer<arm_compute::NEDepthwiseConvolutionLayer>(
- ifm_tensor->handle(), ker_tensor->handle(), bias_tensor->handle(), ofm_tensor->handle(),
- conv_info, multiplier, act_info);
-
- _return_fn = asAclFunction(std::move(fn));
- }
-}
-
-void KernelGenerator::visit(const ir::operation::Concat &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- std::vector<ir::OperandIndex> input_indexes;
- for (const auto &input : node.getInputs())
- input_indexes.emplace_back(input);
-
- const auto axis = node.param().axis;
-
- // Concat elimination check
- bool eliminated = _tensor_builder->areSubTensorsOf(ofm_index, node.getInputs());
- if (eliminated)
- {
- // If concat eliminated, return a NOP IFunction
- VERBOSE(acl_neon_KernelGenerator_Concat) << "Concat eliminated" << std::endl;
- _return_fn = std::make_unique<exec::NopFunction>();
- return;
- }
-
- auto output_tensor = _tensor_reg->getAclTensor(ofm_index);
- std::vector<::arm_compute::ITensor *> input_tensors;
- for (const auto &ifm_ind : input_indexes)
- input_tensors.emplace_back(_tensor_reg->getAclTensor(ifm_ind)->handle());
-
- std::unique_ptr<::arm_compute::IFunction> fn;
- if (input_indexes.size() < 2)
- {
- fn = acl_common::generateLayer<arm_compute::NECopy>(input_tensors.at(0),
- output_tensor->handle());
- }
- else
- {
- const auto rank = _ctx.at(ofm_index).shape().rank();
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = output_tensor->layout();
- const auto fixed_axis =
- acl_common::ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value();
- fn = acl_common::generateLayer<arm_compute::NEConcatenateLayer>(
- input_tensors, output_tensor->handle(), fixed_axis);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseActivation &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ElementwiseActivation::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- const ::arm_compute::ActivationLayerInfo act_info = acl_common::asActivationLayerInfo(
- node.param().op_type, node.param().alpha, node.param().beta);
-
- std::unique_ptr<arm_compute::IFunction> fn;
- if (node.param().op_type == ir::operation::ElementwiseActivation::Type::LOGISTIC)
- {
- // NOTE NEActivationLayer can generate produce erroneous results. it were caused by
- // 'vexpq_f32()'.
- // The neon function returns a value outside of the limit of representation in float as 'NaN'
- // instead of 'INF', and then the result of this op will be errors due to the 'NaN'.
- fn = acl_common::generateLayer<arm_compute::NEActivationLayerEx>(
- ifm_tensor->handle(), ofm_tensor->handle(), act_info);
- }
- else
- {
- fn = acl_common::generateLayer<arm_compute::NEActivationLayer>(ifm_tensor->handle(),
- ofm_tensor->handle(), act_info);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseBinary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::RHS)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);
-
- std::unique_ptr<arm_compute::IFunction> fn;
- switch (node.param().op_type)
- {
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND:
- {
- fn = acl_common::generateLayer<arm_compute::NELogicalAnd>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR:
- {
- fn = acl_common::generateLayer<arm_compute::NELogicalOr>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX:
- {
- fn = acl_common::generateLayer<arm_compute::NEElementwiseMax>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN:
- {
- fn = acl_common::generateLayer<arm_compute::NEElementwiseMin>(
- lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
- break;
- }
- default:
- {
- std::string err_msg("acl_neon KernelGenerator : " + node.name() +
- "is not elementwise-binary operations");
- assert(false && err_msg.c_str());
- break;
- }
- }
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseUnary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- std::unique_ptr<arm_compute::IFunction> fn;
- switch (node.param().op_type)
- {
- case ir::operation::ElementwiseUnary::Type::ABS:
- {
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::ABS};
-
- fn = acl_common::generateLayer<arm_compute::NEActivationLayer>(
- input_tensor->handle(), output_tensor->handle(), act_info);
- break;
- }
- case ir::operation::ElementwiseUnary::Type::CAST:
- {
- if (input_tensor->data_type() == output_tensor->data_type())
- {
- fn = acl_common::generateLayer<arm_compute::NECopy>(input_tensor->handle(),
- output_tensor->handle());
- }
- else if (_ctx.at(input_index).typeInfo().type() == ir::DataType::BOOL8)
- {
- fn = acl_common::generateLayer<arm_compute::NECastBool>(input_tensor->handle(),
- output_tensor->handle());
- }
- else
- {
- fn = acl_common::generateLayer<arm_compute::NECast>(
- input_tensor->handle(), output_tensor->handle(), arm_compute::ConvertPolicy::SATURATE);
- }
- break;
- }
- case ir::operation::ElementwiseUnary::Type::DEQUANTIZE:
- {
- fn = acl_common::generateLayer<arm_compute::NEDequantizationLayer>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::EXP:
- {
- fn = acl_common::generateLayer<arm_compute::NEExpLayer>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::FLOOR:
- {
- fn = acl_common::generateLayer<arm_compute::NEFloor>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::LOGICAL_NOT:
- {
- fn = acl_common::generateLayer<arm_compute::NEBitwiseNot>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::NEG:
- {
- fn = acl_common::generateLayer<arm_compute::NENegLayer>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::RSQRT:
- {
- fn = acl_common::generateLayer<arm_compute::NERsqrtLayer>(input_tensor->handle(),
- output_tensor->handle());
- break;
- }
- case ir::operation::ElementwiseUnary::Type::SQRT:
- {
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::SQRT};
-
- fn = acl_common::generateLayer<arm_compute::NEActivationLayer>(
- input_tensor->handle(), output_tensor->handle(), act_info);
- break;
- }
- default:
- {
- throw std::runtime_error("acl_neon KernelGenerator : " + node.name() +
- "is not supported yet");
- break;
- }
- }
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::EmbeddingLookup &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lookups_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::LOOKUPS)};
- const auto values_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::VALUES)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto lookups_tensor = _tensor_reg->getAclTensor(lookups_index);
- auto values_tensor = _tensor_reg->getAclTensor(values_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEEmbeddingLookup>(
- values_tensor->handle(), output_tensor->handle(), lookups_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::FullyConnected &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- const auto activation = node.param().activation;
-
- auto fn = acl_common::kernelGenFullyConnected<acl_common::AclFunction, ::arm_compute::ITensor,
- ::arm_compute::NEFullyConnectedReshapingLayer>(
- node, _ctx, _tensor_builder, _tensor_reg, _current_op_seq_layout);
- _return_fn = std::make_unique<exec::FunctionSequence>(
- std::move(fn), ActivationBuilder::generate(activation, output_tensor->handle()));
-}
-
-void KernelGenerator::visit(const ir::operation::HashtableLookup &node)
-{
- const auto output_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::OUTPUT)};
- const auto hits_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::HITS)};
-
- const auto lookups_index{node.getInputs().at(ir::operation::HashtableLookup::Input::LOOKUPS)};
- const auto keys_index{node.getInputs().at(ir::operation::HashtableLookup::Input::KEYS)};
- const auto values_index{node.getInputs().at(ir::operation::HashtableLookup::Input::VALUES)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto hits_tensor = _tensor_reg->getAclTensor(hits_index);
-
- auto lookups_tensor = _tensor_reg->getAclTensor(lookups_index);
- auto keys_tensor = _tensor_reg->getAclTensor(keys_index);
- auto values_tensor = _tensor_reg->getAclTensor(values_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEHashtableLookup>(
- lookups_tensor->handle(), keys_tensor->handle(), values_tensor->handle(),
- output_tensor->handle(), hits_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Gather &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- const auto ifm_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)};
- const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)};
-
- const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
- const auto axis_raw = node.param().axis;
- const auto axis_value = (axis_raw < 0 ? (ifm_rank + axis_raw) : axis_raw);
- // Converting in reverse order
- const int axis = ::onert::backend::acl_common::ToARMComputeAxis(ifm_rank, axis_value).value();
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto indices_tensor = _tensor_reg->getAclTensor(indices_index);
- const auto backend_layout = ofm_tensor->layout();
- UNUSED_RELEASE(backend_layout);
-
- // NOTE The frontend layout and backend layout must be the same for this operation.
- // If not the same, we have to add a stage(?) to perform permutation of output tensor. It
- // is not not efficient even if it works well. If so, it would be better to set the
- // layout of these backend tensors to the same layout.
- // There is also one thing we have to think about. This operation depends on the layout of
- // a model. For example, if a model in NHWC has this operation as output rank == 4, indices
- // rank == 2 and axis == 2, this operation should work as the axis W and C, but the axis W
- // and C are not sequential in NCHW. So the backend in NCHW cannot handle this case.
- assert(backend_layout == ifm_tensor->layout());
- assert(backend_layout == indices_tensor->layout());
- assert(ifm_rank < 4 || _current_op_seq_layout == backend_layout);
-
- // input is n-D, indices k-D, output is (n + k - 1)-D
- size_t n = ifm_rank;
- assert(n == ifm_tensor->num_dimensions());
- size_t k = _ctx.at(indices_index).shape().rank();
- assert(k == indices_tensor->num_dimensions());
-
- // Disable applied dim_correction
- if (n != ifm_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and ifm tensor is applied dim_correction
- acl_common::disableDimCorrection(ifm_tensor);
- }
- if (k != indices_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and indices tensor is applied dim_correction
- acl_common::disableDimCorrection(indices_tensor);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::NEGatherEx>(
- ifm_tensor->handle(), indices_tensor->handle(), ofm_tensor->handle(), axis);
-
- // Revert disabling applied dim_correction
- if (ifm_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(ifm_tensor);
- }
- if (indices_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(indices_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::InstanceNorm &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::InstanceNorm::Input::INPUT)};
- const auto gamma_index{node.getInputs().at(ir::operation::InstanceNorm::Input::GAMMA)};
- const auto beta_index{node.getInputs().at(ir::operation::InstanceNorm::Input::BETA)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto gamma_tensor = _tensor_reg->getAclTensor(gamma_index);
- auto beta_tensor = _tensor_reg->getAclTensor(beta_index);
- auto epsilon = node.param().epsilon;
- auto activation = node.param().activation;
-
- auto fn = acl_common::generateLayer<arm_compute::NEInstanceNormalizationLayerEx>(
- ifm_tensor->handle(), ofm_tensor->handle(), gamma_tensor->handle(), beta_tensor->handle(),
- epsilon);
-
- _return_fn = std::make_unique<exec::FunctionSequence>(
- asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle()));
-}
-
-void KernelGenerator::visit(const ir::operation::L2Normalization &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::L2Normalization::Input::INPUT)};
-
- // {CL|Neon}L2Normalization performs the reduction only along dimension 0
- // L2 Normalization always performs the reduction along the depth axis
- // Thus, we repurpose {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by
- // choosing normalization parameters as below
-
- const auto &ifm_shape = _ctx.at(ifm_index).shape();
- // TODO Support optional constant dimension that normalization would be performed on
- const auto normalization_axis = _ctx.at(ifm_index).shape().rank() - 1;
- int32_t radius =
- 2 * ifm_shape.dim(normalization_axis) + 1; // normSize = depth(last dimension) * 2 + 1
- float alpha = 1.0f; // In the implementation to make alpha_ become 1
- float beta = 0.5f; // pow(reduction, -0.5) = 1 / sqrt(reduction)
- float bias = 0.0f; // Don't offset the reduction.
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- const auto norm_info = ::arm_compute::NormalizationLayerInfo(::arm_compute::NormType::CROSS_MAP,
- radius, alpha, beta, bias, false);
-
- auto fn = acl_common::generateLayer<arm_compute::NENormalizationLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), norm_info);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::LocalResponseNormalization &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{
- node.getInputs().at(ir::operation::LocalResponseNormalization::Input::INPUT)};
-
- auto radius = node.param().radius;
- auto alpha = node.param().alpha;
- auto beta = node.param().beta;
- auto bias = node.param().bias;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- const auto norm_info = ::arm_compute::NormalizationLayerInfo(
- ::arm_compute::NormType::CROSS_MAP, radius * 2 + 1, alpha, beta, bias, false);
-
- auto fn = acl_common::generateLayer<arm_compute::NENormalizationLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), norm_info);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::LSTM &node)
-{
- _return_fn = acl_common::kernelGenLSTM<acl_common::AclFunction, ::arm_compute::ITensor,
- ::arm_compute::NELSTMLayer>(node, _ctx, _tensor_reg);
-}
-
-void KernelGenerator::visit(const ir::operation::Pack &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- auto axis{node.param().axis};
-
- const auto output_rank = _ctx.at(output_index).shape().rank();
-
- std::vector<ir::OperandIndex> input_indexes;
- for (const auto &input_index : node.getInputs())
- input_indexes.emplace_back(input_index);
-
- auto output = _tensor_reg->getAclTensor(output_index)->handle();
- std::vector<arm_compute::ITensor *> inputs;
- for (const auto &input_index : input_indexes)
- inputs.emplace_back(_tensor_reg->getAclTensor(input_index)->handle());
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = _tensor_reg->getAclTensor(output_index)->layout();
-
- if (axis < 0)
- axis += output_rank;
- axis = acl_common::ToARMComputeAxis(output_rank, axis, frontend_layout, backend_layout).value();
-
- // Disable applied dim_correction
- for (const auto &input_index : input_indexes)
- {
- const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
- if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(input_tensor);
- }
- }
-
- auto fn = acl_common::generateLayer<arm_compute::NEStackLayer>(inputs, axis, output);
-
- // Revert disabling applied dim_correction
- for (const auto &input_index : input_indexes)
- {
- const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
- if (input_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(input_tensor);
- }
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Pad &node)
-{
- const auto input_index{node.getInputs().at(ir::operation::Pad::Input::INPUT)};
- const auto pad_index{node.getInputs().at(ir::operation::Pad::Input::PAD)};
- const auto output_index{node.getOutputs().at(0)};
- assert(_ctx.at(pad_index).data());
-
- auto rank = _ctx.at(input_index).shape().rank();
- auto pad_base = _ctx.at(pad_index).data()->base();
-
- auto input = _tensor_reg->getAclTensor(input_index)->handle();
- auto output = _tensor_reg->getAclTensor(output_index)->handle();
-
- ::arm_compute::PaddingList padding_list;
- padding_list.resize(rank);
- for (int32_t n = 0; n < rank; ++n)
- {
- const int32_t *from = reinterpret_cast<const int32_t *>(pad_base) + (n * 2);
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = _tensor_reg->getAclTensor(input_index)->layout();
- const auto axis =
- acl_common::ToARMComputeAxis(rank, n, frontend_layout, backend_layout).value();
- padding_list[axis] = ::arm_compute::PaddingInfo{from[0], from[1]};
- }
-
- const auto input_type = _ctx.at(input_index).typeInfo();
- UNUSED_RELEASE(input_type);
- assert(input->info()->data_type() == acl_common::asDataType(input_type.type()));
- assert(input->info()->quantization_info() ==
- ::arm_compute::QuantizationInfo(input_type.scale(), input_type.offset()));
- const auto pixel_value =
- ::arm_compute::PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
-
- auto fn =
- acl_common::generateLayer<arm_compute::NEPadLayer>(input, output, padding_list, pixel_value);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Pool2D &node)
-{
- auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::NEPoolingLayer>(
- node, _ctx, _tensor_reg, _current_op_seq_layout,
- acl_common::convertPoolType(node.param().op_type));
-
- const auto ofm_index{node.getOutputs().at(0)};
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- const auto activation = node.param().activation;
- _return_fn = std::make_unique<exec::FunctionSequence>(
- asAclFunction(std::move(raw_fn)),
- ActivationBuilder::generate(activation, ofm_tensor->handle()));
-}
-
-void KernelGenerator::visit(const ir::operation::Permute &node)
-{
- const auto ofm_idx{node.getOutputs().at(0)};
- const auto ifm_idx{node.getInputs().at(0)};
- const auto permute_type = node.getPermuteType();
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_idx);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_idx);
- const auto rank = _ctx.at(ofm_idx).shape().rank();
- assert(_ctx.at(ifm_idx).shape().rank() == _ctx.at(ofm_idx).shape().rank());
-
- std::unique_ptr<::arm_compute::IFunction> fn;
- arm_compute::PermutationVector pv;
- if (permute_type == ir::operation::Permute::Type::NCHW_TO_NHWC && rank == 4)
- {
- // WHCN -> CWHN
- pv = arm_compute::PermutationVector{2, 0, 1};
-
- fn = acl_common::generateLayer<arm_compute::NEPermute>(ifm_tensor->handle(),
- ofm_tensor->handle(), pv);
- }
- else if (permute_type == ir::operation::Permute::Type::NHWC_TO_NCHW && rank == 4)
- {
- // CWHN -> WHCN
- pv = arm_compute::PermutationVector{1, 2, 0};
-
- fn = acl_common::generateLayer<arm_compute::NEPermute>(ifm_tensor->handle(),
- ofm_tensor->handle(), pv);
- }
- else
- {
- fn = acl_common::generateLayer<arm_compute::NECopy>(ifm_tensor->handle(), ofm_tensor->handle());
- }
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::PReLU &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::PReLU::Input::INPUT)};
- const auto alpha_index{node.getInputs().at(ir::operation::PReLU::Input::ALPHA)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto alpha_tensor = _tensor_reg->getAclTensor(alpha_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEPReluLayer>(
- ifm_tensor->handle(), alpha_tensor->handle(), ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Reduce &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
- const auto axes_index{node.getInputs().at(ir::operation::Reduce::Input::AXES)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- // Convert to ACL axes taking into account negative values and possible duplicates.
- const auto &axes = _ctx.at(axes_index);
- const auto input_rank = _ctx.at(input_index).shape().rank();
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = input_tensor->layout();
- const auto reduce_axes =
- acl_common::asCoordinates(axes, input_rank, frontend_layout, backend_layout);
- const auto reduce_type = node.param().reduce_type;
- const auto keep_dims = node.param().keep_dims;
-
- std::unique_ptr<::arm_compute::IFunction> fn;
- if (reduce_type == ir::operation::Reduce::ReduceType::MEAN)
- {
- fn = acl_common::generateLayer<arm_compute::NEReduceMean>(input_tensor->handle(), reduce_axes,
- keep_dims, output_tensor->handle());
- }
- else if (reduce_type == ir::operation::Reduce::ReduceType::SUM)
- {
- fn = acl_common::generateLayer<arm_compute::NEReduceSum>(input_tensor->handle(), reduce_axes,
- keep_dims, output_tensor->handle());
- }
- else
- {
- fn = acl_common::generateLayer<arm_compute::NEReduceOperation>(
- input_tensor->handle(), reduce_axes, keep_dims, output_tensor->handle(),
- acl_common::convertReduceType(reduce_type));
- }
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Reshape &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- // NOTE This operation must not be changed the layout from frontend to backend
- // So, PermutationOperationPass makes layouts of frontend and backend the same.
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = output_tensor->layout();
- assert((_ctx.at(input_index).shape().rank() < 4 && _ctx.at(output_index).shape().rank() < 4) ||
- frontend_layout == backend_layout);
- UNUSED_RELEASE(frontend_layout);
- UNUSED_RELEASE(backend_layout);
-
- auto fn = acl_common::generateLayer<arm_compute::NEReshapeLayer>(input_tensor->handle(),
- output_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ResizeBilinear &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEScale>(
- ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::InterpolationPolicy::BILINEAR,
- ::arm_compute::BorderMode::REPLICATE, ::arm_compute::PixelValue(0.f),
- ::arm_compute::SamplingPolicy::TOP_LEFT);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::RNN &node)
-{
- const auto output_index{node.getOutputs().at(ir::operation::RNN::Output::OUTPUT)};
- const auto hidden_state_out_index{
- node.getOutputs().at(ir::operation::RNN::Output::HIDDEN_STATE_OUT)};
-
- const auto input_index{node.getInputs().at(ir::operation::RNN::Input::INPUT)};
- const auto weights_index{node.getInputs().at(ir::operation::RNN::Input::WEIGHTS)};
- const auto recurrent_weights_index{
- node.getInputs().at(ir::operation::RNN::Input::RECURRENT_WEIGHTS)};
- const auto bias_index{node.getInputs().at(ir::operation::RNN::Input::BIAS)};
- const auto hidden_state_in_index{node.getInputs().at(ir::operation::RNN::Input::HIDDEN_STATE_IN)};
-
- const auto activation = node.param().activation;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto hidden_state_out_tensor = _tensor_reg->getAclTensor(hidden_state_out_index);
-
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
- auto weights_tensor = _tensor_reg->getAclTensor(weights_index);
- auto recurrent_weights_tensor = _tensor_reg->getAclTensor(recurrent_weights_index);
- auto bias_tensor = _tensor_reg->getAclTensor(bias_index);
- auto hidden_state_in_tensor = _tensor_reg->getAclTensor(hidden_state_in_index);
- auto act_info = ::onert::backend::acl_common::asActivationLayerInfo(activation);
-
- auto copy_layer = acl_common::generateLayer<arm_compute::NECopy>(
- hidden_state_in_tensor->handle(), hidden_state_out_tensor->handle());
- _return_fn = asAclFunction(std::move(copy_layer));
-
- auto fn = acl_common::generateLayer<arm_compute::NERNNLayer>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
- weights_tensor->handle(), recurrent_weights_tensor->handle(), bias_tensor->handle(),
- hidden_state_out_tensor->handle(), output_tensor->handle(), act_info);
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Squeeze &node)
-{
- // Squeeze is identical to reshape except that it has an optional dimensions input.
- // In addition, optional dims_index is ignored since output tensor already has squeezed shape
- // by freezer and toco
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
- const auto dims{node.param().dims};
- const auto ndim{node.param().ndim};
- (void)dims;
- (void)ndim;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
- auto fn = acl_common::generateLayer<arm_compute::NEReshapeLayer>(input_tensor->handle(),
- output_tensor->handle());
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Softmax &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Softmax::Input::INPUT)};
- const auto beta = node.param().beta;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- // Disable applied dim_correction
- if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(input_tensor);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::NESoftmaxLayer>(
- _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
- output_tensor->handle(), beta);
-
- // Revert disabling applied dim_correction
- if (input_tensor->dimension(0) == 1)
- {
- acl_common::disableDimCorrection(input_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
- const auto block_size_index{
- node.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
- const auto paddings_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto block_size_tensor = _tensor_reg->getAclTensor(block_size_index);
- auto paddings_tensor = _tensor_reg->getAclTensor(paddings_index);
-
- assert(_ctx.at(block_size_index).data());
- assert(_ctx.at(paddings_index).data());
-
- auto fn = acl_common::generateLayer<arm_compute::NESpaceToBatchLayer>(
- ifm_tensor->handle(), block_size_tensor->handle(), paddings_tensor->handle(),
- ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::SpaceToDepth &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::SpaceToDepth::Input::INPUT)};
-
- auto block_size = node.param().block_size;
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NESpaceToDepthLayer>(
- ifm_tensor->handle(), ofm_tensor->handle(), block_size);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Split &node)
-{
- // TODO Support this op by SubTensor
- const auto ifm_index{node.getInputs().at(ir::operation::Split::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::Split::Input::AXIS)};
-
- assert(node.param().num_splits == static_cast<int>(node.getOutputs().size()));
- if (!_ctx.at(axis_index).isConstant())
- {
- throw std::runtime_error("Non-constant axis_index NYI for acl_neon backend");
- }
-
- const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
- std::vector<ir::OperandIndex> output_indexes;
- for (const auto &output : node.getOutputs())
- output_indexes.emplace_back(output);
-
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- std::vector<arm_compute::ITensor *> output_tensors;
- for (const auto &ofm_ind : output_indexes)
- output_tensors.emplace_back(_tensor_reg->getAclTensor(ofm_ind)->handle());
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = ifm_tensor->layout();
- auto axis = _ctx.at(axis_index).asScalar<int32_t>();
- if (axis < 0)
- axis += ifm_rank;
- axis = acl_common::ToARMComputeAxis(ifm_rank, axis, frontend_layout, backend_layout).value();
-
- auto fn =
- acl_common::generateLayer<arm_compute::NESplit>(ifm_tensor->handle(), output_tensors, axis);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::SquaredDifference &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEElementwiseSquaredDiff>(
- lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Slice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Slice::Input::INPUT)};
- const auto begins_index{node.getInputs().at(ir::operation::Slice::Input::BEGINS)};
- const auto sizes_index{node.getInputs().at(ir::operation::Slice::Input::SIZES)};
-
- auto outputData_tensor = _tensor_reg->getAclTensor(output_index);
- auto inputData_tensor = _tensor_reg->getAclTensor(input_index);
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = inputData_tensor->layout();
-
- // Set initializers for indices data such as order of inputData
- int input_rank = _ctx.at(input_index).shape().rank();
- std::vector<int32_t> starts;
- std::vector<int32_t> ends;
- starts.resize(input_rank, 0);
- ends.resize(input_rank, 0);
- {
- auto beginData_base = _ctx.at(begins_index).data()->base();
- auto sizeData_base = _ctx.at(sizes_index).data()->base();
- const int beginData_size = _ctx.at(begins_index).shape().num_elements();
- const int sizeData_size = _ctx.at(sizes_index).shape().num_elements();
-
- using ir::DataType;
-
- UNUSED_RELEASE(beginData_size);
- UNUSED_RELEASE(sizeData_size);
-
- assert(_ctx.at(begins_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(sizes_index).typeInfo().type() == DataType::INT32);
- assert(beginData_size == input_rank);
- assert(sizeData_size == input_rank);
-
- assert(beginData_base != nullptr);
- for (int n = 0; n < input_rank; ++n)
- {
- auto axis = ::onert::backend::acl_common::ToARMComputeAxis(input_rank, n, frontend_layout,
- backend_layout)
- .value();
-
- int32_t begin_value = *(reinterpret_cast<const int32_t *>(beginData_base) + n);
- starts[axis] = begin_value;
-
- int32_t size_value = *(reinterpret_cast<const int32_t *>(sizeData_base) + n);
- ends[axis] = begin_value + size_value;
- }
- }
-
- ::arm_compute::Coordinates starts_set;
- ::arm_compute::Coordinates ends_set;
-
- for (size_t i = 0; i < starts.size(); ++i)
- {
- starts_set.set(i, starts[i]);
- ends_set.set(i, ends[i]);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::NESlice>(
- inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::StridedSlice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
- const auto starts_index{node.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
- const auto ends_index{node.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
- const auto strides_index{node.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
-
- auto outputData_tensor = _tensor_reg->getAclTensor(output_index);
- auto inputData_tensor = _tensor_reg->getAclTensor(input_index);
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = inputData_tensor->layout();
-
- // Set initializers for indices data such as order of inputData
- int input_rank = _ctx.at(input_index).shape().rank();
- std::vector<int32_t> starts;
- std::vector<int32_t> ends;
- std::vector<int32_t> strides;
- starts.resize(input_rank, 0);
- ends.resize(input_rank, 0);
- strides.resize(input_rank, 0);
- {
- auto startData_base = _ctx.at(starts_index).data()->base();
- auto endData_base = _ctx.at(ends_index).data()->base();
- auto stridesData_base = _ctx.at(strides_index).data()->base();
- const int startData_size = _ctx.at(starts_index).shape().num_elements();
- const int endData_size = _ctx.at(ends_index).shape().num_elements();
- const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
-
- using ir::DataType;
-
- UNUSED_RELEASE(startData_size);
- UNUSED_RELEASE(endData_size);
- UNUSED_RELEASE(stridesData_size);
-
- assert(_ctx.at(starts_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(ends_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(strides_index).typeInfo().type() == DataType::INT32);
- assert(startData_size == input_rank);
- assert(endData_size == input_rank);
- assert(stridesData_size == input_rank);
-
- assert(startData_base != nullptr);
- for (int n = 0; n < input_rank; ++n)
- {
- auto axis = ::onert::backend::acl_common::ToARMComputeAxis(input_rank, n, frontend_layout,
- backend_layout)
- .value();
-
- int32_t start_value = *(reinterpret_cast<const int32_t *>(startData_base) + n);
- starts[axis] = start_value;
-
- int32_t end_value = *(reinterpret_cast<const int32_t *>(endData_base) + n);
- ends[axis] = end_value;
-
- int32_t strides_value = *(reinterpret_cast<const int32_t *>(stridesData_base) + n);
- strides[axis] = strides_value;
- }
- }
-
- // Set mask bits such as order of inputData
- // FIXME Take the layouts into account.
- const auto begin_mask = acl_common::ReorderBits<int32_t>(node.param().begin_mask, input_rank);
- const auto end_mask = acl_common::ReorderBits<int32_t>(node.param().end_mask, input_rank);
- const auto shrink_axis_mask =
- acl_common::ReorderBits<int32_t>(node.param().shrink_axis_mask, input_rank);
-
- ::arm_compute::Coordinates starts_set;
- ::arm_compute::Coordinates ends_set;
- ::arm_compute::BiStrides strides_set;
-
- for (size_t i = 0; i < starts.size(); ++i)
- {
- starts_set.set(i, starts[i]);
- ends_set.set(i, ends[i]);
- strides_set.set(i, strides[i]);
- }
-
- // Disable applied dim_correction
- if (inputData_tensor->num_dimensions() != inputData_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(inputData_tensor);
- }
-
- auto fn = acl_common::generateLayer<arm_compute::NEStridedSlice>(
- inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set, strides_set,
- begin_mask, end_mask, shrink_axis_mask);
-
- // Revert disabling applied dim_correction
- if (inputData_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(inputData_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::TransposeConv &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ker_index{node.getInputs().at(ir::operation::TransposeConv::Input::KERNEL)};
- const auto ifm_index{node.getInputs().at(ir::operation::TransposeConv::Input::INPUT)};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ker_shape = _ctx.at(ker_index).shape().asFeature(_current_op_seq_layout);
-
- const auto stride = node.param().stride;
-
- assert((node.param().padding.type == ir::PaddingType::SAME) ||
- (node.param().padding.type == ir::PaddingType::VALID));
- auto padding = ir::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride,
- ker_shape.W, ker_shape.H);
-
- uint32_t invalid_horizontal = 0;
- uint32_t invalid_vertical = 0;
- if (node.param().padding.type == ir::PaddingType::VALID)
- {
- invalid_horizontal =
- ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1);
- invalid_vertical = ofm_shape.H - (1 + (ifm_shape.H - 1) * stride.vertical) - (ker_shape.H - 1);
- }
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
-
- const auto tconv_info = acl_common::asPadStrideInfo(padding, stride);
-
- auto fn = acl_common::generateLayer<arm_compute::NETransposeConvLayer>(
- ifm_tensor->handle(), ker_tensor->handle(), nullptr, ofm_tensor->handle(), tconv_info,
- invalid_horizontal, invalid_vertical);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Transpose &node)
-{
- const auto ofm_idx{node.getOutputs().at(0)};
- const auto ifm_idx{node.getInputs().at(ir::operation::Transpose::Input::INPUT)};
- const auto perm_idx{node.getInputs().at(ir::operation::Transpose::Input::PERMUTATION)};
-
- auto ofm_tensor = _tensor_reg->getAclTensor(ofm_idx);
- const auto ifm_tensor = _tensor_reg->getAclTensor(ifm_idx);
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = ifm_tensor->layout();
- const auto rank = _ctx.at(ifm_idx).shape().rank();
-
- const auto &perms = _ctx.at(perm_idx);
- std::vector<int32_t> pv;
- if (perms.shape() == ir::Shape{0})
- {
- pv.resize(rank);
- std::iota(pv.begin(), pv.end(), 0);
- std::reverse(pv.begin(), pv.end());
- }
- else
- {
- pv = _ctx.at(perm_idx).asVector<int32_t>();
- }
-
- std::unique_ptr<arm_compute::IFunction> fn;
- if (rank == 1)
- {
- fn = acl_common::generateLayer<arm_compute::NECopy>(ifm_tensor->handle(), ofm_tensor->handle());
- }
- else if (rank == 2)
- {
- assert(pv.size() == 2 && pv.at(0) == 1 && pv.at(1) == 0);
- fn = acl_common::generateLayer<arm_compute::NETranspose>(ifm_tensor->handle(),
- ofm_tensor->handle());
- }
- else
- {
- auto backend_pv =
- acl_common::getARMComputePermutationVector(rank, pv, frontend_layout, backend_layout);
-
- fn = acl_common::generateLayer<arm_compute::NEPermute>(ifm_tensor->handle(),
- ofm_tensor->handle(), backend_pv);
- }
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Unpack &node)
-{
- const auto input_index{node.getInputs().at(ir::operation::Unpack::Input::INPUT)};
- auto axis{node.param().axis};
-
- const auto input_rank = _ctx.at(input_index).shape().rank();
-
- std::vector<ir::OperandIndex> output_indexes;
- for (const auto &output_index : node.getOutputs())
- output_indexes.emplace_back(output_index);
-
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
- std::vector<arm_compute::ITensor *> outputs;
- for (const auto &output_index : output_indexes)
- outputs.emplace_back(_tensor_reg->getAclTensor(output_index)->handle());
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = _tensor_reg->getAclTensor(input_index)->layout();
- if (axis < 0)
- axis += input_rank;
- axis = acl_common::ToARMComputeAxis(input_rank, axis, frontend_layout, backend_layout).value();
-
- // Disable applied dim_correction
- if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
- {
- // This means that high dimension's value is 1 and input tensor is applied dim_correction
- acl_common::disableDimCorrection(input_tensor);
- }
-
- auto fn =
- acl_common::generateLayer<arm_compute::NEUnstack>(input_tensor->handle(), outputs, axis);
-
- // Revert disabling applied dim_correction
- if (input_tensor->dimension(0) == 1)
- {
- acl_common::enableDimCorrection(input_tensor);
- }
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::ExpandDims &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input_tensor = _tensor_reg->getAclTensor(input_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEReshapeLayer>(input_tensor->handle(),
- output_tensor->handle());
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Comparison &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input0_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT0)};
- const auto input1_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT1)};
-
- const auto comparison_type = node.param().comparison_type;
-
- auto output_tensor = _tensor_reg->getAclTensor(output_index);
- auto input0_tensor = _tensor_reg->getAclTensor(input0_index);
- auto input1_tensor = _tensor_reg->getAclTensor(input1_index);
-
- auto fn = acl_common::generateLayer<arm_compute::NEElementwiseComparison>(
- input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle(),
- (arm_compute::ComparisonOperation)comparison_type);
-
- _return_fn = asAclFunction(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::OneHot &node)
-{
- const auto out_idx{node.getOutputs().at(0)};
- const auto indices_idx{node.getInputs().at(ir::operation::OneHot::Input::INDICES)};
- const auto depth_idx{node.getInputs().at(ir::operation::OneHot::Input::DEPTH)};
- const auto onvalue_idx{node.getInputs().at(ir::operation::OneHot::Input::ON_VALUE)};
- const auto offvalue_idx{node.getInputs().at(ir::operation::OneHot::Input::OFF_VALUE)};
-
- auto output_tensor = _tensor_reg->getAclTensor(out_idx);
- auto indices_tensor = _tensor_reg->getAclTensor(indices_idx);
- auto depth_tensor = _tensor_reg->getAclTensor(depth_idx);
- auto onvalue_tensor = _tensor_reg->getAclTensor(onvalue_idx);
- auto offvalue_tensor = _tensor_reg->getAclTensor(offvalue_idx);
-
- const size_t output_rank = _ctx.at(out_idx).shape().rank();
- const auto frontend_layout = _current_op_seq_layout;
- const auto backend_layout = output_tensor->layout();
- int32_t axis = node.param().axis == -1 ? output_rank - 1 : node.param().axis;
- axis = acl_common::ToARMComputeAxis(output_rank, axis, frontend_layout, backend_layout).value();
-
- auto fn = acl_common::generateLayer<arm_compute::NEOneHot>(
- indices_tensor->handle(), depth_tensor->handle(), onvalue_tensor->handle(),
- offvalue_tensor->handle(), output_tensor->handle(), axis);
- _return_fn = asAclFunction(std::move(fn));
-}
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_neon/KernelGenerator.h b/runtime/onert/backend/acl_neon/KernelGenerator.h
deleted file mode 100644
index 4d269cde5..000000000
--- a/runtime/onert/backend/acl_neon/KernelGenerator.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_KERNEL_GENERATOR_H__
-#define __ONERT_BACKEND_ACL_NEON_KERNEL_GENERATOR_H__
-
-#include <backend/IKernelGenerator.h>
-
-#include "ir/Operands.h"
-#include "TensorBuilder.h"
-#include "AclTensorRegistry.h"
-#include "TensorManager.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-class KernelGenerator : public IKernelGenerator
-{
-public:
- KernelGenerator(const ir::Operands &operands_ctx, const ir::Operations &operations_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<acl_common::AclTensorRegistry<TensorManager>> &_tensor_reg);
-
- void visit(const ir::OpSequence &) override;
- void visit(const ir::operation::ArgMax &) override;
- void visit(const ir::operation::BatchToSpaceND &) override;
- void visit(const ir::operation::BinaryArithmetic &) override;
- void visit(const ir::operation::Conv2D &) override;
- void visit(const ir::operation::DepthToSpace &) override;
- void visit(const ir::operation::DepthwiseConv2D &) override;
- void visit(const ir::operation::Concat &) override;
- void visit(const ir::operation::ElementwiseActivation &) override;
- void visit(const ir::operation::ElementwiseBinary &) override;
- void visit(const ir::operation::ElementwiseUnary &) override;
- void visit(const ir::operation::EmbeddingLookup &) override;
- void visit(const ir::operation::FullyConnected &) override;
- void visit(const ir::operation::Gather &) override;
- void visit(const ir::operation::HashtableLookup &) override;
- void visit(const ir::operation::InstanceNorm &) override;
- void visit(const ir::operation::L2Normalization &) override;
- void visit(const ir::operation::LocalResponseNormalization &) override;
- void visit(const ir::operation::LSTM &) override;
- void visit(const ir::operation::Pack &) override;
- void visit(const ir::operation::Pad &) override;
- void visit(const ir::operation::Pool2D &) override;
- void visit(const ir::operation::Permute &) override;
- void visit(const ir::operation::PReLU &) override;
- void visit(const ir::operation::Reduce &) override;
- void visit(const ir::operation::Reshape &) override;
- void visit(const ir::operation::ResizeBilinear &) override;
- void visit(const ir::operation::RNN &) override;
- void visit(const ir::operation::Squeeze &) override;
- void visit(const ir::operation::Softmax &) override;
- void visit(const ir::operation::SpaceToBatchND &) override;
- void visit(const ir::operation::SpaceToDepth &) override;
- void visit(const ir::operation::Split &) override;
- void visit(const ir::operation::SquaredDifference &) override;
- void visit(const ir::operation::Slice &) override;
- void visit(const ir::operation::StridedSlice &) override;
- void visit(const ir::operation::TransposeConv &) override;
- void visit(const ir::operation::Transpose &) override;
- void visit(const ir::operation::Unpack &) override;
- void visit(const ir::operation::ExpandDims &) override;
- void visit(const ir::operation::Comparison &) override;
- void visit(const ir::operation::OneHot &) override;
-
-private:
- const ir::Operands &_ctx;
- const ir::Operations &_operations_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
- std::shared_ptr<acl_common::AclTensorRegistry<TensorManager>> _tensor_reg;
- ir::Layout _current_op_seq_layout;
-};
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_KERNEL_GENERATOR_H__
diff --git a/runtime/onert/backend/acl_neon/Optimizer.cc b/runtime/onert/backend/acl_neon/Optimizer.cc
deleted file mode 100644
index ac80901cc..000000000
--- a/runtime/onert/backend/acl_neon/Optimizer.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Optimizer.h"
-
-#include "ParentInfo.h"
-
-#include <cassert>
-#include <compiler/LoweredGraph.h>
-#include <util/logging.h>
-#include "AclSubTensorAnalyzer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-Optimizer::Optimizer(BackendContext *context)
- : _context{context},
- _tensor_builder{std::dynamic_pointer_cast<TensorBuilder>(context->tensor_builder)}
-{
- assert(context);
-}
-
-void Optimizer::optimize()
-{
- // Concat elimination (build subtensor info)
- {
- acl_common::AclSubTensorAnalyzer sa{*_context->graph()};
- for (auto op_info : _context->operation_list())
- {
- auto &op = _context->graph()->operations().at(op_info.index);
- sa.setLayout(op_info.layout);
- op.accept(sa);
- }
-
- _tensor_builder->parent_map(sa.releaseParentMap());
- }
-}
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_neon/Optimizer.h b/runtime/onert/backend/acl_neon/Optimizer.h
deleted file mode 100644
index 5fe0d519c..000000000
--- a/runtime/onert/backend/acl_neon/Optimizer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_OPTIMIZER_H__
-#define __ONERT_BACKEND_ACL_NEON_OPTIMIZER_H__
-
-#include <backend/IOptimizer.h>
-#include <backend/BackendContext.h>
-#include "TensorBuilder.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-class Optimizer : public IOptimizer
-{
-public:
- Optimizer(BackendContext *context);
-
- void optimize() override;
-
-private:
- BackendContext *_context;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_OPTIMIZER_H__
diff --git a/runtime/onert/backend/acl_neon/TensorBuilder.h b/runtime/onert/backend/acl_neon/TensorBuilder.h
deleted file mode 100644
index 070dc20ac..000000000
--- a/runtime/onert/backend/acl_neon/TensorBuilder.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_TENSOR_BUILDER_H__
-#define __ONERT_BACKEND_ACL_NEON_TENSOR_BUILDER_H__
-
-#include <AclTensorBuilder.h>
-
-#include "operand/NETensor.h"
-#include "operand/NESubTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-using TensorBuilder =
- acl_common::AclTensorBuilder<operand::INETensor, operand::NETensor, operand::NESubTensor>;
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_TENSOR_BUILDER_H__
diff --git a/runtime/onert/backend/acl_neon/TensorManager.h b/runtime/onert/backend/acl_neon/TensorManager.h
deleted file mode 100644
index 3b7cfbcfd..000000000
--- a/runtime/onert/backend/acl_neon/TensorManager.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_TENSOR_MANAGER_H__
-#define __ONERT_BACKEND_ACL_NEON_TENSOR_MANAGER_H__
-
-#include <arm_compute/runtime/Allocator.h>
-#include <arm_compute/runtime/PoolManager.h>
-#include <arm_compute/runtime/OffsetLifetimeManager.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-#include <arm_compute/runtime/MemoryGroup.h>
-
-#include <AclMemoryManager.h>
-#include <AclLinearMemoryManager.h>
-#include <AclInternalBufferManager.h>
-#include <AclTensorManager.h>
-
-#include "operand/NETensor.h"
-#include "operand/NESubTensor.h"
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-using MemoryManager =
- acl_common::AclMemoryManager<operand::INETensor, operand::NETensor, operand::NESubTensor>;
-
-using LinearMemoryManager = acl_common::AclLinearMemoryManager<
- operand::INETensor, operand::NETensor, operand::NESubTensor,
- ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
- ::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator, ::arm_compute::MemoryGroup>;
-
-using InternalBufferManager = acl_common::AclInternalBufferManager<
- ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
- ::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator>;
-
-using TensorManager = acl_common::AclTensorManager<acl_neon::operand::INETensor, operand::NETensor,
- operand::NESubTensor>;
-
-inline TensorManager *createTensorManager(bool is_linear_executor)
-{
- if (is_linear_executor)
- {
- VERBOSE(acl_neon_createTensorManager) << "AclTensorManager as Linear" << std::endl;
- return new TensorManager(new MemoryManager(), new LinearMemoryManager(),
- new InternalBufferManager());
- }
- else
- {
- VERBOSE(acl_neon_createTensorManager) << "AclTensorManager" << std::endl;
- return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
- }
-}
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_TENSOR_MANAGER_H__
diff --git a/runtime/onert/backend/acl_neon/acl_neon.cc b/runtime/onert/backend/acl_neon/acl_neon.cc
deleted file mode 100644
index f490d132d..000000000
--- a/runtime/onert/backend/acl_neon/acl_neon.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <util/logging.h>
-
-#include "Backend.h"
-
-extern "C" {
-onert::backend::Backend *onert_backend_create()
-{
- VERBOSE(onert_backend_create) << "'acl_neon' loaded\n";
- return new onert::backend::acl_neon::Backend;
-}
-
-void onert_backend_destroy(onert::backend::Backend *backend)
-{
- VERBOSE(onert_backend_create) << "'acl_neon' unloaded\n";
- delete backend;
-}
-}
diff --git a/runtime/onert/backend/acl_neon/operand/INETensor.cc b/runtime/onert/backend/acl_neon/operand/INETensor.cc
deleted file mode 100644
index fddcff0f4..000000000
--- a/runtime/onert/backend/acl_neon/operand/INETensor.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "INETensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-namespace operand
-{
-
-void INETensor::access(const std::function<void(ITensor &tensor)> &fn)
-{
- // This is an optional input
- if (total_size() == 0)
- return;
-
- fn(*this);
-}
-
-} // namespace operand
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_neon/operand/INETensor.h b/runtime/onert/backend/acl_neon/operand/INETensor.h
deleted file mode 100644
index db0ce6fdc..000000000
--- a/runtime/onert/backend/acl_neon/operand/INETensor.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_OPERAND_I_NE_TENSOR_H__
-#define __ONERT_BACKEND_ACL_NEON_OPERAND_I_NE_TENSOR_H__
-
-#include <arm_compute/core/ITensor.h>
-
-#include <IACLTensor.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-namespace operand
-{
-
-class INETensor : public acl_common::IACLTensor
-{
-public:
- const arm_compute::ITensor *handle() const override = 0;
- arm_compute::ITensor *handle() override = 0;
- void access(const std::function<void(ITensor &tensor)> &fn) final;
-};
-
-} // namespace operand
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_OPERAND_I_NE_TENSOR_H__
diff --git a/runtime/onert/backend/acl_neon/operand/NESubTensor.cc b/runtime/onert/backend/acl_neon/operand/NESubTensor.cc
deleted file mode 100644
index 457addd55..000000000
--- a/runtime/onert/backend/acl_neon/operand/NESubTensor.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "NESubTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-namespace operand
-{
-
-NESubTensor::NESubTensor(INETensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, size_t rank, bool extend_parent)
- : _ne_sub_tensor(std::make_shared<arm_compute::SubTensor>(parent->handle(), tensor_shape,
- coords, extend_parent)),
- _rank{rank}
-{
- // DO NOTHING
-}
-
-const arm_compute::SubTensor *NESubTensor::handle() const { return _ne_sub_tensor.get(); }
-
-arm_compute::SubTensor *NESubTensor::handle() { return _ne_sub_tensor.get(); }
-
-} // namespace operand
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_neon/operand/NESubTensor.h b/runtime/onert/backend/acl_neon/operand/NESubTensor.h
deleted file mode 100644
index e7f77d7fc..000000000
--- a/runtime/onert/backend/acl_neon/operand/NESubTensor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_OPERAND_NE_SUB_TENSOR_H__
-#define __ONERT_BACKEND_ACL_NEON_OPERAND_NE_SUB_TENSOR_H__
-
-#include <arm_compute/runtime/SubTensor.h>
-#include "INETensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-namespace operand
-{
-
-class NESubTensor : public INETensor
-{
-public:
- NESubTensor() = delete;
-
-public:
- NESubTensor(INETensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, size_t rank, bool extend_parent = false);
-
-public:
- size_t num_dimensions() const final { return _rank; }
-
-public:
- const arm_compute::SubTensor *handle() const override;
- arm_compute::SubTensor *handle() override;
-
-public:
- // This method is used to prevent the use of memcpy for SubTensor
- bool has_padding() const override { return true; }
-
-private:
- std::shared_ptr<arm_compute::SubTensor> _ne_sub_tensor;
- size_t _rank;
-};
-
-} // namespace operand
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_OPERAND_NE_SUB_TENSOR_H__
diff --git a/runtime/onert/backend/acl_neon/operand/NETensor.cc b/runtime/onert/backend/acl_neon/operand/NETensor.cc
deleted file mode 100644
index 53dbb3021..000000000
--- a/runtime/onert/backend/acl_neon/operand/NETensor.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <arm_compute/runtime/Memory.h>
-#include <arm_compute/runtime/MemoryRegion.h>
-#include "NETensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-namespace operand
-{
-
-NETensor::NETensor(const arm_compute::TensorInfo &info, size_t rank, size_t num_uses)
- : _ne_tensor(std::make_shared<arm_compute::Tensor>()), _rank{rank}, _num_uses{num_uses}
-{
- allocator()->init(info);
-}
-
-const arm_compute::Tensor *NETensor::handle() const { return _ne_tensor.get(); }
-
-arm_compute::Tensor *NETensor::handle() { return _ne_tensor.get(); }
-
-arm_compute::TensorAllocator *NETensor::allocator() { return _ne_tensor->allocator(); }
-
-} // namespace operand
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/acl_neon/operand/NETensor.h b/runtime/onert/backend/acl_neon/operand/NETensor.h
deleted file mode 100644
index 0dd81afec..000000000
--- a/runtime/onert/backend/acl_neon/operand/NETensor.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ACL_NEON_OPERAND_NE_TENSOR_H__
-#define __ONERT_BACKEND_ACL_NEON_OPERAND_NE_TENSOR_H__
-
-#include <arm_compute/core/TensorInfo.h>
-#include <arm_compute/runtime/Tensor.h>
-#include "arm_compute/runtime/TensorAllocator.h"
-#include "INETensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace acl_neon
-{
-namespace operand
-{
-
-class NETensor : public INETensor
-{
-public:
- NETensor() = delete;
-
-public:
- NETensor(const arm_compute::TensorInfo &info, size_t rank, size_t num_uses);
-
-public:
- size_t num_dimensions() const final { return _rank; }
-
-public:
- const arm_compute::Tensor *handle() const override;
- arm_compute::Tensor *handle() override;
- size_t num_uses() const { return _num_uses; }
-
-public:
- arm_compute::TensorAllocator *allocator();
-
-private:
- std::shared_ptr<arm_compute::Tensor> _ne_tensor;
- size_t _rank;
- size_t _num_uses;
-};
-
-} // namespace operand
-} // namespace acl_neon
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ACL_NEON_OPERAND_NE_TENSOR_H__
diff --git a/runtime/onert/backend/cpu/Backend.h b/runtime/onert/backend/cpu/Backend.h
deleted file mode 100644
index fc8574b26..000000000
--- a/runtime/onert/backend/cpu/Backend.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_BACKEND_H__
-#define __ONERT_BACKEND_CPU_BACKEND_H__
-
-#include "BackendContext.h"
-#include "Config.h"
-#include "ConstantInitializer.h"
-#include "KernelGenerator.h"
-
-#include <backend/Backend.h>
-
-#include <memory>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class Backend : public ::onert::backend::Backend
-{
-public:
- Backend() : _config{std::make_shared<Config>()} {}
-
- std::shared_ptr<IConfig> config() const override { return _config; }
-
- std::unique_ptr<onert::backend::BackendContext>
- newContext(const ir::Graph &graph, const std::shared_ptr<custom::IKernelBuilder> &kb,
- bool) const override
- {
- const auto &operands = graph.operands();
- const auto &operations = graph.operations();
- auto context = std::make_unique<BackendContext>(this, &graph);
- auto tr = std::make_shared<cpu_common::TensorRegistry>();
- auto tb = std::make_shared<TensorBuilder>(tr);
- context->tensor_registry = tr;
- context->tensor_builder = tb;
- context->constant_initializer = std::make_shared<ConstantInitializer>(operands, tr);
- context->kernel_gen = std::make_shared<KernelGenerator>(operands, operations, tb, tr, kb,
- context->external_context());
- context->tensor_register = nullptr;
- context->optimizer = nullptr;
- return context;
- }
-
-private:
- std::shared_ptr<IConfig> _config;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_BACKEND_H__
diff --git a/runtime/onert/backend/cpu/BackendContext.h b/runtime/onert/backend/cpu/BackendContext.h
deleted file mode 100644
index e90b21054..000000000
--- a/runtime/onert/backend/cpu/BackendContext.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__
-#define __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__
-
-#include <backend/BackendContext.h>
-#include "ExternalContext.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class BackendContext : public onert::backend::BackendContext
-{
-public:
- BackendContext(const Backend *backend, const ir::Graph *graph,
- std::shared_ptr<ITensorRegistry> tensor_registry = nullptr,
- std::shared_ptr<ITensorBuilder> tensor_builder = nullptr,
- std::shared_ptr<IConstantInitializer> constant_initializer = nullptr,
- std::shared_ptr<IKernelGenerator> kernel_gen = nullptr,
- std::shared_ptr<ITensorRegister> tensor_register = nullptr,
- std::shared_ptr<IOptimizer> optimizer = nullptr)
- : onert::backend::BackendContext(backend, graph, tensor_registry, tensor_builder,
- constant_initializer, kernel_gen, tensor_register,
- optimizer),
- _external_context(new ExternalContext)
- {
- }
-
- std::shared_ptr<ExternalContext> external_context() { return _external_context; }
-
-private:
- // NOTE ruy context has a thread pool, and when multiple ruy contexts are created,
- // the thread pool is also created in duplicate
- // TODO Create one ruy context for session
- std::shared_ptr<ExternalContext> _external_context;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__
diff --git a/runtime/onert/backend/cpu/CMakeLists.txt b/runtime/onert/backend/cpu/CMakeLists.txt
deleted file mode 100644
index 01a3cd178..000000000
--- a/runtime/onert/backend/cpu/CMakeLists.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-set(LIB_ONERT_BACKEND_CPU onert_backend_cpu)
-
-nnfw_find_package(Ruy REQUIRED)
-
-file(GLOB_RECURSE SOURCES "*.cc")
-
-add_library(${LIB_ONERT_BACKEND_CPU} SHARED ${SOURCES})
-
-target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_lib_cker)
-target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE onert_core)
-target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_common)
-target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_coverage)
-target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE ruy)
-target_link_libraries(${LIB_ONERT_BACKEND_CPU} INTERFACE ruy_instrumentation)
-
-set_target_properties(${LIB_ONERT_BACKEND_CPU} PROPERTIES OUTPUT_NAME backend_cpu)
-
-install(TARGETS ${LIB_ONERT_BACKEND_CPU} DESTINATION lib)
diff --git a/runtime/onert/backend/cpu/Config.cc b/runtime/onert/backend/cpu/Config.cc
deleted file mode 100644
index 3ace47f5d..000000000
--- a/runtime/onert/backend/cpu/Config.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Config.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-bool Config::initialize() { return true; }
-
-ir::Layout Config::supportLayout(const ir::Operation &, ir::Layout) { return ir::Layout::NHWC; }
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/Config.h b/runtime/onert/backend/cpu/Config.h
deleted file mode 100644
index 37e49581a..000000000
--- a/runtime/onert/backend/cpu/Config.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_CONFIG_H__
-#define __ONERT_BACKEND_CPU_CONFIG_H__
-
-#include <backend/IConfig.h>
-#include <memory>
-#include <util/ITimer.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class Config : public IConfig
-{
-public:
- std::string id() override { return "cpu"; }
- bool initialize() override;
- ir::Layout supportLayout(const ir::Operation &node, ir::Layout frontend_layout) override;
- bool supportPermutation() override { return true; }
- bool supportDynamicTensor() override { return true; }
- bool supportFP16() override { return false; }
-
- std::unique_ptr<util::ITimer> timer() override { return std::make_unique<util::CPUTimer>(); }
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_CONFIG_H__
diff --git a/runtime/onert/backend/cpu/ConstantInitializer.cc b/runtime/onert/backend/cpu/ConstantInitializer.cc
deleted file mode 100644
index 6f6eb77bc..000000000
--- a/runtime/onert/backend/cpu/ConstantInitializer.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConstantInitializer.h"
-#include "Tensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg)
- : IConstantInitializer{operands}, _tensor_reg{tensor_reg}
-{
- // DO NOTHING
-}
-
-void ConstantInitializer::registerDefaultInitializer(const ir::OperandIndex &index,
- const ir::Operand &obj)
-{
- registerExternalInitializer(index, obj);
-}
-
-void ConstantInitializer::registerExternalInitializer(const ir::OperandIndex &index,
- const ir::Operand &obj)
-{
- // For only CONSTANTS
- // TODO Add to check if tensor has been allocated
- if (!obj.isConstant())
- return;
-
- _init_map[index] = [](const onert::ir::Operand &model_obj, onert::backend::ITensor &itensor) {
- auto data = model_obj.shareData();
- assert(data && data->base());
- ExternalTensor &tensor = dynamic_cast<ExternalTensor &>(itensor);
- tensor.setData(data);
- };
-}
-
-void ConstantInitializer::visit(const ir::operation::Conv2D &node)
-{
- const auto &kernel_index = node.getInputs().at(ir::operation::Conv2D::KERNEL);
- const auto &kernel_obj = _operands.at(kernel_index);
- registerExternalInitializer(kernel_index, kernel_obj);
-
- const auto &bias_index = node.getInputs().at(ir::operation::Conv2D::BIAS);
- const auto &bias_obj = _operands.at(bias_index);
- registerExternalInitializer(bias_index, bias_obj);
-}
-
-void ConstantInitializer::visit(const ir::operation::DepthwiseConv2D &node)
-{
- const auto &kernel_index = node.getInputs().at(ir::operation::DepthwiseConv2D::KERNEL);
- const auto &kernel_obj = _operands.at(kernel_index);
- registerExternalInitializer(kernel_index, kernel_obj);
-
- const auto &bias_index = node.getInputs().at(ir::operation::DepthwiseConv2D::BIAS);
- const auto &bias_obj = _operands.at(bias_index);
- registerExternalInitializer(bias_index, bias_obj);
-}
-
-void ConstantInitializer::visit(const ir::operation::FullyConnected &node)
-{
- const auto &weight_index = node.getInputs().at(ir::operation::FullyConnected::WEIGHT);
- const auto &weight_obj = _operands.at(weight_index);
- registerExternalInitializer(weight_index, weight_obj);
-
- const auto &bias_index = node.getInputs().at(ir::operation::FullyConnected::BIAS);
- if (!bias_index.undefined())
- {
- const auto &bias_obj = _operands.at(bias_index);
- registerExternalInitializer(bias_index, bias_obj);
- }
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ConstantInitializer.h b/runtime/onert/backend/cpu/ConstantInitializer.h
deleted file mode 100644
index c016c83bc..000000000
--- a/runtime/onert/backend/cpu/ConstantInitializer.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_CPU_CONSTANT_INITIALIZER_H__
-#define __ONERT_COMPILER_CPU_CONSTANT_INITIALIZER_H__
-
-#include "backend/cpu_common/TensorRegistry.h"
-
-#include <backend/IConstantInitializer.h>
-#include <ir/Operands.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class ConstantInitializer : public IConstantInitializer
-{
-public:
- ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg);
-
-public:
- void registerDefaultInitializer(const ir::OperandIndex &index, const ir::Operand &obj) override;
-
- // TODO: For now the only cpu backend supports constant tensor to use data from external
- // If the other backend supports (to do this,
- // ExternalTensor should be abstract such as IExternal, maybe),
- // this can be an interface of IConstantInitializer
- void registerExternalInitializer(const ir::OperandIndex &, const ir::Operand &);
-
-public:
- void visit(const ir::operation::Conv2D &) override;
- void visit(const ir::operation::DepthwiseConv2D &) override;
- void visit(const ir::operation::FullyConnected &) override;
-
-private:
- std::shared_ptr<ITensorRegistry> tensor_registry() const override { return _tensor_reg; }
-
-private:
- std::shared_ptr<ITensorRegistry> _tensor_reg;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_COMPILER_CPU_CONSTANT_INITIALIZER_H__
diff --git a/runtime/onert/backend/cpu/ExternalContext.h b/runtime/onert/backend/cpu/ExternalContext.h
deleted file mode 100644
index 32e249f5a..000000000
--- a/runtime/onert/backend/cpu/ExternalContext.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_EXTERNAL_CONTEXT_H__
-#define __ONERT_BACKEND_CPU_EXTERNAL_CONTEXT_H__
-
-#include <backend/IExternalContext.h>
-#include <util/ConfigSource.h>
-#include <ruy/context.h>
-
-namespace
-{
-const int kDefaultNumThreadpoolThreads = 1;
-}
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class ExternalContext : public IExternalContext
-{
-public:
- ExternalContext() : _ruy_context(new ruy::Context)
- {
- setMaxNumThreads(onert::util::getConfigInt(onert::util::config::RUY_THREADS));
- }
-
- void setMaxNumThreads(int max_num_threads)
- {
- const int target_num_threads =
- max_num_threads > -1 ? max_num_threads : kDefaultNumThreadpoolThreads;
- _ruy_context->set_max_num_threads(target_num_threads);
- }
-
- ruy::Context *ruy_context() const { return _ruy_context.get(); }
-
-private:
- const std::unique_ptr<ruy::Context> _ruy_context;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_EXTERNAL_CONTEXT_H__
diff --git a/runtime/onert/backend/cpu/KernelGenerator.cc b/runtime/onert/backend/cpu/KernelGenerator.cc
deleted file mode 100644
index 5f330ff50..000000000
--- a/runtime/onert/backend/cpu/KernelGenerator.cc
+++ /dev/null
@@ -1,1289 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "KernelGenerator.h"
-
-#include "ops/ArgMinMaxLayer.h"
-#include "ops/BatchToSpaceNDLayer.h"
-#include "ops/BinaryArithmeticLayer.h"
-#include "ops/CompareLayer.h"
-#include "ops/ConcatLayer.h"
-#include "ops/ConvolutionLayer.h"
-#include "ops/DepthwiseConvolutionLayer.h"
-#include "ops/EinsumLayer.h"
-#include "ops/ElementwiseActivationLayer.h"
-#include "ops/ElementwiseBinaryLayer.h"
-#include "ops/ElementwiseUnaryLayer.h"
-#include "ops/ExpandDimsLayer.h"
-#include "ops/FillLayer.h"
-#include "ops/FullyConnectedLayer.h"
-#include "ops/GatherLayer.h"
-#include "ops/MeanLayer.h"
-#include "ops/OneHotLayer.h"
-#include "ops/OperationUtils.h"
-#include "ops/PackLayer.h"
-#include "ops/PadLayer.h"
-#include "ops/PoolLayer.h"
-#include "ops/PowLayer.h"
-#include "ops/RangeLayer.h"
-#include "ops/RankLayer.h"
-#include "ops/ReduceLayer.h"
-#include "ops/ReshapeLayer.h"
-#include "ops/ResizeBilinearLayer.h"
-#include "ops/ReverseLayer.h"
-#include "ops/SelectLayer.h"
-#include "ops/ShapeLayer.h"
-#include "ops/SliceLayer.h"
-#include "ops/SoftMaxLayer.h"
-#include "ops/StridedSliceLayer.h"
-#include "ops/SpaceToBatchNDLayer.h"
-#include "ops/SpaceToDepthLayer.h"
-#include "ops/SplitLayer.h"
-#include "ops/SplitVLayer.h"
-#include "ops/TileLayer.h"
-#include "ops/TransposeLayer.h"
-#include "ops/UnpackLayer.h"
-#include "ops/SquaredDiffLayer.h"
-#include "ops/L2NormLayer.h"
-#include "ops/MatrixBandPartLayer.h"
-#include "ops/BatchMatMulLayer.h"
-#include "ops/BroadcastToLayer.h"
-#include "ops/FusedBatchNormLayer.h"
-#include "ops/LogSoftMaxLayer.h"
-#include "ops/StatelessRandomUniformLayer.h"
-
-#include <backend/Backend.h>
-#include <backend/IConfig.h>
-#include <memory>
-#include <util/Utils.h>
-#include <util/logging.h>
-#include <exec/DynamicShapeInference.h>
-
-#include <stdexcept>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-namespace
-{
-ops::ArithmeticType
-convertArithmeticType(ir::operation::BinaryArithmetic::ArithmeticType arithmetic_type_ir)
-{
- switch (arithmetic_type_ir)
- {
- case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
- return ops::ArithmeticType::kAdd;
- case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
- return ops::ArithmeticType::kSub;
- case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
- return ops::ArithmeticType::kMul;
- case ir::operation::BinaryArithmetic::ArithmeticType::DIV:
- return ops::ArithmeticType::kDiv;
- default:
- throw std::runtime_error("cpu KernelGenerator : Not supported operation yet");
- }
-}
-
-ops::ElementwiseActivationType
-convertElementwiseActivationType(ir::operation::ElementwiseActivation::Type type_ir)
-{
- switch (type_ir)
- {
- case ir::operation::ElementwiseActivation::Type::LOGISTIC:
- return ops::ElementwiseActivationType::kLogistic;
- case ir::operation::ElementwiseActivation::Type::RELU:
- return ops::ElementwiseActivationType::kReLU;
- case ir::operation::ElementwiseActivation::Type::TANH:
- return ops::ElementwiseActivationType::kTanh;
- default:
- throw std::runtime_error("cpu KernelGenerator : Not supported operation yet");
- }
-}
-
-ops::ElementwiseBinaryType
-convertElementwiseBinaryType(ir::operation::ElementwiseBinary::ElementwiseBinaryType type_ir)
-{
- switch (type_ir)
- {
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR:
- return ops::ElementwiseBinaryType::kLogicalOr;
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX:
- return ops::ElementwiseBinaryType::kMax;
- case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN:
- return ops::ElementwiseBinaryType::kMin;
- default:
- throw std::runtime_error("cpu KernelGenerator : Not supported operation yet");
- }
-}
-
-ops::ElementwiseUnaryType convertElementwiseUnaryType(ir::operation::ElementwiseUnary::Type type_ir)
-{
- switch (type_ir)
- {
- case ir::operation::ElementwiseUnary::Type::ABS:
- return ops::ElementwiseUnaryType::kAbs;
- case ir::operation::ElementwiseUnary::Type::CAST:
- return ops::ElementwiseUnaryType::kCast;
- case ir::operation::ElementwiseUnary::Type::COS:
- return ops::ElementwiseUnaryType::kCos;
- case ir::operation::ElementwiseUnary::Type::ERF:
- return ops::ElementwiseUnaryType::kErf;
- case ir::operation::ElementwiseUnary::Type::EXP:
- return ops::ElementwiseUnaryType::kExp;
- case ir::operation::ElementwiseUnary::Type::LOG:
- return ops::ElementwiseUnaryType::kLog;
- case ir::operation::ElementwiseUnary::Type::LOGICAL_NOT:
- return ops::ElementwiseUnaryType::kLogicalNot;
- case ir::operation::ElementwiseUnary::Type::NEG:
- return ops::ElementwiseUnaryType::kNeg;
- case ir::operation::ElementwiseUnary::Type::QUANTIZE:
- return ops::ElementwiseUnaryType::kQuantize;
- case ir::operation::ElementwiseUnary::Type::ROUND:
- return ops::ElementwiseUnaryType::kRound;
- case ir::operation::ElementwiseUnary::Type::RSQRT:
- return ops::ElementwiseUnaryType::kRSqrt;
- case ir::operation::ElementwiseUnary::Type::SIN:
- return ops::ElementwiseUnaryType::kSin;
- case ir::operation::ElementwiseUnary::Type::ZEROS_LIKE:
- return ops::ElementwiseUnaryType::kZerosLike;
- default:
- throw std::runtime_error("cpu KernelGenerator : Not supported operation yet");
- }
-}
-
-ops::PoolType convertPoolType(ir::operation::Pool2D::PoolType type_ir)
-{
- switch (type_ir)
- {
- case ir::operation::Pool2D::PoolType::AVG:
- return ops::PoolType::kAvg;
- case ir::operation::Pool2D::PoolType::MAX:
- return ops::PoolType::kMax;
- default:
- throw std::runtime_error("cpu KernelGenerator : Not supported operation yet");
- }
-}
-
-ops::ReduceType convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir)
-{
- switch (reduce_type_ir)
- {
- case ir::operation::Reduce::ReduceType::ALL:
- return ops::ReduceType::kAll;
- case ir::operation::Reduce::ReduceType::ANY:
- return ops::ReduceType::kAny;
- case ir::operation::Reduce::ReduceType::MAX:
- return ops::ReduceType::kMax;
- case ir::operation::Reduce::ReduceType::MIN:
- return ops::ReduceType::kMin;
- case ir::operation::Reduce::ReduceType::PROD:
- return ops::ReduceType::kProd;
- case ir::operation::Reduce::ReduceType::SUM:
- return ops::ReduceType::kSum;
- default:
- throw std::runtime_error("cpu KernelGenerator : Not supported operation yet");
- }
-}
-} // namespace
-
-KernelGenerator::KernelGenerator(
- const ir::Operands &operands_ctx, const ir::Operations &operations_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<cpu_common::TensorRegistry> &tensor_reg,
- const std::shared_ptr<backend::custom::IKernelBuilder> &kernel_builder,
- const std::shared_ptr<ExternalContext> &external_context)
- : _ctx(operands_ctx), _operations_ctx{operations_ctx}, _tensor_builder(tensor_builder),
- _tensor_reg{tensor_reg}, _kernel_builder(kernel_builder),
- _current_op_seq_layout(ir::Layout::UNKNOWN), _external_context(external_context)
-{
- // DO NOTHING
-}
-
-void KernelGenerator::visit(const ir::OpSequence &op_seq)
-{
- assert(!_return_fn_seq);
- assert(_tensor_builder->dynamicTensorManager());
- assert(_tensor_reg);
-
- auto dyn_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_ctx, _tensor_reg);
-
- _return_fn_seq = std::make_unique<exec::FunctionSequence>();
-
- // Prepare to handle dynamic tensors later
- auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
- {
- dyn_ctx->op_seq = &op_seq;
- dyn_ctx->operations = &_operations_ctx;
- dyn_ctx->dynamic_shape_inferer = std::move(dyn_shape_inferer);
- dyn_ctx->dynamic_tensor_manager = _tensor_builder->dynamicTensorManager();
-
- _return_fn_seq->dynamic_tensor_ctx(dyn_ctx);
- }
-
- _current_op_seq_layout = op_seq.getLayout();
- for (const auto &operation_idx : op_seq.operations())
- {
- const auto &node = _operations_ctx.at(operation_idx);
- node.accept(*this);
- _return_fn_seq->append(releaseFunction());
-
- for (const auto &ind : (node.getInputs() | ir::Remove::UNDEFINED) + node.getOutputs())
- {
- auto portable_tensor = _tensor_reg->getPortableTensor(ind);
- if (portable_tensor)
- {
- assert(portable_tensor->layout() == ir::Layout::NHWC);
- }
-
- auto tensor = _tensor_reg->getNativeTensor(ind);
- if (tensor)
- {
- tensor->increase_ref();
- }
- }
- }
-}
-
-void KernelGenerator::visit(const ir::operation::Conv2D &node)
-{
- using ir::operation::Conv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getPortableTensor(ker_index);
- auto bias_tensor = _tensor_reg->getPortableTensor(bias_index);
-
- const auto stride = node.param().stride;
- const auto activation = node.param().activation;
- const auto param_padding = node.param().padding;
- const auto dilation = node.param().dilation;
- auto fn = std::make_unique<ops::ConvolutionLayer>();
-
- if (_ctx.at(ifm_index).info().isDynamic() || _ctx.at(ker_index).info().isDynamic())
- {
- fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, param_padding.param.left,
- param_padding.param.right, param_padding.param.top, param_padding.param.bottom,
- stride.horizontal, stride.vertical, dilation.width_factor, dilation.height_factor,
- activation, ofm_tensor);
-
- _return_fn = std::move(fn);
- return;
- }
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
- const auto &ker_shape = _ctx.at(ker_index).shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
-
- const auto padding =
- ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
- dilation.width_factor, dilation.height_factor);
-
- fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, padding.left,
- padding.right, padding.top, padding.bottom, stride.horizontal, stride.vertical,
- dilation.width_factor, dilation.height_factor, activation, ofm_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
-{
- using ir::operation::DepthwiseConv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(DepthwiseConv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(DepthwiseConv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(DepthwiseConv2D::Input::BIAS)};
-
- const auto stride = node.param().stride;
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- // Kernel format is [1, kernel_height, kernel_width, depth_out].
- const auto &ker_shape = _ctx.at(ker_index).shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
- const auto padding = ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride,
- ker_width, ker_height);
- const auto multiplier = node.param().multiplier;
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
- auto ker_tensor = _tensor_reg->getPortableTensor(ker_index);
- auto bias_tensor = _tensor_reg->getPortableTensor(bias_index);
-
- auto fn = std::make_unique<ops::DepthwiseConvolutionLayer>();
-
- fn->configure(ifm_tensor, ker_tensor, bias_tensor, padding.left, padding.right, padding.top,
- padding.bottom, stride.horizontal, stride.vertical, multiplier, activation,
- ofm_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Concat &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- const auto rank = _ctx.at(ofm_index).shape().rank();
- const auto axis = ops::getAxis(rank, node.param().axis, _current_op_seq_layout);
-
- auto output_tensor = _tensor_reg->getPortableTensor(ofm_index);
-
- std::vector<const IPortableTensor *> input_tensors;
- for (auto &ifm_idx : node.getInputs())
- input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx));
-
- auto fn = std::make_unique<ops::ConcatLayer>();
-
- fn->configure(input_tensors, axis, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::BatchToSpaceND &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::BatchToSpaceND::INPUT)};
- const auto block_size_index{node.getInputs().at(ir::operation::BatchToSpaceND::BLOCK_SIZE)};
-
- auto output_alloc = _tensor_reg->getPortableTensor(output_index);
- auto input_alloc = _tensor_reg->getPortableTensor(input_index);
- auto block_size_alloc = _tensor_reg->getPortableTensor(block_size_index);
-
- auto fn = std::make_unique<ops::BatchToSpaceNDLayer>();
-
- IPortableTensor *crops_alloc = nullptr;
- const auto NNApiInputs = 2;
-
- if (node.getInputs().size() != NNApiInputs)
- {
- const auto crops_data_index{node.getInputs().at(ir::operation::BatchToSpaceND::CROPS_DATA)};
- crops_alloc = _tensor_reg->getPortableTensor(crops_data_index);
- }
-
- fn->configure(input_alloc, output_alloc, block_size_alloc, crops_alloc);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Fill &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Fill::Input::INPUT)};
- const auto value_index{node.getInputs().at(ir::operation::Fill::Input::VALUE)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto value_tensor = _tensor_reg->getPortableTensor(value_index);
-
- auto fn = std::make_unique<ops::FillLayer>();
-
- fn->configure(input_tensor, value_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::FullyConnected &node)
-{
- using ir::operation::FullyConnected;
-
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)};
- const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)};
- const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
- const auto activation = node.param().activation;
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto weight_tensor = _tensor_reg->getPortableTensor(weight_index);
- auto bias_tensor = bias_index.undefined() ? nullptr : _tensor_reg->getPortableTensor(bias_index);
-
- auto fn = std::make_unique<ops::FullyConnectedLayer>();
-
- fn->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor,
- _external_context);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Reshape &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- // optional 2nd input
- IPortableTensor *shape_tensor = nullptr;
-
- if (node.getInputs().size() == 2)
- {
- const auto shape_index{node.getInputs().at(ir::operation::Reshape::Input::SHAPE)};
- shape_tensor = _tensor_reg->getPortableTensor(shape_index);
- }
-
- auto fn = std::make_unique<ops::ReshapeLayer>();
-
- fn->configure(input_tensor, shape_tensor, output_tensor);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Squeeze &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- // Squeeze can share same kernel with reshape
- auto fn = std::make_unique<ops::ReshapeLayer>();
-
- fn->configure(input_tensor, nullptr, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Softmax &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Softmax::Input::INPUT)};
-
- const auto beta = node.param().beta;
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- auto fn = std::make_unique<ops::SoftMaxLayer>();
-
- fn->configure(input_tensor, beta, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::BinaryArithmetic &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::RHS)};
-
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto lhs_tensor = _tensor_reg->getPortableTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getPortableTensor(rhs_index);
-
- auto fn = std::make_unique<ops::BinaryArithmeticLayer>();
-
- fn->configure(lhs_tensor, rhs_tensor, ofm_tensor, activation,
- convertArithmeticType(node.param().arithmetic_type));
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Comparison &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT0)};
- const auto rhs_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT1)};
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto lhs_tensor = _tensor_reg->getPortableTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getPortableTensor(rhs_index);
-
- auto comparison_type = node.param().comparison_type;
-
- auto fn = std::make_unique<ops::CompareLayer>();
-
- fn->configure(lhs_tensor, rhs_tensor, comparison_type, ofm_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Gather &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)};
- const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto indices_tensor = _tensor_reg->getPortableTensor(indices_index);
-
- const auto backend_layout = output_tensor->layout();
- UNUSED_RELEASE(backend_layout);
-
- // NOTE The frontend layout and backend layout must be the same for this operation.
- // If not the same, we have to add a stage(?) to perform permutation of output tensor. It
- // is not not efficient even if it works well. If so, it would be better to set the
- // layout of these backend tensors to the same layout.
- // There is also one thing we have to think about. This operation depends on the layout of
- // a model. For example, if a model in NHWC has this operation as output rank == 4, indices
- // rank == 2 and axis == 2, this operation should work as the axis W and C, but the axis W
- // and C are not sequential in NCHW. So the backend in NCHW cannot handle this case.
- assert(backend_layout == input_tensor->layout());
- assert(backend_layout == indices_tensor->layout());
- const auto &input_shape = _ctx.at(input_index).shape();
- UNUSED_RELEASE(input_shape);
- assert(input_shape.rank() < 4 || _current_op_seq_layout == backend_layout);
-
- const auto axis_raw = node.param().axis;
- const auto axis_value = (axis_raw < 0 ? (input_shape.rank() + axis_raw) : axis_raw);
-
- auto fn = std::make_unique<ops::GatherLayer>();
-
- fn->configure(input_tensor, indices_tensor, output_tensor, axis_value);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::OneHot &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto indices_index{node.getInputs().at(ir::operation::OneHot::INDICES)};
- const auto depth_index{node.getInputs().at(ir::operation::OneHot::Input::DEPTH)};
- const auto onvalue_index{node.getInputs().at(ir::operation::OneHot::Input::ON_VALUE)};
- const auto offvalue_index{node.getInputs().at(ir::operation::OneHot::Input::OFF_VALUE)};
-
- const auto axis = node.param().axis;
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto indices_tensor = _tensor_reg->getPortableTensor(indices_index);
- auto depth_tensor = _tensor_reg->getPortableTensor(depth_index);
- auto onvalue_tensor = _tensor_reg->getPortableTensor(onvalue_index);
- auto offvalue_tensor = _tensor_reg->getPortableTensor(offvalue_index);
-
- assert(indices_tensor->data_type() == OperandType::INT32);
- assert(axis <= static_cast<int>(indices_tensor->num_dimensions()));
-
- auto fn = std::make_unique<ops::OneHotLayer>();
-
- fn->configure(indices_tensor, depth_tensor, onvalue_tensor, offvalue_tensor, output_tensor, axis);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Einsum &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(ofm_index);
- std::vector<const IPortableTensor *> input_tensors;
- for (auto &ifm_idx : node.getInputs())
- input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx));
-
- const auto equation = node.param().equation;
-
- auto fn = std::make_unique<ops::EinsumLayer>();
-
- fn->configure(input_tensors, equation, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Custom &node)
-{
- auto fill_op_info = [&](const ir::OperandIndexSequence &opSeq,
- std::vector<custom::TypeInfo> &types,
- std::vector<IPortableTensor *> &tensors) {
- for (auto &idx : opSeq)
- {
- const auto &operand = _ctx.at(idx);
- // TODO make sure using `_current_op_seq_layout` is correct for custom operations
- types.emplace_back(custom::TypeInfo{operand.shape(), operand.typeInfo().type()});
- auto in_tensor = _tensor_reg->getPortableTensor(idx);
- tensors.emplace_back(in_tensor);
- }
- };
-
- backend::custom::CustomKernelConfigParams params{};
-
- fill_op_info(node.getInputs(), params.input_types, params.input_tensors);
- fill_op_info(node.getOutputs(), params.output_types, params.output_tensors);
-
- params.userdata = node.userdata().data;
- params.userdata_size = node.userdata().size;
-
- auto fn = _kernel_builder->buildKernel(node.id(), std::move(params));
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseActivation &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ElementwiseActivation::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- auto fn = std::make_unique<ops::ElementwiseActivationLayer>();
-
- fn->configure(input_tensor, output_tensor, node.param().alpha, node.param().beta,
- convertElementwiseActivationType(node.param().op_type));
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseBinary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::RHS)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto lhs_tensor = _tensor_reg->getPortableTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getPortableTensor(rhs_index);
-
- auto fn = std::make_unique<ops::ElementwiseBinaryLayer>();
-
- fn->configure(lhs_tensor, rhs_tensor, output_tensor,
- convertElementwiseBinaryType(node.param().op_type));
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::ElementwiseUnary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- auto fn = std::make_unique<ops::ElementwiseUnaryLayer>();
-
- fn->configure(input_tensor, output_tensor, convertElementwiseUnaryType(node.param().op_type));
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::ExpandDims &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::ExpandDims::Input::AXIS)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto axis_tensor = _tensor_reg->getPortableTensor(axis_index);
-
- auto fn = std::make_unique<ops::ExpandDimsLayer>();
-
- fn->configure(input_tensor, axis_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Pack &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- const auto rank = _ctx.at(ofm_index).shape().rank();
- const auto axis = ops::getAxis(rank, node.param().axis, _current_op_seq_layout);
-
- assert(-rank <= axis && axis < rank);
-
- auto output_tensor = _tensor_reg->getPortableTensor(ofm_index);
-
- std::vector<const IPortableTensor *> input_tensors;
- for (auto &ifm_idx : node.getInputs())
- input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx));
-
- auto fn = std::make_unique<ops::PackLayer>();
-
- fn->configure(input_tensors, axis, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Unpack &node)
-{
- const auto input_index{node.getInputs().at(0)};
-
- const auto rank = _ctx.at(input_index).shape().rank();
- const auto axis = ops::getAxis(rank, node.param().axis, _current_op_seq_layout);
-
- assert(rank == 0 || (-rank <= axis && axis < rank));
-
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- std::vector<IPortableTensor *> output_tensors;
- for (auto &output_idx : node.getOutputs())
- output_tensors.emplace_back(_tensor_reg->getPortableTensor(output_idx));
-
- auto fn = std::make_unique<ops::UnpackLayer>();
-
- uint32_t axis_resolved = (axis < 0 ? axis + rank : axis);
-
- fn->configure(input_tensor, axis_resolved, node.param().num, output_tensors);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Pad &node)
-{
- const auto input_index{node.getInputs().at(ir::operation::Pad::Input::INPUT)};
- const auto pad_index{node.getInputs().at(ir::operation::Pad::Input::PAD)};
- const auto output_index{node.getOutputs().at(0)};
- assert(_ctx.at(pad_index).data());
-
- auto input = _tensor_reg->getPortableTensor(input_index);
- auto output = _tensor_reg->getPortableTensor(output_index);
- auto pad_rank = _ctx.at(pad_index).shape().dim(0);
- auto pad_base = reinterpret_cast<const int32_t *>(_ctx.at(pad_index).data()->base());
-
- auto fn = std::make_unique<ops::PadLayer>();
-
- bool isPadV2 = node.getInputs().size() == 3 ? true : false;
- const void *value = nullptr;
-
- if (isPadV2)
- {
- const auto value_index{node.getInputs().at(ir::operation::Pad::Input::VALUE)};
- value = reinterpret_cast<const void *>(_ctx.at(value_index).data()->base());
- }
-
- fn->configure(input, output, pad_base, pad_rank, value);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Transpose &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Transpose::Input::INPUT)};
- const auto perm_index{node.getInputs().at(ir::operation::Transpose::Input::PERMUTATION)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto perm_tensor = _tensor_reg->getPortableTensor(perm_index);
-
- auto fn = std::make_unique<ops::TransposeLayer>();
-
- fn->configure(input_tensor, perm_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Reduce &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
- const auto axes_index{node.getInputs().at(ir::operation::Reduce::Input::AXES)};
-
- const auto keep_dims = node.param().keep_dims;
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto axes_tensor = _tensor_reg->getPortableTensor(axes_index);
-
- if (node.param().reduce_type == ir::operation::Reduce::ReduceType::MEAN)
- {
- auto fn = std::make_unique<ops::MeanLayer>();
-
- fn->configure(input_tensor, axes_tensor, output_tensor, keep_dims);
-
- _return_fn = std::move(fn);
- }
- else
- {
- auto fn = std::make_unique<ops::ReduceLayer>();
-
- const auto reduce_type = convertReduceType(node.param().reduce_type);
- fn->configure(input_tensor, axes_tensor, output_tensor, reduce_type, keep_dims);
-
- _return_fn = std::move(fn);
- }
-}
-
-void KernelGenerator::visit(const ir::operation::Select &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto condition_index{node.getInputs().at(ir::operation::Select::Input::CONDITION)};
- const auto true_index{node.getInputs().at(ir::operation::Select::Input::INPUT_TRUE)};
- const auto false_index{node.getInputs().at(ir::operation::Select::Input::INPUT_FALSE)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto condition_tensor = _tensor_reg->getPortableTensor(condition_index);
- auto true_tensor = _tensor_reg->getPortableTensor(true_index);
- auto false_tensor = _tensor_reg->getPortableTensor(false_index);
-
- auto fn = std::make_unique<ops::SelectLayer>();
-
- fn->configure(condition_tensor, true_tensor, false_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Slice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Slice::Input::INPUT)};
- const auto begins_index{node.getInputs().at(ir::operation::Slice::Input::BEGINS)};
- const auto sizes_index{node.getInputs().at(ir::operation::Slice::Input::SIZES)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto begins_tensor = _tensor_reg->getPortableTensor(begins_index);
- auto sizes_tensor = _tensor_reg->getPortableTensor(sizes_index);
-
- auto fn = std::make_unique<ops::SliceLayer>();
-
- fn->configure(input_tensor, begins_tensor, sizes_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::StridedSlice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
- const auto starts_index{node.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
- const auto ends_index{node.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
- const auto strides_index{node.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto starts_tensor = _tensor_reg->getPortableTensor(starts_index);
- auto ends_tensor = _tensor_reg->getPortableTensor(ends_index);
- auto strides_tensor = _tensor_reg->getPortableTensor(strides_index);
-
- auto begin_mask = node.param().begin_mask;
- auto end_mask = node.param().end_mask;
- auto shrink_axis_mask = node.param().shrink_axis_mask;
-
- auto fn = std::make_unique<ops::StridedSliceLayer>();
-
- fn->configure(input_tensor, starts_tensor, ends_tensor, strides_tensor, output_tensor, begin_mask,
- end_mask, shrink_axis_mask);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Split &node)
-{
- const auto num_splits = node.param().num_splits;
- assert(num_splits == static_cast<int>(node.getOutputs().size()));
-
- const auto input_idx{node.getInputs().at(ir::operation::Split::Input::INPUT)};
- const auto axis_idx{node.getInputs().at(ir::operation::Split::Input::AXIS)};
-
- auto in_tensor = _tensor_reg->getPortableTensor(input_idx);
- auto axis_tensor = _tensor_reg->getPortableTensor(axis_idx);
-
- std::vector<IPortableTensor *> out_tensors;
- for (auto &output_idx : node.getOutputs())
- out_tensors.emplace_back(_tensor_reg->getPortableTensor(output_idx));
-
- auto fn = std::make_unique<ops::SplitLayer>();
-
- fn->configure(in_tensor, axis_tensor, num_splits, out_tensors);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Shape &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::Shape::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
-
- auto fn = std::make_unique<ops::ShapeLayer>();
-
- fn->configure(ifm_tensor, ofm_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::ResizeBilinear &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ResizeBilinear::INPUT)};
-
- auto align_corners = node.param().align_corners;
- auto half_pixel_centers = node.param().half_pixel_centers;
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- auto fn = std::make_unique<ops::ResizeBilinearLayer>();
-
- if (node.getInputs().size() == 1)
- {
- fn->configure(input_tensor, output_tensor, node.param().height_out, node.param().width_out,
- align_corners, half_pixel_centers);
- }
- else
- {
- assert(node.getInputs().size() == 2);
- const auto size_index{node.getInputs().at(ir::operation::ResizeBilinear::SIZE)};
- auto size_tensor = _tensor_reg->getPortableTensor(size_index);
- if (size_tensor->is_constant())
- {
- auto size_vec = _ctx.at(size_index).asVector<int32_t>();
- const auto height_out = size_vec[0];
- const auto width_out = size_vec[1];
- fn->configure(input_tensor, output_tensor, height_out, width_out, align_corners,
- half_pixel_centers);
- }
- else
- {
- fn->configure(input_tensor, output_tensor, size_tensor, align_corners, half_pixel_centers);
- }
- }
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Reverse &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reverse::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::Reverse::AXIS)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto axis_tensor = _tensor_reg->getPortableTensor(axis_index);
-
- auto fn = std::make_unique<ops::ReverseLayer>();
-
- fn->configure(input_tensor, axis_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::ArgMax &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ArgMax::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::ArgMax::AXIS)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto axis_tensor = _tensor_reg->getPortableTensor(axis_index);
-
- auto fn = std::make_unique<ops::ArgMinMaxLayer>();
-
- fn->configure(input_tensor, output_tensor, axis_tensor, /* is_arg_max */ true);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Pool2D &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::Pool2D::Input::INPUT)};
-
- const auto kh = node.param().kh;
- const auto kw = node.param().kw;
- const auto stride = node.param().stride;
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout);
- const auto padding =
- ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh);
- const auto activation = node.param().activation;
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
-
- auto fn = std::make_unique<ops::PoolLayer>();
-
- fn->configure(ifm_tensor, padding.left, padding.right, padding.top, padding.bottom,
- stride.horizontal, stride.vertical, kw, kh, activation, ofm_tensor,
- convertPoolType(node.param().op_type));
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Pow &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::Pow::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::Pow::RHS)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto lhs_tensor = _tensor_reg->getPortableTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getPortableTensor(rhs_index);
-
- auto fn = std::make_unique<ops::PowLayer>();
-
- fn->configure(lhs_tensor, rhs_tensor, ir::Activation::NONE, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::L2Normalization &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(0)};
-
- auto output_alloc = _tensor_reg->getPortableTensor(output_index);
- auto input_alloc = _tensor_reg->getPortableTensor(input_index);
-
- auto fn = std::make_unique<ops::L2NormLayer>();
-
- fn->configure(input_alloc, output_alloc);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Range &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto start_index{node.getInputs().at(ir::operation::Range::START)};
- const auto limit_index{node.getInputs().at(ir::operation::Range::LIMIT)};
- const auto delta_index{node.getInputs().at(ir::operation::Range::DELTA)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto start_tensor = _tensor_reg->getPortableTensor(start_index);
- auto limit_tensor = _tensor_reg->getPortableTensor(limit_index);
- auto delta_tensor = _tensor_reg->getPortableTensor(delta_index);
-
- auto fn = std::make_unique<ops::RangeLayer>();
-
- fn->configure(start_tensor, limit_tensor, delta_tensor, output_tensor);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Rank &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(ir::operation::Shape::Input::INPUT)};
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
-
- auto fn = std::make_unique<ops::RankLayer>();
-
- fn->configure(ifm_tensor, ofm_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::SquaredDifference &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)};
-
- auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
- auto lhs_tensor = _tensor_reg->getPortableTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getPortableTensor(rhs_index);
-
- auto fn = std::make_unique<ops::SqDiffLayer>();
-
- fn->configure(lhs_tensor, rhs_tensor, ofm_tensor);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Tile &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Tile::INPUT)};
- const auto multiples_index{node.getInputs().at(ir::operation::Tile::MULTIPLES)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto multiples_tensor = _tensor_reg->getPortableTensor(multiples_index);
-
- auto fn = std::make_unique<ops::TileLayer>();
-
- fn->configure(input_tensor, multiples_tensor, output_tensor);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::MatrixBandPart &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::MatrixBandPart::INPUT)};
- const auto num_lower_index{node.getInputs().at(ir::operation::MatrixBandPart::NUM_LOWER_DIAG)};
- const auto num_upper_index{node.getInputs().at(ir::operation::MatrixBandPart::NUM_UPPER_DIAG)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto num_lower_tensor = _tensor_reg->getPortableTensor(num_lower_index);
- auto num_upper_tensor = _tensor_reg->getPortableTensor(num_upper_index);
-
- auto fn = std::make_unique<ops::MatrixBandPartLayer>();
-
- fn->configure(input_tensor, num_lower_tensor, num_upper_tensor, output_tensor);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::BatchMatMul &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::BatchMatMul::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::BatchMatMul::RHS)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto lhs_tensor = _tensor_reg->getPortableTensor(lhs_index);
- auto rhs_tensor = _tensor_reg->getPortableTensor(rhs_index);
-
- const auto adj_x = node.param().adj_x;
- const auto adj_y = node.param().adj_y;
-
- auto fn = std::make_unique<ops::BatchMatMulLayer>();
-
- fn->configure(lhs_tensor, rhs_tensor, adj_x, adj_y, output_tensor);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::BroadcastTo &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::BroadcastTo::INPUT)};
- const auto shape_index{node.getInputs().at(ir::operation::BroadcastTo::SHAPE)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto shape_tensor = _tensor_reg->getPortableTensor(shape_index);
-
- auto fn = std::make_unique<ops::BroadcastToLayer>();
-
- fn->configure(input_tensor, shape_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::FusedBatchNorm &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(ofm_index);
- std::vector<const IPortableTensor *> input_tensors;
- for (auto &ifm_idx : node.getInputs())
- input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx));
-
- const auto epsilon = node.param().epsilon;
- const auto is_training = node.param().is_training;
- const auto data_format = node.param().data_format;
-
- auto fn = std::make_unique<ops::FusedBatchNormLayer>();
-
- fn->configure(input_tensors, epsilon, is_training, data_format, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::LogSoftmax &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::LogSoftmax::Input::INPUT)};
-
- const auto beta = node.param().beta;
- const auto axis = node.param().axis;
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
-
- auto fn = std::make_unique<ops::LogSoftMaxLayer>();
-
- fn->configure(input_tensor, beta, axis, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::SpaceToBatchND::INPUT)};
- const auto block_shape_index{node.getInputs().at(ir::operation::SpaceToBatchND::BLOCK_SIZE)};
- const auto padding_index{node.getInputs().at(ir::operation::SpaceToBatchND::PADDINGS)};
-
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto block_shape_tensor = _tensor_reg->getPortableTensor(block_shape_index);
- auto padding_tensor = _tensor_reg->getPortableTensor(padding_index);
-
- auto fn = std::make_unique<ops::SpaceToBatchNDLayer>();
-
- fn->configure(input_tensor, block_shape_tensor, padding_tensor, output_tensor);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::SpaceToDepth &node)
-{
- const auto input_index{node.getInputs().at(ir::operation::SpaceToDepth::Input::INPUT)};
- const auto output_index{node.getOutputs().at(0)};
- auto block_size = node.param().block_size;
-
- auto input_tensor = _tensor_reg->getPortableTensor(input_index);
- auto output_tensor = _tensor_reg->getPortableTensor(output_index);
-
- auto fn = std::make_unique<ops::SpaceToDepthLayer>();
-
- fn->configure(input_tensor, block_size, output_tensor);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::StatelessRandomUniform &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto shape_index{node.getInputs().at(ir::operation::StatelessRandomUniform::SHAPE)};
- const auto seed_index{node.getInputs().at(ir::operation::StatelessRandomUniform::SEED)};
-
- auto output_alloc = _tensor_reg->getPortableTensor(output_index);
- auto shape_alloc = _tensor_reg->getPortableTensor(shape_index);
- auto seed_alloc = _tensor_reg->getPortableTensor(seed_index);
-
- auto fn = std::make_unique<ops::StatelessRandomUniformLayer>();
-
- fn->configure(shape_alloc, seed_alloc, output_alloc);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::SplitV &node)
-{
- const auto num_splits = node.param().num_splits;
- assert(num_splits == static_cast<int>(node.getOutputs().size()));
-
- const auto input_idx{node.getInputs().at(ir::operation::SplitV::Input::INPUT)};
- const auto size_splits{node.getInputs().at(ir::operation::SplitV::Input::SIZE_SPLITS)};
- const auto split_dim{node.getInputs().at(ir::operation::SplitV::Input::SPLIT_DIM)};
-
- auto in_tensor = _tensor_reg->getPortableTensor(input_idx);
- auto in_size_splits = _tensor_reg->getPortableTensor(size_splits);
- auto in_split_dim = _tensor_reg->getPortableTensor(split_dim);
-
- std::vector<IPortableTensor *> out_tensors;
- for (auto &output_idx : node.getOutputs())
- out_tensors.emplace_back(_tensor_reg->getPortableTensor(output_idx));
-
- auto fn = std::make_unique<ops::SplitVLayer>();
-
- fn->configure(in_tensor, in_size_splits, in_split_dim, num_splits, out_tensors);
-
- _return_fn = std::move(fn);
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/KernelGenerator.h b/runtime/onert/backend/cpu/KernelGenerator.h
deleted file mode 100644
index 786e68ee0..000000000
--- a/runtime/onert/backend/cpu/KernelGenerator.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_KERNEL_GENERATOR_H__
-#define __ONERT_BACKEND_CPU_KERNEL_GENERATOR_H__
-
-#include "ExternalContext.h"
-#include "TensorBuilder.h"
-#include "backend/cpu_common/TensorRegistry.h"
-#include "Tensor.h"
-
-#include <backend/CustomKernelBuilder.h>
-#include <backend/IKernelGenerator.h>
-#include <ir/Operands.h>
-#include <ir/Operations.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class KernelGenerator : public IKernelGenerator
-{
-public:
- KernelGenerator(const ir::Operands &operands_ctx, const ir::Operations &operations_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<cpu_common::TensorRegistry> &tensor_reg,
- const std::shared_ptr<custom::IKernelBuilder> &kernel_builder,
- const std::shared_ptr<ExternalContext> &external_context);
-
- using IKernelGenerator::visit;
-
- void visit(const ir::OpSequence &) override;
- void visit(const ir::operation::Conv2D &) override;
- void visit(const ir::operation::DepthwiseConv2D &) override;
- void visit(const ir::operation::Concat &) override;
- void visit(const ir::operation::Fill &) override;
- void visit(const ir::operation::FullyConnected &) override;
- void visit(const ir::operation::Reshape &) override;
- void visit(const ir::operation::Squeeze &) override;
- void visit(const ir::operation::Softmax &) override;
- void visit(const ir::operation::Comparison &) override;
- void visit(const ir::operation::BinaryArithmetic &) override;
- void visit(const ir::operation::Einsum &) override;
- void visit(const ir::operation::Gather &) override;
- void visit(const ir::operation::Custom &node) override;
- void visit(const ir::operation::ElementwiseActivation &) override;
- void visit(const ir::operation::ElementwiseBinary &) override;
- void visit(const ir::operation::ElementwiseUnary &) override;
- void visit(const ir::operation::ExpandDims &) override;
- void visit(const ir::operation::Pad &) override;
- void visit(const ir::operation::Pack &) override;
- void visit(const ir::operation::Unpack &) override;
- void visit(const ir::operation::OneHot &) override;
- void visit(const ir::operation::Transpose &) override;
- void visit(const ir::operation::Reduce &) override;
- void visit(const ir::operation::Select &) override;
- void visit(const ir::operation::Slice &) override;
- void visit(const ir::operation::StridedSlice &) override;
- void visit(const ir::operation::Split &) override;
- void visit(const ir::operation::Shape &) override;
- void visit(const ir::operation::ResizeBilinear &node) override;
- void visit(const ir::operation::Reverse &) override;
- void visit(const ir::operation::ArgMax &) override;
- void visit(const ir::operation::Pool2D &) override;
- void visit(const ir::operation::Pow &) override;
- void visit(const ir::operation::SquaredDifference &) override;
- void visit(const ir::operation::Tile &) override;
- void visit(const ir::operation::L2Normalization &) override;
- void visit(const ir::operation::Range &) override;
- void visit(const ir::operation::Rank &) override;
- void visit(const ir::operation::MatrixBandPart &) override;
- void visit(const ir::operation::BatchMatMul &) override;
- void visit(const ir::operation::BatchToSpaceND &) override;
- void visit(const ir::operation::BroadcastTo &) override;
- void visit(const ir::operation::FusedBatchNorm &) override;
- void visit(const ir::operation::LogSoftmax &) override;
- void visit(const ir::operation::SpaceToBatchND &) override;
- void visit(const ir::operation::SpaceToDepth &) override;
- void visit(const ir::operation::StatelessRandomUniform &) override;
- void visit(const ir::operation::SplitV &) override;
-
-private:
- const ir::Operands &_ctx;
- const ir::Operations &_operations_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
- std::shared_ptr<cpu_common::TensorRegistry> _tensor_reg;
- std::shared_ptr<backend::custom::IKernelBuilder> _kernel_builder;
- ir::Layout _current_op_seq_layout;
- const std::shared_ptr<ExternalContext> _external_context;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_KERNEL_GENERATOR_H__
diff --git a/runtime/onert/backend/cpu/StaticTensorManager.cc b/runtime/onert/backend/cpu/StaticTensorManager.cc
deleted file mode 100644
index 3edac897c..000000000
--- a/runtime/onert/backend/cpu/StaticTensorManager.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StaticTensorManager.h"
-#include "Tensor.h"
-
-#include <util/logging.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-StaticTensorManager::StaticTensorManager(const std::shared_ptr<cpu_common::TensorRegistry> &reg,
- cpu_common::DynamicTensorManager *dynamic_tensor_manager)
- : _nonconst_mgr{new cpu_common::MemoryManager()}, _tensors{reg},
- _dynamic_tensor_manager{dynamic_tensor_manager}
-{
- // DO NOTHING
-}
-
-void StaticTensorManager::allocateNonconsts(void)
-{
- _nonconst_mgr->allocate();
-
- for (auto &pair : _tensors->native_tensors())
- {
- const auto &ind = pair.first;
- auto tensor = pair.second.get();
- if (!_as_constants[ind] && !tensor->is_dynamic())
- {
- auto *buffer = _nonconst_mgr->getBuffer(ind);
- tensor->setBuffer(buffer);
-
- VERBOSE(CPU_StaticTensorManager) << "TENSOR(#" << ind.value()
- << "): " << static_cast<void *>(buffer) << std::endl;
- }
- }
-}
-
-void StaticTensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
-
-void StaticTensorManager::buildTensor(const ir::OperandIndex &ind,
- const ir::OperandInfo &tensor_info, ir::Layout backend_layout,
- bool as_const)
-{
- assert(!_tensors->getITensor(ind));
- if (as_const)
- {
- auto tensor = std::make_unique<ExternalTensor>(tensor_info, backend_layout);
- _tensors->setNativeTensor(ind, std::move(tensor));
- }
- else
- {
- auto tensor = std::make_unique<Tensor>(tensor_info, backend_layout,
- _dynamic_tensor_manager->dynamic_mem_mgr().get());
- _tensors->setNativeTensor(ind, std::move(tensor));
- }
- _as_constants[ind] = as_const;
-}
-
-void StaticTensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
-{
- assert(_tensors->getITensor(ind));
-
- // This method is called only when a tensor has proper shape
- assert(!_tensors->getITensor(ind)->is_dynamic());
-
- if (!_as_constants[ind])
- _nonconst_mgr->claimPlan(ind, size);
-}
-
-void StaticTensorManager::releasePlan(const ir::OperandIndex &ind)
-{
- assert(_tensors->getITensor(ind));
-
- // This method is called only when a tensor has proper shape
- assert(!_tensors->getITensor(ind)->is_dynamic());
-
- if (!_as_constants[ind])
- _nonconst_mgr->releasePlan(ind);
-}
-
-void StaticTensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
-{
- for (const auto &it : _tensors->native_tensors())
- fn(it.first);
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/StaticTensorManager.h b/runtime/onert/backend/cpu/StaticTensorManager.h
deleted file mode 100644
index 2af61e4e7..000000000
--- a/runtime/onert/backend/cpu/StaticTensorManager.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_STATICTENSOR_MANAGER_H__
-#define __ONERT_BACKEND_CPU_STATICTENSOR_MANAGER_H__
-
-#include "backend/IStaticTensorManager.h"
-#include "backend/cpu_common/DynamicTensorManager.h"
-#include "backend/cpu_common/MemoryManager.h"
-#include "backend/cpu_common/TensorRegistry.h"
-#include "backend/ITensorManager.h"
-#include "ir/OperandIndexMap.h"
-#include "ir/OperandInfo.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class StaticTensorManager : public backend::IStaticTensorManager
-{
-public:
- StaticTensorManager(const std::shared_ptr<cpu_common::TensorRegistry> &reg,
- cpu_common::DynamicTensorManager *dynamic_tensor_manager);
- virtual ~StaticTensorManager() = default;
-
- void allocateNonconsts(void);
- void deallocateNonconsts(void);
-
- void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info,
- ir::Layout backend_layout, bool as_const);
-
- void claimPlan(const ir::OperandIndex &ind, uint32_t size);
- void releasePlan(const ir::OperandIndex &ind);
-
- void iterate(const std::function<void(const ir::OperandIndex &)> &fn);
-
-private:
- std::unique_ptr<cpu_common::MemoryManager> _nonconst_mgr;
- const std::shared_ptr<cpu_common::TensorRegistry> _tensors;
- ir::OperandIndexMap<bool> _as_constants;
- cpu_common::DynamicTensorManager *_dynamic_tensor_manager;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_STATICTENSOR_MANAGER_H__
diff --git a/runtime/onert/backend/cpu/Tensor.cc b/runtime/onert/backend/cpu/Tensor.cc
deleted file mode 100644
index dac8f898b..000000000
--- a/runtime/onert/backend/cpu/Tensor.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Tensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-// `dynamic_cast` not working across library boundaries on NDK
-// With this as a key function, `dynamic_cast` works across dl
-ExternalTensor::~ExternalTensor() {}
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/Tensor.h b/runtime/onert/backend/cpu/Tensor.h
deleted file mode 100644
index 2ad2ad0fb..000000000
--- a/runtime/onert/backend/cpu/Tensor.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_TENSOR_H__
-#define __ONERT_BACKEND_CPU_TENSOR_H__
-
-#include <backend/cpu_common/Tensor.h>
-#include <ir/Data.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-using Tensor = cpu_common::Tensor;
-
-/**
- * @brief Class that uses data from external memory that is not managed by a backend
- * instead of allocating and copying the data. ExternalTensor's data pointer points to
- * an address of memory such as where memory is already allocated, or mmapped area.
- * This is meaning that ExternalTensor can take all of types' ir::Data.
- * To support this, assume below things no padding, always NHWC layout,
- * constant tensor and not dynamic.
- */
-class ExternalTensor : public Tensor
-{
-public:
- ExternalTensor() = delete;
- virtual ~ExternalTensor();
-
-public:
- ExternalTensor(const ir::OperandInfo &info, const ir::Layout layout)
- : Tensor(info, layout, nullptr)
- {
- assert(_layout == ir::Layout::NHWC);
- assert(_info.isConstant());
- assert(_info.isDynamic() == false);
- }
-
-public:
- /**
- * @brief set Data to be shared from external so that this ExternalTensor will not be
- * allocated on CPU backend
- * @param[in] data data of Operand to be set
- */
- void setData(const std::shared_ptr<ir::Data> data)
- {
- assert(data != nullptr);
- _data = data;
- // Note. Some op such as cker::Conv could take buffer as nullptr.
- // That's why _buffer also would be used
- _buffer = const_cast<uint8_t *>(_data->base());
- }
-
-public:
- uint8_t *buffer() const override { return _buffer; }
-
- bool is_constant() const override { return true; }
- bool is_dynamic() const override { return false; }
- void set_dynamic() override
- {
- throw std::runtime_error("This tensor does not support changing dynamic");
- }
-
- void setShape(const ir::Shape &) override
- {
- throw std::runtime_error("This tensor does not support changing shape");
- }
-
- void increase_ref() override { ++_num_references; }
-
- void decrease_ref() override
- {
- assert(_data != nullptr);
- assert(_num_references > 0);
- --_num_references;
- if (_num_references == 0)
- {
- _data.reset();
- _buffer = nullptr;
- }
- }
-
- /**
- * @brief Reset reference count to zero and release data
- */
- void reset_ref() override
- {
- assert(_data != nullptr);
- assert(_num_references > 0);
- _num_references = 0;
-
- _data.reset();
- _buffer = nullptr;
- }
-
- int32_t num_references() override { return _num_references; }
-
-private:
- std::shared_ptr<const ir::Data> _data;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_TENSOR_H__
diff --git a/runtime/onert/backend/cpu/TensorBuilder.cc b/runtime/onert/backend/cpu/TensorBuilder.cc
deleted file mode 100644
index e6bc55b0b..000000000
--- a/runtime/onert/backend/cpu/TensorBuilder.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TensorBuilder.h"
-
-#include <util/logging.h>
-
-#include <cassert>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-TensorBuilder::TensorBuilder(const std::shared_ptr<cpu_common::TensorRegistry> &tensor_reg)
- : _tensor_reg{tensor_reg},
- _dynamic_tensor_mgr{new cpu_common::DynamicTensorManager(_tensor_reg)},
- _static_tensor_mgr{new StaticTensorManager(_tensor_reg, _dynamic_tensor_mgr.get())}
-{
- /* empty */
-}
-
-void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
- ir::Layout layout)
-{
- _tensor_info_map.emplace(ind, info);
-
- // CPU backend supports only one layout as NHWC
- assert(layout == ir::Layout::NHWC);
- if (info.isDynamic())
- {
- _dynamic_tensor_mgr->buildTensor(ind, info, layout);
- }
- else
- {
- _static_tensor_mgr->buildTensor(ind, info, layout, info.isConstant());
- }
-}
-
-void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
-{
- assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
- const auto tensor_info = _tensor_info_map.at(ind);
-
- if (!_tensor_reg->getNativeTensor(ind)->is_dynamic())
- {
- const auto size = tensor_info.total_size();
- _static_tensor_mgr->claimPlan(ind, size);
- }
-}
-
-void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind)
-{
- if (!_tensor_reg->getNativeTensor(ind)->is_dynamic())
- {
- _static_tensor_mgr->releasePlan(ind);
- }
-}
-
-bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const
-{
- return _tensor_info_map.find(ind) != _tensor_info_map.end();
-}
-
-void TensorBuilder::prepare(void) { _static_tensor_mgr->allocateNonconsts(); }
-
-void TensorBuilder::allocate()
-{
- // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
- // This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/TensorBuilder.h b/runtime/onert/backend/cpu/TensorBuilder.h
deleted file mode 100644
index 448abc229..000000000
--- a/runtime/onert/backend/cpu/TensorBuilder.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_TENSOR_BUILDER_H__
-#define __ONERT_BACKEND_CPU_TENSOR_BUILDER_H__
-
-#include <backend/cpu_common/DynamicTensorManager.h>
-#include <backend/cpu_common/TensorRegistry.h>
-
-#include <backend/ITensorBuilder.h>
-#include <ir/OperandIndexMap.h>
-
-#include "StaticTensorManager.h"
-#include "Tensor.h"
-
-#include <unordered_map>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-
-class TensorBuilder : public ITensorBuilder
-{
-public:
- TensorBuilder(const std::shared_ptr<cpu_common::TensorRegistry> &tensor_reg);
-
- /**
- * @brief Register tensor information to allocate on CPU backend
- * @param[in] ind Operand index
- * @param[in] info Operand information
- * @param[in] layout Operand data layout
- */
- void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
- ir::Layout backend_layout) override;
-
- void notifyFirstUse(const ir::OperandIndex &) override;
- void notifyLastUse(const ir::OperandIndex &) override;
-
- bool isRegistered(const ir::OperandIndex &) const override;
-
- void prepare(void) override;
- void allocate() override;
- void postFunctionPrepare() override { /* DO NOTHING */}
-
- IDynamicTensorManager *dynamicTensorManager(void) override { return _dynamic_tensor_mgr.get(); }
-
-private:
- const std::shared_ptr<cpu_common::TensorRegistry> _tensor_reg;
- std::unique_ptr<cpu_common::DynamicTensorManager> _dynamic_tensor_mgr;
- std::unique_ptr<StaticTensorManager> _static_tensor_mgr;
- ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_TENSOR_BUILDER_H__
diff --git a/runtime/onert/backend/cpu/cpu.cc b/runtime/onert/backend/cpu/cpu.cc
deleted file mode 100644
index 5385bb2a3..000000000
--- a/runtime/onert/backend/cpu/cpu.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Backend.h"
-
-#include <util/logging.h>
-
-extern "C" {
-onert::backend::Backend *onert_backend_create()
-{
- VERBOSE(onert_backend_create) << "'cpu' loaded\n";
- return new onert::backend::cpu::Backend;
-}
-
-void onert_backend_destroy(onert::backend::Backend *backend)
-{
- VERBOSE(onert_backend_create) << "'cpu' unloaded\n";
- delete backend;
-}
-}
diff --git a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc b/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc
deleted file mode 100644
index 2fd284c91..000000000
--- a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ArgMinMaxLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/ArgMinMax.h>
-#include <assert.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-namespace
-{
-template <typename T> std::function<bool(T, T)> GetComparefunction(bool is_arg_max)
-{
- if (is_arg_max)
- {
- return std::greater<T>();
- }
- else
- {
- return std::less<T>();
- }
-}
-}
-
-void ArgMinMaxLayer::configure(const IPortableTensor *input, IPortableTensor *output,
- const IPortableTensor *axis, bool is_arg_max)
-{
- _input = input;
- _output = output;
- _axis = axis;
- _is_arg_max = is_arg_max;
-}
-
-void ArgMinMaxLayer::run()
-{
- if (_axis->total_size() != sizeof(int32_t))
- {
- throw std::runtime_error("ArgMinMax: wrong shape of axis");
- }
- auto axis = *reinterpret_cast<const int32_t *>(_axis->buffer());
- if (axis < 0)
- {
- axis += _input->num_dimensions();
- }
-#define TF_LITE_ARG_MIN_MAX(input_type, axis_type, output_type) \
- ArgMinMax(getTensorShape(_input), reinterpret_cast<const input_type *>(_input->buffer()), \
- getTensorShape(_output), reinterpret_cast<output_type *>(_output->buffer()), axis, \
- GetComparefunction<input_type>(_is_arg_max));
- if (_output->data_type() == ir::DataType::INT32)
- {
- switch (_input->data_type())
- {
- case ir::DataType::FLOAT32:
- TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
- break;
- case ir::DataType::QUANT_UINT8_ASYMM:
- case ir::DataType::UINT8:
- TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
- break;
- case ir::DataType::INT32:
- TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
- break;
- default:
- throw std::runtime_error("ArgMinMax: unsupported data type");
- }
- }
- else if (_output->data_type() == ir::DataType::INT64)
- {
- switch (_input->data_type())
- {
- case ir::DataType::FLOAT32:
- TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t);
- break;
- case ir::DataType::QUANT_UINT8_ASYMM:
- case ir::DataType::UINT8:
- TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t);
- break;
- case ir::DataType::INT32:
- TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
- break;
- default:
- throw std::runtime_error("ArgMinMax: unsupported data type");
- }
- }
- else
- {
- throw std::runtime_error("ArgMinMax: unsupported data type");
- }
-
-#undef TF_LITE_ARG_MIN_MAX
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.h b/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.h
deleted file mode 100644
index 4c864cb98..000000000
--- a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_ARGMINMAXLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_ARGMINMAXLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ArgMinMaxLayer : public ::onert::exec::IFunction
-{
-public:
- ArgMinMaxLayer() : _input(nullptr), _output(nullptr), _axis(nullptr), _is_arg_max(true) {}
-
-public:
- void configure(const IPortableTensor *indices, IPortableTensor *output,
- const IPortableTensor *axis, bool is_arg_max);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
- const IPortableTensor *_axis;
- bool _is_arg_max;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_ARGMINMAXLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc b/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc
deleted file mode 100644
index 7ef023788..000000000
--- a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "BatchMatMulLayer.h"
-
-#include <cker/operation/BatchMatMul.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-BatchMatMulLayer::BatchMatMulLayer()
- : _lhs(nullptr), _rhs(nullptr), _output(nullptr), _adj_x(false), _adj_y(false),
- _kernel(new nnfw::cker::BatchMatMul())
-{
- // DO NOTHING
-}
-
-BatchMatMulLayer::~BatchMatMulLayer() = default;
-
-void BatchMatMulLayer::batchMatMulFloat32()
-{
- nnfw::cker::BatchMatMul &batchmatmul_kernel = *_kernel;
- nnfw::cker::Shape lhs_shape = getTensorShape(_lhs);
- nnfw::cker::Shape rhs_shape = getTensorShape(_rhs);
- nnfw::cker::Shape output_shape = getTensorShape(_output);
-
- // TODO implement for constant input
-
- batchmatmul_kernel.prepare(lhs_shape, rhs_shape, _adj_x, _adj_y);
- batchmatmul_kernel(lhs_shape, reinterpret_cast<const float *>(_lhs->buffer()), rhs_shape,
- reinterpret_cast<const float *>(_rhs->buffer()), _adj_x, _adj_y, output_shape,
- reinterpret_cast<float *>(_output->buffer()));
-}
-
-void BatchMatMulLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs, bool adj_x,
- bool adj_y, IPortableTensor *output)
-{
- assert(lhs != nullptr);
- assert(rhs != nullptr);
- assert(output != nullptr);
-
- _lhs = lhs;
- _rhs = rhs;
- _adj_x = adj_x;
- _adj_y = adj_y;
- _output = output;
-}
-
-void BatchMatMulLayer::run()
-{
- if (_lhs->data_type() == OperandType::FLOAT32)
- {
- batchMatMulFloat32();
- }
- else
- {
- throw std::runtime_error{"BatchMatMul: unsupported data type"};
- }
-}
-
-#undef AVGPOOLING_PARAMETERS
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.h b/runtime/onert/backend/cpu/ops/BatchMatMulLayer.h
deleted file mode 100644
index 6770e218b..000000000
--- a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_BATCH_MATMUL_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_BATCH_MATMUL_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace nnfw
-{
-namespace cker
-{
-class BatchMatMul;
-}
-} // namespace nnfw
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class BatchMatMulLayer : public ::onert::exec::IFunction
-{
-public:
- BatchMatMulLayer();
- ~BatchMatMulLayer();
-
-public:
- void batchMatMulFloat32();
-
- void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, bool adj_x, bool adj_y,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_lhs;
- const IPortableTensor *_rhs;
- IPortableTensor *_output;
-
- bool _adj_x;
- bool _adj_y;
-
- std::unique_ptr<nnfw::cker::BatchMatMul> _kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_BATCH_MATMUL_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc
deleted file mode 100644
index f2f10eb9d..000000000
--- a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "BatchToSpaceNDLayer.h"
-
-#include <cker/operation/BatchToSpaceND.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-BatchToSpaceNDLayer::BatchToSpaceNDLayer()
- : _input(nullptr), _output(nullptr), _block_shape(nullptr), _crops(nullptr)
-{
- // DO NOTHING
-}
-
-template <typename T> void BatchToSpaceNDLayer::batchToSpaceNDGeneric()
-{
- const int32_t NNapiCrops[]{0, 0, 0, 0};
- const int32_t *_crops_buffer;
-
- if (_crops == nullptr)
- {
- _crops_buffer = NNapiCrops;
- }
- else
- {
- _crops_buffer = reinterpret_cast<const int32_t *>(_crops->buffer());
- }
- nnfw::cker::BatchToSpaceND<T>(
- getTensorShape(_input), reinterpret_cast<const T *>(_input->buffer()),
- reinterpret_cast<const int32_t *>(_block_shape->buffer()), _crops_buffer,
- getTensorShape(_output), reinterpret_cast<T *>(_output->buffer()));
-}
-
-void BatchToSpaceNDLayer::configure(const IPortableTensor *input, IPortableTensor *output,
- IPortableTensor *block_shape, IPortableTensor *crops)
-{
- _output = output;
- _input = input;
- _block_shape = block_shape;
- _crops = crops;
-}
-
-void BatchToSpaceNDLayer::run()
-{
- if (_output->data_type() == OperandType::FLOAT32)
- {
- batchToSpaceNDGeneric<float>();
- }
- else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- batchToSpaceNDGeneric<uint8_t>();
- }
- else
- {
- throw std::runtime_error{"NYI"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h
deleted file mode 100644
index 6e25b241b..000000000
--- a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class BatchToSpaceNDLayer : public ::onert::exec::IFunction
-{
-public:
- BatchToSpaceNDLayer();
-
-public:
- template <typename T> void batchToSpaceNDGeneric();
-
- void configure(const IPortableTensor *input, IPortableTensor *output,
- IPortableTensor *block_shape, IPortableTensor *crops);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
- IPortableTensor *_block_shape;
- IPortableTensor *_crops;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc b/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc
deleted file mode 100644
index 8e51daad5..000000000
--- a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "BinaryArithmeticLayer.h"
-
-#include <cker/operation/BinaryArithmeticOps.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-namespace
-{
-
-template <nnfw::cker::BinaryArithmeticOpType arithmetic_type, typename T>
-void eval(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output,
- nnfw::cker::BinaryArithmeticOpParam op_params)
-{
- const auto lhs_shape = getTensorShape(lhs);
- const auto rhs_shape = getTensorShape(rhs);
- const bool need_broadcast = nnfw::cker::ProcessBroadcastShapes(lhs_shape, rhs_shape, &op_params);
- if (need_broadcast)
- {
- nnfw::cker::BroadcastBinaryArithmeticOp<arithmetic_type>(
- op_params, lhs_shape, reinterpret_cast<const T *>(lhs->buffer()), rhs_shape,
- reinterpret_cast<const T *>(rhs->buffer()), getTensorShape(output),
- reinterpret_cast<T *>(output->buffer()));
- return;
- }
-
- nnfw::cker::BinaryArithmeticOp<arithmetic_type>(
- op_params, lhs_shape, reinterpret_cast<const T *>(lhs->buffer()), rhs_shape,
- reinterpret_cast<const T *>(rhs->buffer()), getTensorShape(output),
- reinterpret_cast<T *>(output->buffer()));
-}
-
-template <nnfw::cker::BinaryArithmeticOpType arithmetic_type>
-std::function<void(const IPortableTensor *, const IPortableTensor *, IPortableTensor *)>
-generateKernelGeneric(const IPortableTensor *lhs, const ir::Activation activation,
- nnfw::cker::BinaryArithmeticOpParam op_params)
-{
- switch (lhs->data_type())
- {
- case OperandType::FLOAT32:
- {
- float output_activation_min = 0, output_activation_max = 0;
- CalculateActivationRange(activation, &output_activation_min, &output_activation_max);
- op_params.float_activation_max = output_activation_max;
- op_params.float_activation_min = output_activation_min;
- return std::bind(&eval<arithmetic_type, float>, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, op_params);
- break;
- }
- case OperandType::INT32:
- {
- int32_t output_activation_min = 0, output_activation_max = 0;
- CalculateActivationRange(activation, &output_activation_min, &output_activation_max);
- op_params.quantized_activation_max = output_activation_max;
- op_params.quantized_activation_min = output_activation_min;
- return std::bind(eval<arithmetic_type, int32_t>, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, op_params);
- break;
- }
- default:
- throw std::runtime_error{"BinaryArithmetic(generic): Unsupported data type"};
- }
-}
-
-void setAddOrSubQuant8Params(const IPortableTensor *lhs, const IPortableTensor *rhs,
- IPortableTensor *output, ir::Activation activation,
- nnfw::cker::BinaryArithmeticOpParam *params)
-{
- int32_t output_activation_min, output_activation_max;
- CalculateActivationRangeUint8(activation, output, &output_activation_min, &output_activation_max);
- nnfw::cker::BinaryArithmeticOpParam &op_params = *params;
- op_params.quantized_activation_max = output_activation_max;
- op_params.quantized_activation_min = output_activation_min;
- // Parameters for scaled quantized computation
- op_params.left_shift = 20;
- // Zero-points of input and output tensors
- op_params.input1_offset = -lhs->data_offset();
- op_params.input2_offset = -rhs->data_offset();
- op_params.output_offset = output->data_offset();
- assert((op_params.input1_offset >= 0) && (op_params.input1_offset <= 255));
- assert((op_params.input2_offset >= 0) && (op_params.input2_offset <= 255));
- assert((op_params.output_offset >= 0) && (op_params.output_offset <= 255));
-
- // Compute normalized scale for _lhs and _rhs values,
- // and represent in 32-bit fixed point
- const double norm_max_scale = 2 * std::max(lhs->data_scale(), rhs->data_scale());
- const double real_lhs_scale = lhs->data_scale() / norm_max_scale;
- const double real_rhs_scale = rhs->data_scale() / norm_max_scale;
- // output scale is used to normalize final result, so we invert the scale here
- const double real_output_scale =
- norm_max_scale / (output->data_scale() * (1 << op_params.left_shift));
-
- // Represent the scales as fixed int32_t multipliers, and int32_t shifts
- QuantizeMultiplier(real_lhs_scale, &op_params.input1_multiplier, &op_params.input1_shift);
- QuantizeMultiplier(real_rhs_scale, &op_params.input2_multiplier, &op_params.input2_shift);
- QuantizeMultiplier(real_output_scale, &op_params.output_multiplier, &op_params.output_shift);
-}
-
-void setMulQuant8Params(const IPortableTensor *lhs, const IPortableTensor *rhs,
- IPortableTensor *output, ir::Activation activation,
- nnfw::cker::BinaryArithmeticOpParam *params)
-{
- int32_t output_activation_min, output_activation_max;
- CalculateActivationRangeUint8(activation, output, &output_activation_min, &output_activation_max);
- nnfw::cker::BinaryArithmeticOpParam &op_params = *params;
-
- op_params.quantized_activation_max = output_activation_max;
- op_params.quantized_activation_min = output_activation_min;
- op_params.input1_offset = -lhs->data_offset();
- op_params.input2_offset = -rhs->data_offset();
- op_params.output_offset = output->data_offset();
-
- double real_multiplier = lhs->data_scale() * rhs->data_scale() / output->data_scale();
- QuantizeMultiplier(real_multiplier, &op_params.output_multiplier, &op_params.output_shift);
-}
-
-} // namespace
-
-void BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
- IPortableTensor *output, const ir::Activation activation,
- const ArithmeticType arithmetic_type)
-{
- assert(lhs != nullptr);
- assert(rhs != nullptr);
- assert(output != nullptr);
-
- _lhs = lhs;
- _rhs = rhs;
- _output = output;
-
- nnfw::cker::BinaryArithmeticOpParam op_params;
- switch (arithmetic_type)
- {
- case ArithmeticType::kAdd:
- if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- setAddOrSubQuant8Params(_lhs, _rhs, _output, activation, &op_params);
- _kernel = std::bind(&eval<nnfw::cker::BinaryArithmeticOpType::ADD, uint8_t>,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
- op_params);
- }
- else
- {
- _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::ADD>(_lhs, activation,
- op_params);
- }
- break;
- case ArithmeticType::kSub:
- if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- setAddOrSubQuant8Params(_lhs, _rhs, _output, activation, &op_params);
- op_params.input2_multiplier *= -1;
- _kernel = std::bind(&eval<nnfw::cker::BinaryArithmeticOpType::SUB, uint8_t>,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
- op_params);
- }
- else
- {
- _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::SUB>(_lhs, activation,
- op_params);
- }
- break;
- case ArithmeticType::kMul:
- if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- nnfw::cker::BinaryArithmeticOpParam op_params;
- setMulQuant8Params(_lhs, _rhs, _output, activation, &op_params);
- _kernel = std::bind(&eval<nnfw::cker::BinaryArithmeticOpType::MUL, uint8_t>,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
- op_params);
- }
- else
- {
- _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::MUL>(_lhs, activation,
- op_params);
- }
- break;
- case ArithmeticType::kDiv:
- if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- throw std::runtime_error{
- "BinaryArithmetic(Div): Div operation does not support quantization"};
- }
- else if (_lhs->data_type() == OperandType::INT32)
- {
- throw std::runtime_error{"BinaryArithmetic(Div): Unsupported data type"};
- }
- else
- {
- _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::DIV>(_lhs, activation,
- op_params);
- }
- break;
- default:
- throw std::runtime_error{"BinaryArithmetic: Unsupported BinaryArithmetic type"};
- }
-}
-
-void BinaryArithmeticLayer::run() { _kernel(_lhs, _rhs, _output); }
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.h b/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.h
deleted file mode 100644
index d6b33ad07..000000000
--- a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_BINARYARITHMETICLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_BINARYARITHMETICLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-enum class ArithmeticType
-{
- kAdd,
- kSub,
- kMul,
- kDiv,
-};
-
-class BinaryArithmeticLayer : public ::onert::exec::IFunction
-{
-public:
- BinaryArithmeticLayer() : _lhs(nullptr), _rhs(nullptr), _output(nullptr)
- {
- // DO NOTHING
- }
-
-public:
- void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output,
- const ir::Activation activation, const ArithmeticType arithmetic_type);
-
- void run() override;
-
-private:
- const IPortableTensor *_lhs;
- const IPortableTensor *_rhs;
- IPortableTensor *_output;
-
- std::function<void(const IPortableTensor *, const IPortableTensor *, IPortableTensor *)> _kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_BINARYARITHMETICLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc b/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc
deleted file mode 100644
index d9c1bbfc5..000000000
--- a/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "BroadcastToLayer.h"
-
-#include <cker/operation/BroadcastTo.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-BroadcastToLayer::BroadcastToLayer() : _input(nullptr), _shape(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void BroadcastToLayer::configure(const IPortableTensor *input, const IPortableTensor *shape,
- IPortableTensor *output)
-{
- _input = input;
- _shape = shape;
- _output = output;
-}
-
-void BroadcastToLayer::run()
-{
- // NOTE : It was implemented follows tf.broadcast_to operation works and
- // Api Document(https://www.tensorflow.org/api_docs/python/tf/broadcast_to)
-
- switch (_output->data_type())
- {
- // ToDo : It need to support INT8 and UINT8 also when will be applied quantization.
- case OperandType::FLOAT32:
- nnfw::cker::BroadcastTo<float>(
- getTensorShape(_input), reinterpret_cast<float *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
- break;
- case OperandType::INT32:
- nnfw::cker::BroadcastTo<int32_t>(
- getTensorShape(_input), reinterpret_cast<int32_t *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<int32_t *>(_output->buffer()));
- break;
- case OperandType::UINT32:
- nnfw::cker::BroadcastTo<uint32_t>(
- getTensorShape(_input), reinterpret_cast<uint32_t *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<uint32_t *>(_output->buffer()));
- break;
- default:
- throw std::runtime_error{"BroadcastToLayer: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/BroadcastToLayer.h b/runtime/onert/backend/cpu/ops/BroadcastToLayer.h
deleted file mode 100644
index 8e8433fc9..000000000
--- a/runtime/onert/backend/cpu/ops/BroadcastToLayer.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_BROADCASTLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_BROADCASTLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class BroadcastToLayer : public ::onert::exec::IFunction
-{
-public:
- BroadcastToLayer();
-
-public:
- void configure(const IPortableTensor *input, const IPortableTensor *shape,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_shape;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_BROADCASTLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/CompareLayer.cc b/runtime/onert/backend/cpu/ops/CompareLayer.cc
deleted file mode 100644
index adf902aaf..000000000
--- a/runtime/onert/backend/cpu/ops/CompareLayer.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "CompareLayer.h"
-
-#include "OperationUtils.h"
-
-#include <assert.h>
-#include <cker/operation/Comparison.h>
-using namespace nnfw::cker;
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-namespace
-{
-
-using OpType = onert::ir::operation::Comparison::ComparisonType;
-using namespace onert::backend::cpu;
-
-// Assumes these enum values to be in the order like this
-static_assert(static_cast<int>(OpType::Equal) == 0, "An OpType value has changed!");
-static_assert(static_cast<int>(OpType::NotEqual) == 1, "An OpType value has changed!");
-static_assert(static_cast<int>(OpType::Greater) == 2, "An OpType value has changed!");
-static_assert(static_cast<int>(OpType::GreaterEqual) == 3, "An OpType value has changed!");
-static_assert(static_cast<int>(OpType::Less) == 4, "An OpType value has changed!");
-static_assert(static_cast<int>(OpType::LessEqual) == 5, "An OpType value has changed!");
-
-template <typename T>
-void compareQuant8(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output,
- OpType op_type)
-{
- nnfw::cker::ComparisonParams params;
- params.left_shift = 8;
- params.input1_offset = -lhs->data_offset();
- params.input2_offset = -rhs->data_offset();
- const double norm_max_scale =
- 2 * std::max(std::abs(lhs->data_scale()), std::abs(rhs->data_scale()));
- const double adjusted_lhs_scale = lhs->data_scale() / norm_max_scale;
- const double adjusted_rhs_scale = rhs->data_scale() / norm_max_scale;
- QuantizeMultiplierSmallerThanOneExp(adjusted_lhs_scale, &params.input1_multiplier,
- &params.input1_shift);
- QuantizeMultiplierSmallerThanOneExp(adjusted_rhs_scale, &params.input2_multiplier,
- &params.input2_shift);
- params.is_broadcast = !HaveSameShapes(lhs, rhs);
-
- using CompareFunction =
- void (*)(ComparisonParams & params, const Shape &input1_shape, const T *input1_data,
- const Shape &input2_shape, const T *input2_data, const Shape &output_shape,
- bool *output_data);
-
- static const CompareFunction broadcast_fns[] = {
- Broadcast4DSlowEqualWithScaling, Broadcast4DSlowNotEqualWithScaling,
- Broadcast4DSlowGreaterWithScaling, Broadcast4DSlowGreaterEqualWithScaling,
- Broadcast4DSlowLessWithScaling, Broadcast4DSlowLessEqualWithScaling,
- };
- static const CompareFunction non_broadcast_fns[] = {
- EqualWithScaling, NotEqualWithScaling, GreaterWithScaling,
- GreaterEqualWithScaling, LessWithScaling, LessEqualWithScaling,
- };
-
- static_assert(sizeof(broadcast_fns) == sizeof(non_broadcast_fns),
- "Sizes of broadcast_fns and non_broadcast_fns must match!");
-
- auto index = static_cast<int>(op_type);
- if (index < 0 || index >= static_cast<int>(sizeof(broadcast_fns) / sizeof(broadcast_fns[0])))
- throw std::runtime_error{"Invalid OpType for CompareLayer"};
-
- CompareFunction fn = (params.is_broadcast ? broadcast_fns[index] : non_broadcast_fns[index]);
-
- fn(params, getExtendedTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()),
- getExtendedTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()),
- getExtendedTensorShape(output), reinterpret_cast<bool *>(output->buffer()));
-}
-
-template <typename T>
-void compareScalar(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output,
- OpType op_type)
-{
- bool requires_broadcast = !HaveSameShapes(lhs, rhs);
-
- using CompareFunction =
- void (*)(const Shape &input1_shape, const T *input1_data, const Shape &input2_shape,
- const T *input2_data, const Shape &output_shape, bool *output_data);
-
- static const CompareFunction broadcast_fns[] = {
- Broadcast4DSlowEqual, Broadcast4DSlowNotEqual, Broadcast4DSlowGreater,
- Broadcast4DSlowGreaterEqual, Broadcast4DSlowLess, Broadcast4DSlowLessEqual,
- };
- static const CompareFunction non_broadcast_fns[] = {
- EqualNoScaling, NotEqualNoScaling, GreaterNoScaling,
- GreaterEqualNoScaling, LessNoScaling, LessEqualNoScaling,
- };
-
- static_assert(sizeof(broadcast_fns) == sizeof(non_broadcast_fns),
- "Sizes of broadcast_fns and non_broadcast_fns must match!");
-
- auto index = static_cast<int>(op_type);
- if (index < 0 || index >= static_cast<int>(sizeof(broadcast_fns) / sizeof(broadcast_fns[0])))
- throw std::runtime_error{"Invalid OpType for CompareLayer"};
-
- CompareFunction fn = (requires_broadcast ? broadcast_fns[index] : non_broadcast_fns[index]);
-
- fn(getExtendedTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()),
- getExtendedTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()),
- getExtendedTensorShape(output), reinterpret_cast<bool *>(output->buffer()));
-}
-
-} // namespace
-
-CompareLayer::CompareLayer()
- : _lhs(nullptr), _rhs(nullptr), _output(nullptr),
- _op_type(ir::operation::Comparison::ComparisonType::Equal)
-{
- // DO NOTHING
-}
-
-void CompareLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
- const OpType op_type, IPortableTensor *output)
-{
- _lhs = lhs;
- _rhs = rhs;
- _op_type = op_type;
- _output = output;
-}
-
-void CompareLayer::run()
-{
- if (_lhs->data_type() == OperandType::FLOAT32)
- {
- compareScalar<float>(_lhs, _rhs, _output, _op_type);
- }
- else if (_lhs->data_type() == OperandType::INT32)
- {
- compareScalar<int32_t>(_lhs, _rhs, _output, _op_type);
- }
- else if (_lhs->data_type() == OperandType::INT64)
- {
- compareScalar<int64_t>(_lhs, _rhs, _output, _op_type);
- }
- else if (_lhs->data_type() == OperandType::BOOL8)
- {
- compareScalar<uint8_t>(_lhs, _rhs, _output, _op_type);
- }
- else if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- compareQuant8<uint8_t>(_lhs, _rhs, _output, _op_type);
- }
- else
- {
- throw std::runtime_error{"Compare: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/CompareLayer.h b/runtime/onert/backend/cpu/ops/CompareLayer.h
deleted file mode 100644
index add360ef8..000000000
--- a/runtime/onert/backend/cpu/ops/CompareLayer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_COMPARELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_COMPARELAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-#include <ir/operation/Comparison.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class CompareLayer : public ::onert::exec::IFunction
-{
-public:
- CompareLayer();
-
-public:
- void configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
- const ir::operation::Comparison::ComparisonType op_type, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_lhs;
- const IPortableTensor *_rhs;
- IPortableTensor *_output;
- ir::operation::Comparison::ComparisonType _op_type;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_COMPARELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ConcatLayer.cc b/runtime/onert/backend/cpu/ops/ConcatLayer.cc
deleted file mode 100644
index d26ed7378..000000000
--- a/runtime/onert/backend/cpu/ops/ConcatLayer.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConcatLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Concatenation.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-ConcatLayer::ConcatLayer() : _inputs(), _output(nullptr), _axis(0)
-{
- // DO NOTHING
-}
-
-template <typename T> void ConcatLayer::concatenationGeneral()
-{
- uint32_t num_inputs = _inputs.size();
-
- nnfw::cker::ConcatenationParams op_params;
- op_params.axis = _axis;
- op_params.inputs_count = num_inputs;
-
- std::vector<nnfw::cker::Shape *> inputDimsPtr;
- std::vector<nnfw::cker::Shape> inputDims;
- inputDimsPtr.reserve(num_inputs);
- inputDims.reserve(num_inputs);
-
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- inputDims.push_back(getTensorShape(_inputs[i]));
- inputDimsPtr.push_back(&inputDims[i]);
- }
-
- std::vector<const T *> inputDataPtrs;
-
- for (const auto input : _inputs)
- {
- inputDataPtrs.emplace_back(reinterpret_cast<const T *>(input->buffer()));
- }
-
- nnfw::cker::Concatenation<T>(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
- getTensorShape(_output), reinterpret_cast<T *>(_output->buffer()));
-}
-void ConcatLayer::concatenationQuant8()
-{
- uint32_t num_inputs = _inputs.size();
-
- std::vector<int32_t> input_zeropoints(num_inputs);
- std::vector<float> input_scales(num_inputs);
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- input_zeropoints[i] = _inputs[i]->data_offset();
- input_scales[i] = _inputs[i]->data_scale();
- }
-
- nnfw::cker::ConcatenationParams op_params;
- op_params.axis = _axis;
- op_params.inputs_count = num_inputs;
- op_params.input_zeropoint = input_zeropoints.data();
- op_params.input_scale = input_scales.data();
- op_params.output_zeropoint = _output->data_offset();
- op_params.output_scale = _output->data_scale();
-
- std::vector<nnfw::cker::Shape *> inputDimsPtr;
- std::vector<nnfw::cker::Shape> inputDims;
- inputDimsPtr.reserve(num_inputs);
- inputDims.reserve(num_inputs);
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- inputDims.push_back(getTensorShape(_inputs[i]));
- inputDimsPtr.push_back(&inputDims[i]);
- }
-
- std::vector<const uint8_t *> inputDataPtrs;
- for (const auto input : _inputs)
- {
- inputDataPtrs.emplace_back(reinterpret_cast<const uint8_t *>(input->buffer()));
- }
-
- nnfw::cker::ConcatenationWithScaling(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
- getTensorShape(_output),
- reinterpret_cast<uint8_t *>(_output->buffer()));
-}
-
-void ConcatLayer::configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis,
- IPortableTensor *output)
-{
- assert(inputs.size() > 0);
- assert(output != nullptr);
-
- _inputs = inputs;
- _axis = axis;
- _output = output;
-}
-
-void ConcatLayer::run()
-{
- if (_output->data_type() == OperandType::FLOAT32)
- {
- concatenationGeneral<float>();
- }
- else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- concatenationQuant8();
- }
- else if (_output->data_type() == OperandType::INT32)
- {
- concatenationGeneral<int32_t>();
- }
- else if (_output->data_type() == OperandType::INT64)
- {
- concatenationGeneral<int64_t>();
- }
- else
- throw std::runtime_error("Concat: unsupported data type");
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ConcatLayer.h b/runtime/onert/backend/cpu/ops/ConcatLayer.h
deleted file mode 100644
index 0787199d6..000000000
--- a/runtime/onert/backend/cpu/ops/ConcatLayer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_CONCATLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_CONCATLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ConcatLayer : public ::onert::exec::IFunction
-{
-public:
- ConcatLayer();
-
-public:
- template <typename T> void concatenationGeneral();
-
- void concatenationQuant8();
-
- void configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- std::vector<const IPortableTensor *> _inputs;
- IPortableTensor *_output;
- int32_t _axis;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_CONCATLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc b/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc
deleted file mode 100644
index c057267d3..000000000
--- a/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConvolutionLayer.h"
-
-#include "../Tensor.h"
-#include "ir/Padding.h"
-#include <cker/operation/Conv.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-ConvolutionLayer::ConvolutionLayer()
- : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
- _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
- _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
- _dilationHeightFactor(1), _activation(ir::Activation::NONE),
- _conv_kernel(new nnfw::cker::Conv()), _prepare(false)
-{
- // DO NOTHING
-}
-
-ConvolutionLayer::~ConvolutionLayer() = default;
-
-void ConvolutionLayer::convFloat32()
-{
- float output_activation_min = 0, output_activation_max = 0;
- CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
-
- nnfw::cker::ConvParams op_params;
- op_params.padding_type = getPaddingType(_paddingType);
- op_params.padding_values.width = _paddingLeft;
- op_params.padding_values.height = _paddingTop;
- op_params.stride_width = _strideWidth;
- op_params.stride_height = _strideHeight;
- op_params.dilation_width_factor = _dilationWidthFactor;
- op_params.dilation_height_factor = _dilationHeightFactor;
- op_params.float_activation_min = output_activation_min;
- op_params.float_activation_max = output_activation_max;
-
- nnfw::cker::Conv &kernel = *_conv_kernel;
- kernel(op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()),
- getTensorShape(_bias), reinterpret_cast<const float *>(_bias->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
-}
-
-void ConvolutionLayer::convQuant8()
-{
- int32_t output_activation_min = 0;
- int32_t output_activation_max = 0;
- CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
- &output_activation_max);
-
- double real_multiplier = 0.0;
- int32_t output_multiplier = 0;
- int32_t output_shift = 0;
- GetQuantizedConvolutionMultiplier(_input, _kernel, _bias, _output, &real_multiplier);
- QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
-
- nnfw::cker::ConvParams op_params;
- op_params.stride_width = _strideWidth;
- op_params.stride_height = _strideHeight;
- op_params.dilation_width_factor = _dilationWidthFactor;
- op_params.dilation_height_factor = _dilationHeightFactor;
- op_params.padding_type = getPaddingType(_paddingType);
- op_params.padding_values.width = _paddingLeft;
- op_params.padding_values.height = _paddingTop;
- op_params.input_offset = -_input->data_offset();
- op_params.weights_offset = -_kernel->data_offset();
- op_params.output_offset = _output->data_offset();
- op_params.output_multiplier = output_multiplier;
- op_params.output_shift = output_shift;
- op_params.quantized_activation_min = output_activation_min;
- op_params.quantized_activation_max = output_activation_max;
- op_params.is_replaced_weights = true;
-
- nnfw::cker::Conv &kernel = *_conv_kernel;
- kernel(op_params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()),
- getTensorShape(_kernel), reinterpret_cast<const uint8_t *>(_kernel->buffer()),
- getTensorShape(_bias), reinterpret_cast<const int32_t *>(_bias->buffer()),
- getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
-}
-
-void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTensor *kernel,
- const IPortableTensor *bias, const ir::PaddingType paddingType,
- const uint32_t paddingLeft, const uint32_t paddingRight,
- const uint32_t paddingTop, const uint32_t paddingBottom,
- const uint32_t strideWidth, const uint32_t strideHeight,
- const uint32_t dilationWidthFactor,
- const uint32_t dilationHeightFactor,
- const ir::Activation activation, IPortableTensor *output)
-{
- _input = input;
- _kernel = kernel;
- _bias = bias;
- _paddingType = paddingType;
- _paddingLeft = paddingLeft;
- _paddingRight = paddingRight;
- _paddingTop = paddingTop;
- _paddingBottom = paddingBottom;
- _strideWidth = strideWidth;
- _strideHeight = strideHeight;
- _dilationWidthFactor = dilationWidthFactor;
- _dilationHeightFactor = dilationHeightFactor;
- _activation = activation;
- _output = output;
-}
-
-void ConvolutionLayer::run()
-{
- prepare();
-
- if (_input->is_dynamic() || _kernel->is_dynamic())
- {
- const auto ifm_shape = _input->getShape().asFeature(_input->layout());
- const auto ofm_shape = _output->getShape().asFeature(_input->layout());
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
- const auto ker_shape = _kernel->getShape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
-
- ir::Stride stride;
- stride.vertical = _strideWidth;
- stride.horizontal = _strideWidth;
-
- ir::Padding param_padding;
- param_padding.type = _paddingType;
- param_padding.param.left = _paddingLeft;
- param_padding.param.right = _paddingRight;
- param_padding.param.top = _paddingTop;
- param_padding.param.bottom = _paddingBottom;
-
- const auto padding =
- ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
- _dilationWidthFactor, _dilationHeightFactor);
-
- _paddingLeft = padding.left;
- _paddingRight = padding.right;
- _paddingTop = padding.top;
- _paddingBottom = padding.bottom;
- }
- if (_input->data_type() == OperandType::FLOAT32)
- {
- convFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- convQuant8();
- }
- else
- {
- throw std::runtime_error{"Conv: unsupported data type"};
- }
-}
-
-void ConvolutionLayer::prepare()
-{
- if (_prepare)
- return;
-
- nnfw::cker::Conv &kernel = *_conv_kernel;
- if (_input->data_type() == OperandType::FLOAT32 && _kernel->is_constant())
- {
- bool is_transposed = false;
- kernel.prepare(getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()),
- getPaddingType(_paddingType), is_transposed, _dilationWidthFactor,
- _dilationHeightFactor);
-
- // Decrease reference of _kernel(weights) only when _kernel is constant
- if (is_transposed)
- {
- auto kernel_tensor = dynamic_cast<const Tensor *>(_kernel);
- if (kernel_tensor)
- // TODO Remove const_cast
- const_cast<Tensor *>(kernel_tensor)->decrease_ref();
- }
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM && _kernel->is_constant() &&
- !_input->is_dynamic() && !_output->is_dynamic())
- {
- kernel.prepareQuant(getTensorShape(_input), getTensorShape(_kernel), getTensorShape(_output),
- _strideWidth, _strideHeight);
- }
- _prepare = true;
-}
-
-#undef ANDROID_NN_CONV_PARAMETERS
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ConvolutionLayer.h b/runtime/onert/backend/cpu/ops/ConvolutionLayer.h
deleted file mode 100644
index 398892e65..000000000
--- a/runtime/onert/backend/cpu/ops/ConvolutionLayer.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_CONVOLUTIONLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_CONVOLUTIONLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-#include <functional>
-#include <memory>
-
-namespace nnfw
-{
-namespace cker
-{
-class Conv;
-}
-} // namespace nnfw
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ConvolutionLayer : public ::onert::exec::IFunction
-{
-public:
- ConvolutionLayer();
- ~ConvolutionLayer();
-
-public:
- void convFloat32();
-
- void convQuant8();
-
- void configure(const IPortableTensor *input, const IPortableTensor *kernel,
- const IPortableTensor *bias, ir::PaddingType _paddingType,
- const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop,
- const uint32_t paddingBottom, const uint32_t strideWidth,
- const uint32_t strideHeight, const uint32_t dilationWidthFactor,
- const uint32_t dilationHeightFactor, const ir::Activation activation,
- IPortableTensor *output);
-
- void run() override;
-
- void prepare() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_kernel;
- const IPortableTensor *_bias;
- IPortableTensor *_output;
-
- ir::PaddingType _paddingType;
- uint32_t _paddingLeft;
- uint32_t _paddingTop;
- uint32_t _paddingRight;
- uint32_t _paddingBottom;
-
- uint32_t _strideWidth;
- uint32_t _strideHeight;
- uint32_t _dilationWidthFactor;
- uint32_t _dilationHeightFactor;
-
- ir::Activation _activation;
-
- std::unique_ptr<nnfw::cker::Conv> _conv_kernel;
-
- bool _prepare;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_CONVOLUTIONLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.cc b/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.cc
deleted file mode 100644
index e67c3f390..000000000
--- a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "DepthwiseConvolutionLayer.h"
-
-#include <cker/operation/DepthwiseConv.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-DepthwiseConvolutionLayer::DepthwiseConvolutionLayer()
- : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr), _paddingLeft(0),
- _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
- _multiplier(0), _activation(ir::Activation::NONE)
-{
- // DO NOTHING
-}
-
-void DepthwiseConvolutionLayer::convFloat32()
-{
- float output_activation_min = 0, output_activation_max = 0;
- CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
-
- nnfw::cker::DepthwiseConvParams op_params;
- op_params.stride_width = _strideWidth;
- op_params.stride_height = _strideHeight;
- op_params.dilation_width_factor = 1;
- op_params.dilation_height_factor = 1;
- op_params.padding_values.width = _paddingLeft;
- op_params.padding_values.height = _paddingTop;
- op_params.depth_multiplier = _multiplier;
- op_params.float_activation_min = output_activation_min;
- op_params.float_activation_max = output_activation_max;
-
- nnfw::cker::DepthwiseConv(
- op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()),
- getTensorShape(_bias), reinterpret_cast<const float *>(_bias->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
-}
-
-void DepthwiseConvolutionLayer::convQuant8()
-{
- int32_t output_activation_min = 0;
- int32_t output_activation_max = 0;
- CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
- &output_activation_max);
-
- double real_multiplier = 0.0;
- int32_t output_multiplier = 0;
- int32_t output_shift = 0;
- GetQuantizedConvolutionMultiplier(_input, _kernel, _bias, _output, &real_multiplier);
- QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
-
- nnfw::cker::DepthwiseConvParams op_params;
- op_params.stride_width = _strideWidth;
- op_params.stride_height = _strideHeight;
- op_params.dilation_width_factor = 1;
- op_params.dilation_height_factor = 1;
- op_params.padding_values.width = _paddingLeft;
- op_params.padding_values.height = _paddingTop;
- op_params.depth_multiplier = _multiplier;
- op_params.input_offset = -_input->data_offset();
- op_params.weights_offset = -_kernel->data_offset();
- op_params.output_offset = _output->data_offset();
- op_params.output_multiplier = output_multiplier;
- op_params.output_shift = output_shift;
- op_params.quantized_activation_min = output_activation_min;
- op_params.quantized_activation_max = output_activation_max;
-
- nnfw::cker::DepthwiseConv(
- op_params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()),
- getTensorShape(_kernel), reinterpret_cast<const uint8_t *>(_kernel->buffer()),
- getTensorShape(_bias), reinterpret_cast<const int32_t *>(_bias->buffer()),
- getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
-}
-
-void DepthwiseConvolutionLayer::configure(const IPortableTensor *input,
- const IPortableTensor *kernel,
- const IPortableTensor *bias, const uint32_t paddingLeft,
- const uint32_t paddingRight, const uint32_t paddingTop,
- const uint32_t paddingBottom, const uint32_t strideWidth,
- const uint32_t strideHeight, const uint32_t multiplier,
- const ir::Activation activation, IPortableTensor *output)
-{
- _input = input;
- _kernel = kernel;
- _bias = bias;
- _paddingLeft = paddingLeft;
- _paddingRight = paddingRight;
- _paddingTop = paddingTop;
- _paddingBottom = paddingBottom;
- _strideWidth = strideWidth;
- _strideHeight = strideHeight;
- _multiplier = multiplier;
- _activation = activation;
- _output = output;
-}
-
-void DepthwiseConvolutionLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- convFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- convQuant8();
- }
- else
- {
- throw std::runtime_error{"DepthwiseConv: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.h b/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.h
deleted file mode 100644
index c898255a3..000000000
--- a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__
-#define __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class DepthwiseConvolutionLayer : public ::onert::exec::IFunction
-{
-public:
- DepthwiseConvolutionLayer();
-
-public:
- void convFloat32();
-
- void convQuant8();
-
- void configure(const IPortableTensor *input, const IPortableTensor *kernel,
- const IPortableTensor *bias, const uint32_t paddingLeft,
- const uint32_t paddingRight, const uint32_t paddingTop,
- const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
- const uint32_t multiplier, const ir::Activation activation,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_kernel;
- const IPortableTensor *_bias;
- IPortableTensor *_output;
-
- uint32_t _paddingLeft;
- uint32_t _paddingTop;
- uint32_t _paddingRight;
- uint32_t _paddingBottom;
-
- uint32_t _strideWidth;
- uint32_t _strideHeight;
-
- uint32_t _multiplier;
-
- ir::Activation _activation;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/EinsumLayer.cc b/runtime/onert/backend/cpu/ops/EinsumLayer.cc
deleted file mode 100644
index 8c16740a3..000000000
--- a/runtime/onert/backend/cpu/ops/EinsumLayer.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "EinsumLayer.h"
-
-#include <cker/operation/Einsum.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-EinsumLayer::EinsumLayer()
- : _inputs(), _output(nullptr), _equation(), _einsum_kernel(new nnfw::cker::Einsum())
-{
- // DO NOTHING
-}
-
-EinsumLayer::~EinsumLayer() = default;
-
-void EinsumLayer::einsumFloat32()
-{
- uint32_t num_inputs = _inputs.size();
- nnfw::cker::Einsum &kernel = *_einsum_kernel;
-
- kernel.prepare(_equation);
-
- std::vector<nnfw::cker::Shape> inputShapes;
- std::vector<const float *> inputFloatPtrs;
-
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- inputShapes.emplace_back(getTensorShape(_inputs[i]));
- inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(_inputs[i]->buffer()));
- }
-
- kernel(_equation, inputShapes, inputFloatPtrs, getTensorShape(_output),
- reinterpret_cast<float *>(_output->buffer()));
-}
-
-void EinsumLayer::run()
-{
- if (_output->data_type() == OperandType::FLOAT32)
- {
- einsumFloat32();
- }
- else
- {
- throw std::runtime_error{"Einsum: unsupported data type"};
- }
-}
-
-void EinsumLayer::configure(const std::vector<const IPortableTensor *> &inputs,
- std::string equation, IPortableTensor *output)
-{
- assert(inputs.size() > 0);
- assert(output != nullptr);
-
- _inputs = inputs;
- _equation = equation;
- _output = output;
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/EinsumLayer.h b/runtime/onert/backend/cpu/ops/EinsumLayer.h
deleted file mode 100644
index a93f87e77..000000000
--- a/runtime/onert/backend/cpu/ops/EinsumLayer.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_EINSUM_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_EINSUM_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-#include <functional>
-#include <memory>
-
-namespace nnfw
-{
-namespace cker
-{
-class Einsum;
-}
-} // namespace nnfw
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class EinsumLayer : public ::onert::exec::IFunction
-{
-public:
- EinsumLayer();
- ~EinsumLayer();
-
-public:
- void einsumFloat32();
-
- void configure(const std::vector<const IPortableTensor *> &inputs, std::string equation,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- std::vector<const IPortableTensor *> _inputs;
- IPortableTensor *_output;
-
- std::string _equation;
-
- std::unique_ptr<nnfw::cker::Einsum> _einsum_kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_EINSUM_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc
deleted file mode 100644
index c1d63172b..000000000
--- a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ElementwiseActivationLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Logistic.h>
-#include <cker/operation/ReLU.h>
-#include <cker/operation/ReLU6.h>
-#include <cker/operation/Tanh.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-ElementwiseActivationLayer::ElementwiseActivationLayer()
- : _input(nullptr), _output(nullptr), _kernel()
-{
- // DO NOTHING
-}
-
-void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
-{
- const auto input_scale = static_cast<double>(_input->data_scale());
- const auto input_zero_point = static_cast<int32_t>(_input->data_offset());
- const auto output_scale = static_cast<double>(_output->data_scale());
- const auto output_zero_point = static_cast<int32_t>(_output->data_offset());
- const float inverse_scale = 1 / output_scale;
- int32_t maxval = std::numeric_limits<uint8_t>::max();
- int32_t minval = std::numeric_limits<uint8_t>::min();
- for (int32_t val = minval; val <= maxval; ++val)
- {
- const float dequantized = input_scale * (val - input_zero_point);
- float transformed = 0.f;
- if (op_type == ElementwiseActivationType::kTanh)
- {
- transformed = std::tanh(dequantized);
- }
- else if (op_type == ElementwiseActivationType::kLogistic)
- {
- transformed = 1.0f / (1.0f + std::exp(-dequantized));
- }
- else
- {
- throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
- }
- const float rescaled = std::round(transformed * inverse_scale);
- const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
- _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
- }
-}
-
-void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
- IPortableTensor *output)
-{
- const int size = MatchingFlatSize(getTensorShape(input), getTensorShape(output));
- const uint8_t *input_data = reinterpret_cast<const uint8_t *>(input->buffer());
- uint8_t *output_data = reinterpret_cast<uint8_t *>(output->buffer());
-
- for (int i = 0; i < size; ++i)
- {
- output_data[i] = _table[input_data[i]];
- }
-}
-
-void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
- float alpha, float beta,
- ElementwiseActivationType op_type)
-{
- _input = input;
- _output = output;
-
- switch (op_type)
- {
- case ElementwiseActivationType::kLogistic:
- if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- PopulateLookupTable(op_type);
- _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
- std::placeholders::_1, std::placeholders::_2);
- }
- else if (_input->data_type() == OperandType::FLOAT32)
- {
- _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::Logistic(getTensorShape(input),
- reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
- };
- }
- else
- {
- throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
- }
- break;
- case ElementwiseActivationType::kReLU:
- if (_input->data_type() == OperandType::FLOAT32)
- {
- if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
- {
- _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::ReLU(getTensorShape(input),
- reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
- };
- }
- else if (alpha == 6.f && beta == 0.f)
- {
- _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::ReLU6(getTensorShape(input),
- reinterpret_cast<const float *>(input->buffer()),
- reinterpret_cast<float *>(output->buffer()));
- };
- }
- else
- {
- throw std::runtime_error(
- "ElementwiseActivationLayer : This layer suppports only ReLU(0-inf) and ReLU6(0-6)");
- }
- }
- else
- {
- throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
- }
- break;
- case ElementwiseActivationType::kTanh:
- if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- PopulateLookupTable(op_type);
- _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
- std::placeholders::_1, std::placeholders::_2);
- }
- else if (_input->data_type() == OperandType::FLOAT32)
- {
- _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::Tanh(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
- };
- }
- else
- {
- throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
- }
- break;
- default:
- throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
- }
-}
-
-void ElementwiseActivationLayer::run() { _kernel(_input, _output); }
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.h b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.h
deleted file mode 100644
index 3ef580041..000000000
--- a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_ElementwiseActivationLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_ElementwiseActivationLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-enum class ElementwiseActivationType
-{
- kLogistic,
- kReLU,
- kTanh
-};
-
-class ElementwiseActivationLayer : public ::onert::exec::IFunction
-{
-public:
- ElementwiseActivationLayer();
-
-public:
- void configure(const IPortableTensor *input, IPortableTensor *output, float alpha, float beta,
- const ElementwiseActivationType op_type);
-
- void run() override;
-
- void PopulateLookupTable(const ElementwiseActivationType op_type);
-
- void EvalUsingLookupTable(const IPortableTensor *input, IPortableTensor *output);
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
- uint8_t _table[256];
- std::function<void(const IPortableTensor *input, IPortableTensor *output)> _kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_ElementwiseActivationLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc
deleted file mode 100644
index ea3c1e7cd..000000000
--- a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ElementwiseBinaryLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/LogicalOr.h>
-#include <cker/operation/MaxMin.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-namespace
-{
-template <typename T>
-void logicalOrGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs,
- IPortableTensor *output)
-{
- if (!HaveSameShapes(lhs, rhs))
- {
- nnfw::cker::LogicalOrBroadcast<T>(
- getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), getTensorShape(rhs),
- reinterpret_cast<const T *>(rhs->buffer()), getTensorShape(output),
- reinterpret_cast<T *>(output->buffer()));
- }
- else
- {
- nnfw::cker::LogicalOrElementwise<T>(
- getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()),
- reinterpret_cast<const T *>(rhs->buffer()), reinterpret_cast<T *>(output->buffer()));
- }
-}
-
-template <typename T>
-void maximumGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output)
-{
- nnfw::cker::Max<T>(getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()),
- getTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()),
- getTensorShape(output), reinterpret_cast<T *>(output->buffer()));
-}
-
-template <typename T>
-void minimumGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output)
-{
- nnfw::cker::Min<T>(getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()),
- getTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()),
- getTensorShape(output), reinterpret_cast<T *>(output->buffer()));
-}
-
-bool haveSameQauntInfo(const IPortableTensor *lhs, const IPortableTensor *rhs,
- const IPortableTensor *output)
-{
- return (lhs->data_scale() == rhs->data_scale() && lhs->data_scale() == output->data_scale()) &&
- (lhs->data_offset() == rhs->data_offset() && lhs->data_offset() == output->data_offset());
-}
-} // namespace
-
-void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
- IPortableTensor *output, const ElementwiseBinaryType op_type)
-{
- assert(lhs != nullptr);
- assert(rhs != nullptr);
- assert(output != nullptr);
-
- _lhs = lhs;
- _rhs = rhs;
- _output = output;
-
- switch (op_type)
- {
- case ElementwiseBinaryType::kLogicalOr:
- if ((_lhs->data_type() == OperandType::BOOL8) && (_rhs->data_type() == OperandType::BOOL8))
- {
- _kernel = logicalOrGeneric<bool>;
- }
- else
- {
- throw std::runtime_error{"LogicalOr: Unsupported data type"};
- }
- break;
- case ElementwiseBinaryType::kMax:
- if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- if (!haveSameQauntInfo(_lhs, _rhs, _output))
- {
- throw std::runtime_error("Max NYI for quantized");
- }
- _kernel = maximumGeneric<uint8_t>;
- }
- else if (_lhs->data_type() == OperandType::FLOAT32)
- {
- _kernel = maximumGeneric<float>;
- }
- else
- {
- throw std::runtime_error{"Max: unsupported data type"};
- }
- break;
- case ElementwiseBinaryType::kMin:
- if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- if (!haveSameQauntInfo(_lhs, _rhs, _output))
- {
- throw std::runtime_error("Min NYI for quantized");
- }
- _kernel = minimumGeneric<uint8_t>;
- }
- else if (_lhs->data_type() == OperandType::INT32)
- {
- _kernel = minimumGeneric<int32_t>;
- }
- else if (_lhs->data_type() == OperandType::FLOAT32)
- {
- _kernel = minimumGeneric<float>;
- }
- else
- {
- throw std::runtime_error{"Min: unsupported data type"};
- }
- break;
- default:
- throw std::runtime_error{"ElementwiseBinary: Unsupported ElementwiseBinary type"};
- }
-}
-
-void ElementwiseBinaryLayer::run() { _kernel(_lhs, _rhs, _output); }
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.h b/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.h
deleted file mode 100644
index 052747a4c..000000000
--- a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_ELEMENTWISEBINARYLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_ELEMENTWISEBINARYLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-enum class ElementwiseBinaryType
-{
- kLogicalAnd,
- kLogicalOr,
- kMax,
- kMin,
-};
-
-class ElementwiseBinaryLayer : public ::onert::exec::IFunction
-{
-public:
- ElementwiseBinaryLayer() : _lhs(nullptr), _rhs(nullptr), _output(nullptr)
- {
- // DO NOTHING
- }
-
-public:
- void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output,
- const ElementwiseBinaryType op_type);
-
- void run() override;
-
-private:
- const IPortableTensor *_lhs;
- const IPortableTensor *_rhs;
- IPortableTensor *_output;
- std::function<void(const IPortableTensor *, const IPortableTensor *, IPortableTensor *)> _kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_ELEMENTWISEBINARYLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc
deleted file mode 100644
index f8f89ab15..000000000
--- a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ElementwiseUnaryLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Elementwise.h>
-#include <cker/operation/Erf.h>
-#include <cker/operation/Exp.h>
-#include <cker/operation/LogicalNot.h>
-#include <cker/operation/Quantize.h>
-#include <cker/operation/Round.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-namespace
-{
-void absFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Abs(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-template <typename FromT>
-void castPtr(const FromT *in, DataPtr out, int num_elements, ir::DataType data_type_out)
-{
- switch (data_type_out)
- {
- case ir::DataType::FLOAT32:
- std::transform(in, in + num_elements, out.f, [](FromT a) { return static_cast<float>(a); });
- return;
- case ir::DataType::INT32:
- std::transform(in, in + num_elements, out.i32,
- [](FromT a) { return static_cast<int32_t>(a); });
- return;
- case ir::DataType::UINT32:
- std::transform(in, in + num_elements, out.u32,
- [](FromT a) { return static_cast<uint32_t>(a); });
- return;
- case ir::DataType::UINT8:
- std::transform(in, in + num_elements, out.u8,
- [](FromT a) { return static_cast<uint8_t>(a); });
- return;
- case ir::DataType::BOOL8:
- std::transform(in, in + num_elements, out.b, [](FromT a) { return static_cast<bool>(a); });
- return;
- case ir::DataType::INT64:
- std::transform(in, in + num_elements, out.i64,
- [](FromT a) { return static_cast<int64_t>(a); });
- return;
- default:
- throw std::runtime_error("Cast: Not supported output type" +
- std::to_string((int)data_type_out));
- }
-}
-
-void cast(const IPortableTensor *input, IPortableTensor *output)
-{
- auto input_buf = input->buffer();
- auto output_buf = output->buffer();
- const auto in = *reinterpret_cast<const DataPtr *>(&input_buf);
- auto out = *reinterpret_cast<DataPtr *>(&output_buf);
-
- auto input_shape = getTensorShape(input);
- auto output_shape = getTensorShape(output);
- const auto num_elements = MatchingFlatSize(input_shape, output_shape);
-
- switch (input->data_type())
- {
- case ir::DataType::FLOAT32:
- castPtr(in.f, out, num_elements, output->data_type());
- return;
- case ir::DataType::INT32:
- castPtr(in.i32, out, num_elements, output->data_type());
- return;
- case ir::DataType::UINT32:
- castPtr(in.u32, out, num_elements, output->data_type());
- return;
- case ir::DataType::UINT8:
- castPtr(in.u8, out, num_elements, output->data_type());
- return;
- case ir::DataType::BOOL8:
- castPtr(in.b, out, num_elements, output->data_type());
- return;
- case ir::DataType::INT64:
- castPtr(in.i64, out, num_elements, output->data_type());
- return;
- default:
- throw std::runtime_error("Cast: unsupported data type" +
- std::to_string((int)input->data_type()));
- }
-}
-
-void cosFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Cos(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-void expFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Exp(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-void erfFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Erf(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-void logFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Log(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-void logicalNot(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::LogicalNot(getTensorShape(input), reinterpret_cast<const bool *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<bool *>(output->buffer()));
-}
-
-void negFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Neg(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-template <typename InputT, typename OutputT>
-void affineQuantize(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Quantize(getTensorShape(input), reinterpret_cast<const InputT *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<OutputT *>(output->buffer()),
- output->data_scale(), output->data_offset());
-}
-
-void roundFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Round(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-void rsqrtFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Rsqrt(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-void sinFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- nnfw::cker::Sin(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
-}
-
-template <typename T> void zerosLikeFloat32(const IPortableTensor *input, IPortableTensor *output)
-{
- if (!HaveSameShapes(input, output))
- throw std::runtime_error{"ZerosLike: input and output shape don't match."};
-
- auto element_size = getTensorShape(input).FlatSize();
-
- memset(reinterpret_cast<T *>(output->buffer()), 0, element_size * sizeof(T));
-}
-} // namespace
-
-void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTensor *output,
- const ElementwiseUnaryType op_type)
-{
- assert(input != nullptr);
- assert(output != nullptr);
-
- _input = input;
- _output = output;
-
- switch (op_type)
- {
- case ElementwiseUnaryType::kAbs:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = absFloat32;
- }
- else
- {
- throw std::runtime_error{"Abs: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kCast:
- _kernel = cast;
- break;
- case ElementwiseUnaryType::kCos:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = cosFloat32;
- }
- else
- {
- throw std::runtime_error{"Cos: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kExp:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = expFloat32;
- }
- else
- {
- throw std::runtime_error{"Exp: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kErf:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = erfFloat32;
- }
- else
- {
- throw std::runtime_error{"Exp: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kLog:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = logFloat32;
- }
- else
- {
- throw std::runtime_error{"Log: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kLogicalNot:
- if ((input->data_type() == OperandType::BOOL8))
- {
- _kernel = logicalNot;
- }
- else
- {
- throw std::runtime_error{"LogicalNot: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kNeg:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = negFloat32;
- }
- else
- {
- throw std::runtime_error{"Neg: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kQuantize:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = affineQuantize<float, uint8_t>;
- }
- else
- {
- throw std::runtime_error{"Quantize: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kRound:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = roundFloat32;
- }
- else
- {
- throw std::runtime_error{"Round: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kRSqrt:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = rsqrtFloat32;
- }
- else
- {
- throw std::runtime_error{"RSqrt: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kSin:
- if ((input->data_type() == OperandType::FLOAT32))
- {
- _kernel = sinFloat32;
- }
- else
- {
- throw std::runtime_error{"Sin: Unsupported data type"};
- }
- break;
- case ElementwiseUnaryType::kZerosLike:
- if (input->data_type() == OperandType::FLOAT32)
- {
- _kernel = zerosLikeFloat32<float>;
- }
- else if (input->data_type() == OperandType::INT32)
- {
- _kernel = zerosLikeFloat32<int32_t>;
- }
- else
- {
- throw std::runtime_error{"ZerosLike: Unsupported data type"};
- }
- break;
- default:
- throw std::runtime_error{"ElementwiseBinary: Unsupported ElementwiseBinary type"};
- }
-}
-
-void ElementwiseUnaryLayer::run() { _kernel(_input, _output); }
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.h b/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.h
deleted file mode 100644
index 74968386d..000000000
--- a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_ELEMENTWISEUNARYLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_ELEMENTWISEUNARYLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-enum class ElementwiseUnaryType
-{
- kAbs,
- kCast,
- kCos,
- kErf,
- kExp,
- kLog,
- kLogicalNot,
- kNeg,
- kQuantize,
- kRound,
- kRSqrt,
- kSin,
- kZerosLike
-};
-
-class ElementwiseUnaryLayer : public ::onert::exec::IFunction
-{
-public:
- ElementwiseUnaryLayer() : _input(nullptr), _output(nullptr), _kernel()
- {
- // DO NOTHING
- }
-
-public:
- void configure(const IPortableTensor *input, IPortableTensor *output,
- const ElementwiseUnaryType op_type);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
- std::function<void(const IPortableTensor *, IPortableTensor *)> _kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_ELEMENTWISEUNARYLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.cc b/runtime/onert/backend/cpu/ops/ExpandDimsLayer.cc
deleted file mode 100644
index b545e6743..000000000
--- a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ExpandDimsLayer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-ExpandDimsLayer::ExpandDimsLayer() : _input(nullptr), _axis(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void ExpandDimsLayer::configure(const IPortableTensor *input, const IPortableTensor *axis,
- IPortableTensor *output)
-{
- _input = input;
- _axis = axis;
- _output = output;
-}
-
-void ExpandDimsLayer::run()
-{
- // TODO use _axis to calculate shape of output when _axis is not constant
- size_t count = _input->total_size();
- memcpy(_output->buffer(), _input->buffer(), count);
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.h b/runtime/onert/backend/cpu/ops/ExpandDimsLayer.h
deleted file mode 100644
index b5d4938b5..000000000
--- a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ExpandDi__ONERT_BACKEND_CPU_OPS_EXPANDDIMS_LAYER_H__ms
-#define ExpandDi__ONERT_BACKEND_CPU_OPS_EXPANDDIMS_LAYER_H__ms
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ExpandDimsLayer : public ::onert::exec::IFunction
-{
-public:
- ExpandDimsLayer();
-
-public:
- void configure(const IPortableTensor *input, const IPortableTensor *axis,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_axis;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // ExpandDi__ONERT_BACKEND_CPU_OPS_EXPANDDIMS_LAYER_H__ms
diff --git a/runtime/onert/backend/cpu/ops/FillLayer.cc b/runtime/onert/backend/cpu/ops/FillLayer.cc
deleted file mode 100644
index 0a95ab005..000000000
--- a/runtime/onert/backend/cpu/ops/FillLayer.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "FillLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Fill.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-FillLayer::FillLayer() : _input(nullptr), _value(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void FillLayer::configure(const IPortableTensor *input, const IPortableTensor *value,
- IPortableTensor *output)
-{
- _input = input;
- _value = value;
- _output = output;
-}
-
-void FillLayer::run()
-{
- switch (_output->data_type())
- {
- case OperandType::FLOAT32:
- nnfw::cker::Fill<float *>(getTensorShape(_input), reinterpret_cast<int *>(_input->buffer()),
- reinterpret_cast<float *>(_value->buffer()),
- getTensorShape(_output),
- reinterpret_cast<float *>(_output->buffer()));
- break;
- case OperandType::INT32:
- nnfw::cker::Fill<int32_t *>(getTensorShape(_input), reinterpret_cast<int *>(_input->buffer()),
- reinterpret_cast<int32_t *>(_value->buffer()),
- getTensorShape(_output),
- reinterpret_cast<int32_t *>(_output->buffer()));
- break;
- case OperandType::UINT32:
- nnfw::cker::Fill<uint32_t *>(
- getTensorShape(_input), reinterpret_cast<int *>(_input->buffer()),
- reinterpret_cast<uint32_t *>(_value->buffer()), getTensorShape(_output),
- reinterpret_cast<uint32_t *>(_output->buffer()));
- break;
- default:
- throw std::runtime_error{"Fill: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/FillLayer.h b/runtime/onert/backend/cpu/ops/FillLayer.h
deleted file mode 100644
index 1f17d6b68..000000000
--- a/runtime/onert/backend/cpu/ops/FillLayer.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_FILLLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_FILLLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class FillLayer : public ::onert::exec::IFunction
-{
-public:
- FillLayer();
-
- void configure(const IPortableTensor *input, const IPortableTensor *value,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_value;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_FILLLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc
deleted file mode 100644
index f873a3430..000000000
--- a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "FullyConnectedLayer.h"
-
-#include "../Tensor.h"
-#include <cker/operation/FullyConnected.h>
-#include <cker/TensorUtils.h>
-#include <misc/polymorphic_downcast.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-FullyConnectedLayer::FullyConnectedLayer()
- : _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr),
- _activation(ir::Activation::NONE), _temp_arena(new nnfw::cker::FCTempArena()),
- _external_context(nullptr), _is_hybrid(false)
-{
- // DO NOTHING
-}
-
-FullyConnectedLayer::~FullyConnectedLayer() = default;
-
-void FullyConnectedLayer::fullyConnectedFloat32()
-{
- float output_activation_min = 0, output_activation_max = 0;
- CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
-
- nnfw::cker::FullyConnectedParams op_params;
- op_params.float_activation_min = output_activation_min;
- op_params.float_activation_max = output_activation_max;
- op_params.activation = convertActivationType(_activation);
-
- nnfw::cker::FullyConnected(
- op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()),
- getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
-}
-
-// executionMutex is used to protect concurrent access of non-threadsafe resources
-// like gemmlowp::GemmContext.
-void FullyConnectedLayer::fullyConnectedQuant8()
-{
- double real_multiplier = 0.0;
- int32_t output_multiplier = 0;
- int32_t output_shift = 0;
- int32_t output_activation_min = 0;
- int32_t output_activation_max = 0;
- GetQuantizedConvolutionMultiplier(_input, _weights, _bias, _output, &real_multiplier);
- QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
- CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
- &output_activation_max);
-
- nnfw::cker::FullyConnectedParams op_params;
- op_params.input_offset = -_input->data_offset();
- op_params.weights_offset = -_weights->data_offset();
- op_params.output_offset = _output->data_offset();
- op_params.output_multiplier = output_multiplier;
- op_params.output_shift = output_shift;
- op_params.quantized_activation_min = output_activation_min;
- op_params.quantized_activation_max = output_activation_max;
-
- nnfw::cker::FullyConnected(
- op_params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()),
- getTensorShape(_weights), reinterpret_cast<const uint8_t *>(_weights->buffer()),
- getTensorShape(_bias), reinterpret_cast<const int32_t *>(_bias ? _bias->buffer() : nullptr),
- getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
-}
-
-void FullyConnectedLayer::fullyConnectedHybrid()
-{
- nnfw::cker::FCTempArena &temp_arena = *_temp_arena;
- if (!temp_arena.prepared)
- {
- temp_arena.prepare(getTensorShape(_input), getTensorShape(_weights));
- }
-
- nnfw::cker::FullyConnectedParams op_params;
- op_params.activation = convertActivationType(_activation);
- op_params.weights_scale = _weights->data_scale();
-
-#ifndef USE_RUY_GEMV
- nnfw::cker::FullyConnectedHybrid(
- op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_weights), reinterpret_cast<const int8_t *>(_weights->buffer()),
- getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), temp_arena,
- _external_context->ruy_context());
-#else
- nnfw::cker::FullyConnectedHybrid(
- op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_weights),
- (_cached_weights) ? reinterpret_cast<const int8_t *>(_cached_weights)
- : reinterpret_cast<const int8_t *>(_weights->buffer()),
- getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), temp_arena,
- _external_context->ruy_context());
-
- if (_cached_weights == nullptr || _is_weights_freed)
- return;
-
- // '_cached_weights is not nullptr and _is_weights_freed is false' means
- // this weight shape is satisfied with the ruy kernel's prepack cache's condition.
- // After entering here, it will not enter again except below the case - input is zero-vector
-
- // if input's elements are filled with zero, it by-passes(does not enter ruy-kernel path)
- // so that handle this case
- const int input_size = getTensorShape(_input).FlatSize();
- if (nnfw::cker::IsZeroVector(reinterpret_cast<float *>(_input->buffer()), input_size))
- return;
-
- auto weight_tensor = nnfw::misc::polymorphic_downcast<const Tensor *>(_weights);
-
- // This weight tensor could be other ops' const tensor.
- // Therefore, below reference should be checked like following
- auto tensor = const_cast<Tensor *>(weight_tensor);
- if (tensor->buffer() == nullptr) // ref is already 0?
- {
- _is_weights_freed = true;
- return;
- }
-
- tensor->decrease_ref();
- if (tensor->buffer() == nullptr) // ref == 0?
- {
- _is_weights_freed = true;
- }
-#endif
-}
-
-void FullyConnectedLayer::fullyConnectedSparseWeight()
-{
- float output_activation_min = 0, output_activation_max = 0;
- CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
-
- nnfw::cker::FullyConnectedParams op_params;
- op_params.float_activation_min = output_activation_min;
- op_params.float_activation_max = output_activation_max;
- op_params.activation = convertActivationType(_activation);
-
- const uint16_t *w1_segments = _weights->sparsity()->w1_segments();
- const uint16_t *w1_indices = _weights->sparsity()->w1_indices();
-
- auto block_size = _weights->sparsity()->block_size();
- if (block_size.size() == 0)
- {
- nnfw::cker::FullyConnectedSparseWeightRandom(
- op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()),
- getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), w1_segments,
- w1_indices);
- }
- else if (block_size.size() == 2 && block_size[0] == 16 && block_size[1] == 1)
- {
- nnfw::cker::FullyConnectedSparseWeight16x1(
- op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()),
- getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), w1_segments,
- w1_indices);
- }
- else
- throw std::runtime_error{"FullyConnected: unsupported sparsity"};
-}
-
-void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortableTensor *weights,
- const IPortableTensor *bias, ir::Activation activation,
- IPortableTensor *output,
- const std::shared_ptr<ExternalContext> &external_context)
-{
- _input = input;
- _weights = weights;
- _bias = bias;
- _activation = activation;
- _output = output;
- _is_hybrid = input->data_type() == OperandType::FLOAT32 &&
- weights->data_type() == OperandType::QUANT_INT8_SYMM;
- _external_context = external_context;
-}
-
-void FullyConnectedLayer::run()
-{
- if (_is_hybrid)
- {
- fullyConnectedHybrid();
- }
- else if (_weights->sparsity())
- {
- fullyConnectedSparseWeight();
- }
- else if (_input->data_type() == OperandType::FLOAT32)
- {
- fullyConnectedFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- fullyConnectedQuant8();
- }
- else
- {
- throw std::runtime_error{"FullyConnected: unsupported data type"};
- }
-}
-
-void FullyConnectedLayer::prepare()
-{
- if (_bias && _bias->is_constant())
- {
- const int bias_size = getTensorShape(_bias).FlatSize();
- if (nnfw::cker::IsZeroVector(reinterpret_cast<float *>(_bias->buffer()), bias_size))
- {
- _bias = nullptr;
- }
- }
-
-#if (defined(__ARM_NEON__) || defined(__ARM_NEON)) && defined(USE_RUY_GEMV)
- // TODO This is workaround
- // The only fc hybrid will use ruy kernel
- if (_input->data_type() != OperandType::FLOAT32 ||
- _weights->data_type() != OperandType::QUANT_INT8_SYMM)
- {
- return;
- }
-
- // NOTE. The condition to enable caching on ruy kernel can be changed according to ruy's version
-
- // If input is dynamic, it changes total size of input
- // If weights is not constant, weights cannot be cached
- if (_input->is_dynamic() || !_weights->is_constant())
- return;
-
- const int rows = getTensorShape(_weights).Dims(0);
- if (rows % 4 == 0)
- {
- // TODO If it's possible to extract precaching from ruy kernel,
- // place this instead of below code
-
- // buffer will be used by ruy kernel as a cache key
- _cached_weights = _weights->buffer();
- }
-#endif
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h
deleted file mode 100644
index f1242677c..000000000
--- a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_FULLYCONNECTEDLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_FULLYCONNECTEDLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "../ExternalContext.h"
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace nnfw
-{
-namespace cker
-{
-class FCTempArena;
-}
-} // namespace nnfw
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class FullyConnectedLayer : public ::onert::exec::IFunction
-{
-public:
- FullyConnectedLayer();
- ~FullyConnectedLayer();
-
-public:
- void fullyConnectedFloat32();
-
- void fullyConnectedQuant8();
-
- void fullyConnectedHybrid();
-
- void fullyConnectedSparseWeight();
-
- void configure(const IPortableTensor *input, const IPortableTensor *weights,
- const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output,
- const std::shared_ptr<ExternalContext> &external_context);
-
- void run() override;
-
- void prepare() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_weights;
- const IPortableTensor *_bias;
- IPortableTensor *_output;
-
- ir::Activation _activation;
- std::unique_ptr<nnfw::cker::FCTempArena> _temp_arena;
-
- std::shared_ptr<ExternalContext> _external_context;
-
- bool _is_hybrid;
-
-#ifdef USE_RUY_GEMV
- uint8_t *_cached_weights = nullptr; // weights to be cached and a key
- bool _is_weights_freed = false; // is weights freed?
-#endif
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_FULLYCONNECTEDLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc b/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc
deleted file mode 100644
index c2c592db7..000000000
--- a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "FusedBatchNormLayer.h"
-
-#include <cker/operation/FusedBatchNorm.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-FusedBatchNormLayer::FusedBatchNormLayer()
- : _inputs(), _output(nullptr), _epsilon(0), _is_training(true),
- _fusedbatchnorm_kernel(new nnfw::cker::FusedBatchNorm())
-{
- // DO NOTHING
-}
-
-FusedBatchNormLayer::~FusedBatchNormLayer() = default;
-
-void FusedBatchNormLayer::fusedbatchnormFloat32()
-{
- uint32_t num_inputs = _inputs.size();
- nnfw::cker::FusedBatchNorm &kernel = *_fusedbatchnorm_kernel;
-
- kernel.prepare();
-
- std::vector<nnfw::cker::Shape> inputShapes;
- std::vector<const float *> inputFloatPtrs;
-
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- inputShapes.emplace_back(getTensorShape(_inputs[i]));
- inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(_inputs[i]->buffer()));
- }
-
- nnfw::cker::FusedBatchNormParams param;
-
- param.epsilon = _epsilon;
- param.is_training = _is_training;
- param.data_format = _data_format;
-
- kernel(inputShapes, inputFloatPtrs, getTensorShape(_output),
- reinterpret_cast<float *>(_output->buffer()), param);
-}
-
-void FusedBatchNormLayer::run()
-{
- if (_output->data_type() == OperandType::FLOAT32)
- {
- fusedbatchnormFloat32();
- }
- else
- {
- throw std::runtime_error{"FusedBatchNorm: unsupported data type"};
- }
-}
-
-void FusedBatchNormLayer::configure(const std::vector<const IPortableTensor *> &inputs,
- float epsilon, bool is_training, std::string data_format,
- IPortableTensor *output)
-{
- assert(inputs.size() > 0);
- assert(output != nullptr);
-
- _inputs = inputs;
- _output = output;
- _epsilon = epsilon;
- _is_training = is_training;
- _data_format = data_format;
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.h b/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.h
deleted file mode 100644
index d42b0c900..000000000
--- a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_FUSEDBATCHNORM_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_FUSEDBATCHNORM_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-#include <functional>
-#include <memory>
-
-namespace nnfw
-{
-namespace cker
-{
-class FusedBatchNorm;
-}
-} // namespace nnfw
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class FusedBatchNormLayer : public ::onert::exec::IFunction
-{
-public:
- FusedBatchNormLayer();
- ~FusedBatchNormLayer();
-
-public:
- void fusedbatchnormFloat32();
-
- void configure(const std::vector<const IPortableTensor *> &inputs, float epsilon,
- bool is_training, std::string data_format, IPortableTensor *output);
-
- void run() override;
-
-private:
- std::vector<const IPortableTensor *> _inputs;
- IPortableTensor *_output;
- float _epsilon;
- bool _is_training;
- std::string _data_format;
-
- std::unique_ptr<nnfw::cker::FusedBatchNorm> _fusedbatchnorm_kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_FUSEDBATCHNORM_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/GatherLayer.cc b/runtime/onert/backend/cpu/ops/GatherLayer.cc
deleted file mode 100644
index 641daa972..000000000
--- a/runtime/onert/backend/cpu/ops/GatherLayer.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "GatherLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Gather.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-void GatherLayer::configure(const IPortableTensor *input, const IPortableTensor *indices,
- IPortableTensor *output, int32_t axis)
-{
- _input = input;
- _indices = indices;
- _axis = axis;
- _output = output;
-}
-
-template <typename InputType> void GatherLayer::runByInputType()
-{
- using OutputType = InputType;
- nnfw::cker::GatherParams op_params;
- op_params.axis = _axis;
-
- switch (_indices->data_type())
- {
- case OperandType::INT32:
- {
- using IndicesType = int32_t;
-
- nnfw::cker::Gather<InputType, IndicesType>(
- op_params, getTensorShape(_input), reinterpret_cast<const InputType *>(_input->buffer()),
- getTensorShape(_indices), reinterpret_cast<const IndicesType *>(_indices->buffer()),
- getTensorShape(_output), reinterpret_cast<OutputType *>(_output->buffer()));
- break;
- }
- case OperandType::INT64:
- {
- using IndicesType = int64_t;
-
- nnfw::cker::Gather<InputType, IndicesType>(
- op_params, getTensorShape(_input), reinterpret_cast<const InputType *>(_input->buffer()),
- getTensorShape(_indices), reinterpret_cast<const IndicesType *>(_indices->buffer()),
- getTensorShape(_output), reinterpret_cast<OutputType *>(_output->buffer()));
- break;
- }
- default:
- throw std::runtime_error("Gather: unsupported indices data type");
- }
-}
-
-void GatherLayer::run()
-{
- switch (_input->data_type())
- {
- case OperandType::FLOAT32:
- runByInputType<float>();
- break;
- case OperandType::QUANT_UINT8_ASYMM:
- runByInputType<uint8_t>();
- break;
- case OperandType::INT32:
- runByInputType<int32_t>();
- break;
- default:
- throw std::runtime_error("Gather: unsupported input data type");
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/GatherLayer.h b/runtime/onert/backend/cpu/ops/GatherLayer.h
deleted file mode 100644
index 8fe80cc2b..000000000
--- a/runtime/onert/backend/cpu/ops/GatherLayer.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_GATHERLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_GATHERLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class GatherLayer : public ::onert::exec::IFunction
-{
-public:
- GatherLayer() : _input{nullptr}, _indices{nullptr}, _output{nullptr}, _axis{-1}
- {
- // DO NOTHING
- }
-
-public:
- void configure(const IPortableTensor *input, const IPortableTensor *indices,
- IPortableTensor *output, int32_t axis);
-
- void run() override;
-
-private:
- template <typename OpType> void runByInputType();
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_indices;
- IPortableTensor *_output;
-
- int32_t _axis;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_GATHERLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/L2NormLayer.cc b/runtime/onert/backend/cpu/ops/L2NormLayer.cc
deleted file mode 100644
index 0d99b0586..000000000
--- a/runtime/onert/backend/cpu/ops/L2NormLayer.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "L2NormLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/L2Normalize.h>
-#include <cker/Types.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-void L2NormLayer::configure(const IPortableTensor *input, IPortableTensor *output)
-{
- assert(input != nullptr);
- assert(output != nullptr);
-
- _input = input;
- _output = output;
-}
-
-void L2NormLayer::run()
-{
- switch (_input->data_type())
- {
- case OperandType::FLOAT32:
- nnfw::cker::L2NormalizeFloat32(
- getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
- break;
-
- case OperandType::QUANT_UINT8_ASYMM:
- {
- nnfw::cker::L2NormParams params;
- assert(_input->data_offset() == 128);
- params.input_zero_point = _input->data_offset();
- nnfw::cker::L2NormalizeQuant8(
- params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
- }
- break;
-
- default:
- throw std::runtime_error{"L2Norm: Unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/L2NormLayer.h b/runtime/onert/backend/cpu/ops/L2NormLayer.h
deleted file mode 100644
index 63f2d1133..000000000
--- a/runtime/onert/backend/cpu/ops/L2NormLayer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-class L2NormLayer : public ::onert::exec::IFunction
-{
-public:
- L2NormLayer() : _input(nullptr), _output(nullptr)
- {
- // Nothing
- }
-
-public:
- void configure(const IPortableTensor *_input, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc
deleted file mode 100644
index 1d7ee6caa..000000000
--- a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LogSoftMaxLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/LogSoftMax.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-LogSoftMaxLayer::LogSoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0), _axis(0)
-{
- // DO NOTHING
-}
-
-void LogSoftMaxLayer::PopulateLookupTable(const float kBeta)
-{
- const float scale = -_input->data_scale() * kBeta;
- const int32_t max_uint8 = std::numeric_limits<uint8_t>::max();
- for (int32_t val = 0; val <= max_uint8; ++val)
- {
- _table[max_uint8 - val] = expf(scale * val);
- }
-}
-
-void LogSoftMaxLayer::logsoftmaxFloat32()
-{
- nnfw::cker::SoftmaxParams op_params;
- op_params.beta = _beta;
- op_params.axis = _axis;
- nnfw::cker::LogSoftmax(op_params, getTensorShape(_input),
- reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output),
- reinterpret_cast<float *>(_output->buffer()));
-}
-
-void LogSoftMaxLayer::logsoftmaxQuant8()
-{
- nnfw::cker::SoftmaxParams op_params;
- op_params.beta = _beta;
- op_params.axis = _axis;
- op_params.table = _table;
- op_params.zero_point = _output->data_offset();
- op_params.scale = _output->data_scale();
- nnfw::cker::LogSoftmax(op_params, _input->data_scale(), getTensorShape(_input),
- reinterpret_cast<const uint8_t *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
-}
-
-void LogSoftMaxLayer::configure(const IPortableTensor *input, const float beta, const int axis,
- IPortableTensor *output)
-{
- _input = input;
- _output = output;
- _beta = beta;
- _axis = axis;
- if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- PopulateLookupTable(_beta);
- }
-}
-
-void LogSoftMaxLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- logsoftmaxFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- logsoftmaxQuant8();
- }
- else
- {
- throw std::runtime_error{"LogSoftmax : unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h
deleted file mode 100644
index 1533f3361..000000000
--- a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_LOGSOFTMAXLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_LOGSOFTMAXLAYER_H__
-
-#include "../Tensor.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class LogSoftMaxLayer : public ::onert::exec::IFunction
-{
-public:
- LogSoftMaxLayer();
-
-public:
- void logsoftmaxFloat32();
-
- void logsoftmaxQuant8();
-
- void configure(const IPortableTensor *input, const float beta, const int axis,
- IPortableTensor *output);
-
- void run();
-
- void PopulateLookupTable(const float kBeta);
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
-
- float _beta;
- int _axis;
- float _table[256];
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_LOGSOFTMAXLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.cc b/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.cc
deleted file mode 100644
index b770cce5d..000000000
--- a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MatrixBandPartLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/MatrixBandPart.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-MatrixBandPartLayer::MatrixBandPartLayer()
- : _input(nullptr), _num_lower_diag(nullptr), _num_upper_diag(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void MatrixBandPartLayer::matrixBandPartFloat32()
-{
- if (_num_lower_diag->data_type() == OperandType::INT64)
- {
- nnfw::cker::MatrixBandPart<int64_t>(
- *reinterpret_cast<const int64_t *>(_num_lower_diag->buffer()),
- *reinterpret_cast<const int64_t *>(_num_upper_diag->buffer()), getTensorShape(_input),
- reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output),
- reinterpret_cast<float *>(_output->buffer()));
- }
- else
- {
- nnfw::cker::MatrixBandPart<int32_t>(
- *reinterpret_cast<const int32_t *>(_num_lower_diag->buffer()),
- *reinterpret_cast<const int32_t *>(_num_upper_diag->buffer()), getTensorShape(_input),
- reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output),
- reinterpret_cast<float *>(_output->buffer()));
- }
-}
-
-void MatrixBandPartLayer::matrixBandPartQuant8() { throw std::runtime_error{"NYI"}; }
-
-void MatrixBandPartLayer::configure(const IPortableTensor *input,
- const IPortableTensor *num_lower_diag,
- const IPortableTensor *num_upper_diag, IPortableTensor *output)
-{
- _input = input;
- _num_lower_diag = num_lower_diag;
- _num_upper_diag = num_upper_diag;
- _output = output;
-}
-
-void MatrixBandPartLayer::run()
-{
- if (_num_lower_diag->data_type() != _num_upper_diag->data_type())
- {
- throw std::runtime_error{"MatrixBandpart: num_lower and num_upper must have the same type"};
- }
-
- if (_input->data_type() == OperandType::FLOAT32)
- {
- matrixBandPartFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- matrixBandPartQuant8();
- }
- else
- {
- throw std::runtime_error{"MatrixBandpart: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.h b/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.h
deleted file mode 100644
index 9dcc6b277..000000000
--- a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_MATRIXBANDPARTLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_MATRIXBANDPARTLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class MatrixBandPartLayer : public ::onert::exec::IFunction
-{
-public:
- MatrixBandPartLayer();
-
-public:
- void matrixBandPartFloat32();
-
- void matrixBandPartQuant8();
-
- void configure(const IPortableTensor *input, const IPortableTensor *num_lower_diag,
- const IPortableTensor *num_upper_diag, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_num_lower_diag;
- const IPortableTensor *_num_upper_diag;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_MATRIXBANDPARTLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/MeanLayer.cc b/runtime/onert/backend/cpu/ops/MeanLayer.cc
deleted file mode 100644
index 4921ac748..000000000
--- a/runtime/onert/backend/cpu/ops/MeanLayer.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MeanLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/ReduceMean.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-MeanLayer::MeanLayer() : _input(nullptr), _axes(nullptr), _output(nullptr), _keep_dims(false)
-{
- // DO NOTHING
-}
-
-void MeanLayer::MeanFloat32()
-{
- nnfw::cker::Mean(getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
- getReducerAxes(_axes));
-}
-
-void MeanLayer::MeanQuant8()
-{
- nnfw::cker::MeanQ8Asymm(getTensorShape(_input),
- reinterpret_cast<const uint8_t *>(_input->buffer()), _input->data_scale(),
- _input->data_offset(), getTensorShape(_output),
- reinterpret_cast<uint8_t *>(_output->buffer()), _output->data_scale(),
- _output->data_offset(), getReducerAxes(_axes));
-}
-
-void MeanLayer::configure(const IPortableTensor *input, const IPortableTensor *axes,
- IPortableTensor *output, bool keep_dims)
-{
- _input = input;
- _axes = axes;
- _output = output;
- _keep_dims = keep_dims;
-}
-
-void MeanLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- MeanFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- MeanQuant8();
- }
- else
- {
- throw std::runtime_error{"Mean: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/MeanLayer.h b/runtime/onert/backend/cpu/ops/MeanLayer.h
deleted file mode 100644
index 3e95c1203..000000000
--- a/runtime/onert/backend/cpu/ops/MeanLayer.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_MEANLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_MEANLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class MeanLayer : public ::onert::exec::IFunction
-{
-public:
- MeanLayer();
-
-public:
- void MeanFloat32();
-
- void MeanQuant8();
-
- void configure(const IPortableTensor *input, const IPortableTensor *axes, IPortableTensor *output,
- bool keep_dims);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_axes;
- IPortableTensor *_output;
- bool _keep_dims;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_MEANLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/OneHotLayer.cc b/runtime/onert/backend/cpu/ops/OneHotLayer.cc
deleted file mode 100644
index 2a82b00ee..000000000
--- a/runtime/onert/backend/cpu/ops/OneHotLayer.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OneHotLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/OneHot.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-template <typename T> void OneHotLayer::oneHotImpl()
-{
- // It assumes index is int32_t type.
- nnfw::cker::OneHot<T, int32_t>(
- *reinterpret_cast<const int32_t *>(_depth->buffer()),
- *reinterpret_cast<T *>(_on_value->buffer()), *reinterpret_cast<T *>(_off_value->buffer()),
- _axis, getTensorShape(_indices), reinterpret_cast<const int32_t *>(_indices->buffer()),
- getTensorShape(_output), reinterpret_cast<T *>(_output->buffer()));
-}
-
-void OneHotLayer::configure(const IPortableTensor *indices, const IPortableTensor *depth,
- const IPortableTensor *on_value, const IPortableTensor *off_value,
- IPortableTensor *output, const int32_t axis)
-{
- _indices = indices;
- _output = output;
- _depth = depth;
- _on_value = on_value;
- _off_value = off_value;
- _axis = axis;
-}
-
-void OneHotLayer::run()
-{
- if (_output->data_type() == OperandType::FLOAT32)
- {
- oneHotImpl<float>();
- }
- else
- {
- throw std::runtime_error{"OneHot: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/OneHotLayer.h b/runtime/onert/backend/cpu/ops/OneHotLayer.h
deleted file mode 100644
index c05498440..000000000
--- a/runtime/onert/backend/cpu/ops/OneHotLayer.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_ONEHOTLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_ONEHOTLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class OneHotLayer : public ::onert::exec::IFunction
-{
-public:
- OneHotLayer()
- : _indices(nullptr), _depth(nullptr), _on_value(nullptr), _off_value(nullptr),
- _output(nullptr), _axis(-1)
- {
- // DO NOTHING
- }
-
-public:
- template <typename T> void oneHotImpl();
-
- void configure(const IPortableTensor *indices, const IPortableTensor *depth,
- const IPortableTensor *on_value, const IPortableTensor *off_value,
- IPortableTensor *output, int32_t axis);
-
- void run() override;
-
-private:
- const IPortableTensor *_indices;
- const IPortableTensor *_depth;
- const IPortableTensor *_on_value;
- const IPortableTensor *_off_value;
- IPortableTensor *_output;
-
- int32_t _axis;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_ONEHOTLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.cc b/runtime/onert/backend/cpu/ops/OperationUtils.cc
deleted file mode 100644
index 2eee6dc85..000000000
--- a/runtime/onert/backend/cpu/ops/OperationUtils.cc
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationUtils.h"
-
-#include <algorithm>
-#include <cassert>
-#include <cmath>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-uint32_t getNumberOfDimensions(const IPortableTensor *tensor)
-{
- assert(tensor);
- return tensor->num_dimensions();
-}
-
-uint32_t getNumberOfElements(const IPortableTensor *tensor)
-{
- assert(tensor);
- uint32_t count = 1;
- for (size_t i = 0; i < tensor->num_dimensions(); i++)
- {
- count *= tensor->dimension(i);
- }
- return count;
-}
-
-uint32_t getSizeOfDimension(const IPortableTensor *tensor, uint32_t dimensionIdx)
-{
- assert(tensor);
- if (dimensionIdx >= tensor->num_dimensions())
- {
- // TODO, log the error
- return 0;
- }
- return tensor->dimension(dimensionIdx);
-}
-
-void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
-{
- if (double_multiplier == 0.)
- {
- *quantized_multiplier = 0;
- *shift = 0;
- return;
- }
- const double q = std::frexp(double_multiplier, shift);
- auto q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
-
- assert(q_fixed <= (1ll << 31));
- if (q_fixed == (1ll << 31))
- {
- q_fixed /= 2;
- ++*shift;
- }
- assert(q_fixed <= std::numeric_limits<int32_t>::max());
- *quantized_multiplier = static_cast<int32_t>(q_fixed);
-}
-
-void GetQuantizedConvolutionMultiplier(const IPortableTensor *input, const IPortableTensor *filter,
- const IPortableTensor *bias, const IPortableTensor *output,
- double *multiplier)
-{
- const double input_product_scale = input->data_scale() * filter->data_scale();
- const double bias_scale = (bias != nullptr) ? bias->data_scale() : input_product_scale;
- const double output_scale = output->data_scale();
- // The following conditions must be guaranteed by the training pipeline.
- UNUSED_RELEASE(bias_scale);
- assert(std::abs(input_product_scale - bias_scale) <=
- 1e-6 * std::min(input_product_scale, bias_scale));
- assert(input_product_scale >= 0);
- assert(input_product_scale < output_scale);
- *multiplier = input_product_scale / output_scale;
-}
-
-void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
- int *left_shift)
-{
- assert(double_multiplier > 1.);
- const double q = std::frexp(double_multiplier, left_shift);
- int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
- assert(q_fixed <= (1ll << 31));
- if (q_fixed == (1ll << 31))
- {
- q_fixed /= 2;
- ++*left_shift;
- }
- assert(*left_shift >= 0);
- assert(q_fixed <= std::numeric_limits<int32_t>::max());
- *quantized_multiplier = static_cast<int32_t>(q_fixed);
-}
-
-void CalculateActivationRangeUint8(ir::Activation activation, const IPortableTensor *output,
- int32_t *act_min, int32_t *act_max)
-{
- const int32_t qmin = std::numeric_limits<uint8_t>::min();
- const int32_t qmax = std::numeric_limits<uint8_t>::max();
- const auto scale = output->data_scale();
- const auto zero_point = output->data_offset();
- auto quantize = [scale, zero_point](float f) {
- return zero_point + static_cast<int32_t>(std::round(f / scale));
- };
- if (activation == ir::Activation::RELU)
- {
- *act_min = std::max(qmin, quantize(0.0));
- *act_max = qmax;
- }
- else if (activation == ir::Activation::RELU6)
- {
- *act_min = std::max(qmin, quantize(0.0));
- *act_max = std::min(qmax, quantize(6.0));
- }
- else if (activation == ir::Activation::RELU1)
- {
- *act_min = std::max(qmin, quantize(-1.0));
- *act_max = std::min(qmax, quantize(1.0));
- }
- else if (activation == ir::Activation::SIGMOID)
- {
- *act_min = std::max(qmin, quantize(0.0));
- *act_max = std::min(qmax, quantize(1.0));
- }
- else if (activation == ir::Activation::NONE)
- {
- *act_min = qmin;
- *act_max = qmax;
- }
- else
- {
- std::cout << "Unsupported fused activation function." << std::endl;
- }
-}
-
-bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2)
-{
- if (input1 == input2)
- return true;
- if (input2 == NULL || input2 == NULL)
- return false;
-
- if (input1 == NULL)
- {
- return (getNumberOfDimensions(input2) == 0);
- }
-
- if (getNumberOfDimensions(input1) != getNumberOfDimensions(input2))
- return false;
-
- for (uint32_t i = 0; i < getNumberOfDimensions(input1); i++)
- if (input1->dimension(i) != input2->dimension(i))
- return false;
-
- return true;
-}
-
-int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift)
-{
- const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) *
- (1ll << (31 - input_integer_bits)) / (1ll << input_left_shift);
- // Tighten bound using floor. Suppose that we could use the exact value.
- // After scaling the difference, the result would be at the maximum. Thus we
- // must ensure that our value has lower magnitude.
- return static_cast<int32_t>(std::floor(max_input_rescaled));
-}
-
-uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions)
-{
- uint32_t size = 4;
-
- switch (type)
- {
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- size = 4;
- break;
- case OperandType::BOOL8:
- case OperandType::QUANT_UINT8_ASYMM:
- case OperandType::QUANT_INT8_SYMM:
- size = 1;
- break;
- case OperandType::INT64:
- size = 8;
- break;
- default:
- throw std::runtime_error("Not supported operand type.");
- break;
- }
-
- for (auto d : dimensions)
- {
- assert(d >= 0);
- size *= static_cast<uint32_t>(d);
- }
-
- return size;
-}
-
-nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type)
-{
- switch (ir_padding_type)
- {
- case ir::PaddingType::EXPLICIT:
- return nnfw::cker::PaddingType::kNone;
- case ir::PaddingType::SAME:
- return nnfw::cker::PaddingType::kSame;
- case ir::PaddingType::VALID:
- return nnfw::cker::PaddingType::kValid;
- default:
- throw std::runtime_error("Wrong padding type.");
- break;
- }
-}
-
-std::vector<int32_t> getReducerAxes(const IPortableTensor *axes)
-{
- std::vector<int32_t> ret;
-
- assert(axes->layout() == ir::Layout::NHWC);
- assert(axes->dimension(0) == axes->getShape().num_elements());
- switch (axes->data_type())
- {
- case ir::DataType::INT32:
- {
- for (size_t i = 0; i < axes->dimension(0); ++i)
- ret.emplace_back(*(reinterpret_cast<const int32_t *>(axes->buffer()) + i));
- break;
- }
- case ir::DataType::INT64:
- {
- for (size_t i = 0; i < axes->dimension(0); ++i)
- ret.emplace_back(*(reinterpret_cast<const int64_t *>(axes->buffer()) + i));
- break;
- }
- default:
- throw std::runtime_error("getReducerAxes: Not supported data type");
- break;
- }
- return ret;
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.h b/runtime/onert/backend/cpu/ops/OperationUtils.h
deleted file mode 100644
index eb24dd43c..000000000
--- a/runtime/onert/backend/cpu/ops/OperationUtils.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
-#define __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <cker/Shape.h>
-#include <cker/Types.h>
-#include <iostream>
-#include <ir/DataType.h>
-#include <ir/InternalType.h>
-#include <ir/Operand.h>
-#include <ir/Padding.h>
-
-#include <limits>
-#include <vector>
-
-using OperandType = onert::ir::DataType;
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-union DataPtr {
- uint8_t *u8;
- int8_t *i8;
- uint32_t *u32;
- int32_t *i32;
- bool *b;
- float *f;
- int64_t *i64;
- void *v;
-};
-
-union ConstDataPtr {
- const uint8_t *u8;
- const int8_t *i8;
- const uint32_t *u32;
- const int32_t *i32;
- const bool *b;
- const float *f;
- const int64_t *i64;
- const void *v;
-};
-
-uint32_t getNumberOfDimensions(const IPortableTensor *tensor);
-
-uint32_t getNumberOfElements(const IPortableTensor *tensor);
-
-uint32_t getSizeOfDimension(const IPortableTensor *tensor, uint32_t dimensionIdx);
-
-inline nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *tensor)
-{
- assert(tensor);
- const int32_t extended_rank = 4;
- int32_t raw_shape[extended_rank];
- uint32_t src = extended_rank - tensor->num_dimensions();
- for (uint32_t i = 0; i < extended_rank; ++i)
- {
- if (i < src)
- {
- raw_shape[i] = 1;
- }
- else
- {
- raw_shape[i] = tensor->dimension(i - src);
- }
- }
-
- return nnfw::cker::Shape(extended_rank, raw_shape);
-}
-
-inline nnfw::cker::Shape getTensorShape(const IPortableTensor *tensor)
-{
- if (tensor == nullptr)
- return nnfw::cker::Shape();
-
- const ir::Shape &shape = tensor->get_info().shape();
-
- assert(tensor->layout() == ir::Layout::NHWC);
-
- auto rank = shape.rank();
- nnfw::cker::Shape ret(rank);
- auto data = ret.DimsData();
- for (int i = 0; i < rank; ++i)
- {
- data[i] = shape.dim(i);
- }
- return ret;
-}
-
-inline nnfw::cker::FusedActivationFunctionType
-convertActivationType(const ir::Activation activation)
-{
- switch (activation)
- {
- case ir::Activation::NONE:
- return nnfw::cker::FusedActivationFunctionType::kNone;
- case ir::Activation::RELU:
- return nnfw::cker::FusedActivationFunctionType::kRelu;
- case ir::Activation::RELU1:
- return nnfw::cker::FusedActivationFunctionType::kRelu1;
- case ir::Activation::RELU6:
- return nnfw::cker::FusedActivationFunctionType::kRelu6;
- default:
- throw std::runtime_error{"CPU backend: Cannot convert activation type"};
- }
-}
-
-inline int32_t getAxis(uint32_t rank, int32_t axis, ir::Layout frontend_layout)
-{
- auto ret = axis;
-
- if (axis < 0)
- {
- ret += rank;
- }
-
- // NCHW -> NHWC
- if (frontend_layout == ir::Layout::NCHW)
- {
- int32_t permutation[4] = {0, 3, 1, 2};
- ret = permutation[ret];
- }
-
- return ret;
-}
-
-void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
-
-void GetQuantizedConvolutionMultiplier(const IPortableTensor *inputDescr,
- const IPortableTensor *filterDescr,
- const IPortableTensor *biasDescr,
- const IPortableTensor *outputDescr, double *multiplier);
-
-void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
- int *left_shift);
-
-template <typename T>
-void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
-{
- if (activation == ir::Activation::RELU)
- {
- *activation_min = 0;
- *activation_max = std::numeric_limits<T>::max();
- }
- else if (activation == ir::Activation::RELU6)
- {
- *activation_min = 0;
- *activation_max = 6;
- }
- else if (activation == ir::Activation::RELU1)
- {
- *activation_min = -1;
- *activation_max = 1;
- }
- else if (activation == ir::Activation::SIGMOID)
- {
- *activation_min = 0;
- *activation_max = 1;
- }
- else if (activation == ir::Activation::NONE)
- {
- *activation_min = std::numeric_limits<T>::lowest();
- *activation_max = std::numeric_limits<T>::max();
- }
- else
- {
- std::cout << "Unsupported fused activation function." << std::endl;
- }
-}
-
-void CalculateActivationRangeUint8(ir::Activation activation, const IPortableTensor *output,
- int32_t *act_min, int32_t *act_max);
-
-bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2);
-
-int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
-
-uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions);
-
-nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type);
-
-std::vector<int32_t> getReducerAxes(const IPortableTensor *axes);
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
diff --git a/runtime/onert/backend/cpu/ops/PackLayer.cc b/runtime/onert/backend/cpu/ops/PackLayer.cc
deleted file mode 100644
index 314b192a2..000000000
--- a/runtime/onert/backend/cpu/ops/PackLayer.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PackLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Pack.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-PackLayer::PackLayer() : _inputs(), _output(nullptr), _axis(0)
-{
- // DO NOTHING
-}
-
-template <typename T> void PackLayer::packImpl()
-{
- uint32_t num_inputs = _inputs.size();
- nnfw::cker::PackParams op_params;
- op_params.axis = _axis;
- op_params.inputs_count = num_inputs;
-
- std::vector<nnfw::cker::Shape *> inputDimsPtr;
- std::vector<nnfw::cker::Shape> inputDims;
- inputDimsPtr.reserve(num_inputs);
- inputDims.reserve(num_inputs);
-
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- inputDims.push_back(getTensorShape(_inputs[i]));
- inputDimsPtr.push_back(&inputDims[i]);
- }
-
- std::vector<const T *> inputPtrs;
-
- for (const auto input : _inputs)
- {
- inputPtrs.emplace_back(reinterpret_cast<const T *>(input->buffer()));
- }
-
- nnfw::cker::Pack<T>(op_params, inputPtrs.data(), getTensorShape(_output),
- reinterpret_cast<T *>(_output->buffer()));
-}
-
-void PackLayer::configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis,
- IPortableTensor *output)
-{
- assert(inputs.size() > 0);
- assert(output != nullptr);
-
- _inputs = inputs;
- _axis = axis;
- _output = output;
-}
-
-void PackLayer::run()
-{
- if (_output->data_type() == OperandType::FLOAT32)
- {
- packImpl<float>();
- }
- else if (_output->data_type() == OperandType::INT32)
- {
- packImpl<int32_t>();
- }
- else
- {
- throw std::runtime_error{"Pack: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/PackLayer.h b/runtime/onert/backend/cpu/ops/PackLayer.h
deleted file mode 100644
index b92c8d48c..000000000
--- a/runtime/onert/backend/cpu/ops/PackLayer.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_PACKLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_PACKLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class PackLayer : public ::onert::exec::IFunction
-{
-public:
- PackLayer();
-
-public:
- template <typename T> void packImpl();
-
- void configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis,
- IPortableTensor *output);
- void run() override;
-
-private:
- std::vector<const IPortableTensor *> _inputs;
- IPortableTensor *_output;
- int32_t _axis;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_PACKLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/PadLayer.cc b/runtime/onert/backend/cpu/ops/PadLayer.cc
deleted file mode 100644
index 6a2bf9da0..000000000
--- a/runtime/onert/backend/cpu/ops/PadLayer.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PadLayer.h"
-
-#include <cker/operation/Pad.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-PadLayer::PadLayer()
- : _input(nullptr), _output(nullptr), _padData(), _padRank(), _constantValueData()
-{
- // DO NOTHING
-}
-
-template <typename T> void PadLayer::padImpl(const T *constant_value_data)
-{
- nnfw::cker::Pad<T>(_padData, _padRank, getTensorShape(_input),
- reinterpret_cast<const T *>(_input->buffer()), getTensorShape(_output),
- reinterpret_cast<T *>(_output->buffer()), constant_value_data);
-}
-
-void PadLayer::configure(const IPortableTensor *input, IPortableTensor *output,
- const int32_t *padData, int32_t padRank, const void *constantValueData)
-{
- _input = input;
- _output = output;
- memcpy(_padData, padData, sizeof(_padData));
- _padRank = padRank;
- _constantValueData.v = constantValueData;
-}
-
-void PadLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- padImpl<float>(_constantValueData.f);
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- if (_constantValueData.u8 == nullptr)
- {
- uint8_t pad_value = static_cast<uint8_t>(_output->data_offset());
- padImpl<uint8_t>(&pad_value);
- }
- else
- {
- padImpl<uint8_t>(_constantValueData.u8);
- }
- }
- else
- {
- throw std::runtime_error{"Pad: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/PadLayer.h b/runtime/onert/backend/cpu/ops/PadLayer.h
deleted file mode 100644
index efd73d5e5..000000000
--- a/runtime/onert/backend/cpu/ops/PadLayer.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_PADLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_PADLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-// Note, this is pad with mode=`CONSTANT`: it doesn't support `REFLECT` and
-// `SYMMETRIC`
-class PadLayer : public ::onert::exec::IFunction
-{
-public:
- PadLayer();
-
-public:
- template <typename T> void padImpl(const T *constant_value_data);
-
- void configure(const IPortableTensor *input, IPortableTensor *output, const int32_t *padData,
- int32_t padRank, const void *constantValueData = nullptr);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
-
- int32_t _padData[8];
- int32_t _padRank;
- ConstDataPtr _constantValueData;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_PADLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/PoolLayer.cc b/runtime/onert/backend/cpu/ops/PoolLayer.cc
deleted file mode 100644
index 85d02a751..000000000
--- a/runtime/onert/backend/cpu/ops/PoolLayer.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PoolLayer.h"
-
-#include <cker/operation/AveragePool.h>
-#include <cker/operation/MaxPool.h>
-
-#include <unordered_map>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-namespace
-{
-template <typename T>
-void avgPool2D(const nnfw::cker::PoolParams &params, const IPortableTensor *input,
- IPortableTensor *output)
-{
- nnfw::cker::AveragePool<T>(params, getTensorShape(input),
- reinterpret_cast<const T *>(input->buffer()), getTensorShape(output),
- reinterpret_cast<T *>(output->buffer()));
-}
-
-template <typename T>
-void maxPool2D(const nnfw::cker::PoolParams &params, const IPortableTensor *input,
- IPortableTensor *output)
-{
- nnfw::cker::MaxPool<T>(params, getTensorShape(input),
- reinterpret_cast<const T *>(input->buffer()), getTensorShape(output),
- reinterpret_cast<T *>(output->buffer()));
-}
-
-template <typename T>
-std::function<void(const IPortableTensor *, IPortableTensor *)>
-generateKernelGeneric(const nnfw::cker::PoolParams &params, PoolType op_type)
-{
- if (op_type == PoolType::kAvg)
- {
- return std::bind(&avgPool2D<T>, params, std::placeholders::_1, std::placeholders::_2);
- }
- else if (op_type == PoolType::kMax)
- {
- return std::bind(&maxPool2D<T>, params, std::placeholders::_1, std::placeholders::_2);
- }
- else
- {
- throw std::runtime_error{"Pool: unsupported pool type"};
- }
-}
-} // namespace
-
-PoolLayer::PoolLayer() : _input(nullptr), _output(nullptr), _kernel()
-{
- // DO NOTHING
-}
-
-#define POOLING_PARAMETERS \
- nnfw::cker::PoolParams op_params; \
- op_params.stride_height = strideHeight; \
- op_params.stride_width = strideWidth; \
- op_params.filter_height = kernelHeight; \
- op_params.filter_width = kernelWidth; \
- op_params.padding_values.height = (int8_t)paddingTop; \
- op_params.padding_values.width = (int8_t)paddingLeft;
-
-void PoolLayer::configure(const IPortableTensor *input, const uint32_t paddingLeft, const uint32_t,
- const uint32_t paddingTop, const uint32_t, const uint32_t strideWidth,
- const uint32_t strideHeight, const uint32_t kernelWidth,
- const uint32_t kernelHeight, const ir::Activation activation,
- IPortableTensor *output, const PoolType op_type)
-{
- assert(input != nullptr);
- assert(output != nullptr);
-
- _input = input;
- _output = output;
-
- POOLING_PARAMETERS
- if (_input->data_type() == OperandType::FLOAT32)
- {
- float output_activation_min = 0;
- float output_activation_max = 0;
- CalculateActivationRange<float>(activation, &output_activation_min, &output_activation_max);
- op_params.float_activation_min = output_activation_min;
- op_params.float_activation_max = output_activation_max;
-
- _kernel = generateKernelGeneric<float>(op_params, op_type);
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- int32_t output_activation_min = 0;
- int32_t output_activation_max = 0;
- CalculateActivationRangeUint8(activation, _output, &output_activation_min,
- &output_activation_max);
- op_params.quantized_activation_min = output_activation_min;
- op_params.quantized_activation_max = output_activation_max;
- _kernel = generateKernelGeneric<uint8_t>(op_params, op_type);
- }
- else
- {
- throw std::runtime_error{"Pool: unsupported data type"};
- }
-}
-
-void PoolLayer::run() { _kernel(_input, _output); }
-
-#undef AVGPOOLING_PARAMETERS
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/PoolLayer.h b/runtime/onert/backend/cpu/ops/PoolLayer.h
deleted file mode 100644
index b37835946..000000000
--- a/runtime/onert/backend/cpu/ops/PoolLayer.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_POOLLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_POOLLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-enum class PoolType
-{
- kAvg,
- kL2,
- kMax,
-};
-
-class PoolLayer : public ::onert::exec::IFunction
-{
-public:
- PoolLayer();
-
-public:
- void configure(const IPortableTensor *input, const uint32_t paddingLeft,
- const uint32_t paddingRight, const uint32_t paddingTop,
- const uint32_t paddingBottom, const uint32_t strideWidth,
- const uint32_t strideHeight, const uint32_t kernelWidth,
- const uint32_t kernelHeight, const ir::Activation activation,
- IPortableTensor *output, const PoolType op_type);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
-
- std::function<void(const IPortableTensor *, IPortableTensor *)> _kernel;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_POOLLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/PowLayer.cc b/runtime/onert/backend/cpu/ops/PowLayer.cc
deleted file mode 100644
index 04a1af1e1..000000000
--- a/runtime/onert/backend/cpu/ops/PowLayer.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PowLayer.h"
-
-#include <cker/operation/Pow.h>
-#include <cker/operation/BinaryArithmeticOps.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-void PowLayer::powFloat32()
-{
- float output_activation_min = 0, output_activation_max = 0;
- CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
- nnfw::cker::BinaryArithmeticOpParam op_params;
- op_params.float_activation_max = output_activation_max;
- op_params.float_activation_min = output_activation_min;
-
- if (!HaveSameShapes(_lhs, _rhs))
- {
- nnfw::cker::BroadcastBinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::POW>(
- op_params, getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()),
- getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
- return;
- }
-
- nnfw::cker::powImpl(getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()),
- getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
-}
-
-void PowLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
- ir::Activation activation, IPortableTensor *output)
-{
- _lhs = lhs;
- _rhs = rhs;
- _activation = activation;
- _output = output;
-}
-
-void PowLayer::run()
-{
- if (_output->data_type() == OperandType::FLOAT32)
- powFloat32();
- else
- throw std::runtime_error{"Pow: unsupportted data type"};
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/PowLayer.h b/runtime/onert/backend/cpu/ops/PowLayer.h
deleted file mode 100644
index 2689aad17..000000000
--- a/runtime/onert/backend/cpu/ops/PowLayer.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_POWLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_POWLAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class PowLayer : public ::onert::exec::IFunction
-{
-public:
- PowLayer() : _lhs(nullptr), _rhs(nullptr), _output(nullptr)
- {
- // DO NOTHING
- }
-
-public:
- void powFloat32();
-
- void configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
- const ir::Activation activation, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_lhs;
- const IPortableTensor *_rhs;
- IPortableTensor *_output;
-
- ir::Activation _activation{ir::Activation::NONE};
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_POWLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/RangeLayer.cc b/runtime/onert/backend/cpu/ops/RangeLayer.cc
deleted file mode 100644
index f00101fa8..000000000
--- a/runtime/onert/backend/cpu/ops/RangeLayer.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "RangeLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Range.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-RangeLayer::RangeLayer() : _start(nullptr), _limit(nullptr), _delta(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void RangeLayer::configure(const IPortableTensor *start, const IPortableTensor *limit,
- const IPortableTensor *delta, IPortableTensor *output)
-{
- _start = start;
- _limit = limit;
- _delta = delta;
- _output = output;
-}
-
-void RangeLayer::run()
-{
- switch (_output->data_type())
- {
- case OperandType::FLOAT32:
- nnfw::cker::Range<float>(reinterpret_cast<float *>(_start->buffer()),
- reinterpret_cast<float *>(_limit->buffer()),
- reinterpret_cast<float *>(_delta->buffer()),
- reinterpret_cast<float *>(_output->buffer()));
- break;
- case OperandType::INT32:
- nnfw::cker::Range<int32_t>(reinterpret_cast<int32_t *>(_start->buffer()),
- reinterpret_cast<int32_t *>(_limit->buffer()),
- reinterpret_cast<int32_t *>(_delta->buffer()),
- reinterpret_cast<int32_t *>(_output->buffer()));
- break;
- default:
- throw std::runtime_error{"Range: unsupported data type"};
- break;
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/RangeLayer.h b/runtime/onert/backend/cpu/ops/RangeLayer.h
deleted file mode 100644
index 2d83b39b1..000000000
--- a/runtime/onert/backend/cpu/ops/RangeLayer.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_RANGELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_RANGELAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-class RangeLayer : public ::onert::exec::IFunction
-{
-public:
- RangeLayer();
-
- void configure(const IPortableTensor *start, const IPortableTensor *limit,
- const IPortableTensor *delta, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_start;
- const IPortableTensor *_limit;
- const IPortableTensor *_delta;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_RANGELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/RankLayer.cc b/runtime/onert/backend/cpu/ops/RankLayer.cc
deleted file mode 100644
index 4690bdf72..000000000
--- a/runtime/onert/backend/cpu/ops/RankLayer.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "RankLayer.h"
-
-#include "OperationUtils.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-RankLayer::RankLayer() : _input(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void RankLayer::configure(const IPortableTensor *input, IPortableTensor *output)
-{
- _input = input;
- _output = output;
-}
-
-void RankLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32 || _input->data_type() == OperandType::INT32)
- {
- int32_t *output_data = reinterpret_cast<int32_t *>(_output->buffer());
- output_data[0] = _input->num_dimensions();
- }
- else
- {
- throw std::runtime_error{"Rank : unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/RankLayer.h b/runtime/onert/backend/cpu/ops/RankLayer.h
deleted file mode 100644
index 6282ceb07..000000000
--- a/runtime/onert/backend/cpu/ops/RankLayer.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_RANKLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_RANKLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class RankLayer : public ::onert::exec::IFunction
-{
-public:
- RankLayer();
-
-public:
- void configure(const IPortableTensor *input, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_RANKLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ReduceLayer.cc b/runtime/onert/backend/cpu/ops/ReduceLayer.cc
deleted file mode 100644
index 4a55b2a33..000000000
--- a/runtime/onert/backend/cpu/ops/ReduceLayer.cc
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ReduceLayer.h"
-
-#include "OperationUtils.h"
-
-#include "cker/neon/neon_check.h"
-#include <cker/operation/Reduce.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-namespace
-{
-
-template <typename T>
-void evalLogic(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes,
- bool keep_dims, T init_value, nnfw::cker::Reduce &reduce_kernel,
- T reducer(const T current, const T in))
-{
- reduce_kernel.prepare(input->num_dimensions(), axes.size());
- bool result = reduce_kernel.ReduceGeneric<T>(
- getTensorShape(input), reinterpret_cast<const T *>(input->buffer()), getTensorShape(output),
- reinterpret_cast<T *>(output->buffer()), axes, keep_dims, init_value, reducer);
-
- if (!result)
- {
- throw std::runtime_error{"Reduce: Fail to run"};
- }
-}
-
-template <typename T>
-std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
-evalType(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
-{
- switch (reduce_type)
- {
- case ReduceType::kSum:
- return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, keep_dims, static_cast<T>(0), reduce_kernel,
- [](const T current, const T in) -> T { return in + current; });
- break;
- case ReduceType::kProd:
- return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, keep_dims, static_cast<T>(1), reduce_kernel,
- [](const T current, const T in) -> T { return in * current; });
- break;
- case ReduceType::kMax:
- return std::bind(
- &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
- keep_dims, std::numeric_limits<T>::lowest(), reduce_kernel,
- [](const T current, const T in) -> T { return (in > current) ? in : current; });
- break;
- case ReduceType::kMin:
- return std::bind(
- &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
- keep_dims, std::numeric_limits<T>::max(), reduce_kernel,
- [](const T current, const T in) -> T { return (in < current) ? in : current; });
- break;
- default:
- throw std::runtime_error{"Reduce: Unsupported reduce type"};
- }
-}
-
-// Template specialization for bool type
-template <>
-std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
-evalType<bool>(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
-{
- switch (reduce_type)
- {
- case ReduceType::kAny:
- return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, keep_dims, false, reduce_kernel,
- [](const bool current, const bool in) -> bool { return in || current; });
- break;
- case ReduceType::kAll:
- return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, keep_dims, true, reduce_kernel,
- [](const bool current, const bool in) -> bool { return in && current; });
- break;
- default:
- throw std::runtime_error{"Reduce: Unsupported reduce type"};
- }
-}
-
-std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
-generateKernelGeneric(const IPortableTensor *input, bool keep_dims,
- nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
-{
- switch (input->data_type())
- {
- case OperandType::FLOAT32:
- return evalType<float>(keep_dims, reduce_kernel, reduce_type);
- case OperandType::INT32:
- return evalType<int32_t>(keep_dims, reduce_kernel, reduce_type);
- case OperandType::BOOL8:
- return evalType<bool>(keep_dims, reduce_kernel, reduce_type);
- default:
- throw std::runtime_error{"Reduce(generic): unsupported data type"};
- }
-}
-
-// TODO Refine this function
-void evalSumQuantized(const IPortableTensor *input, IPortableTensor *output,
- const std::vector<int> &axes, bool keep_dims,
- nnfw::cker::Reduce &reduce_kernel)
-{
- const bool same_scale = (input->data_scale() == output->data_scale() &&
- input->data_offset() == output->data_offset());
-
- reduce_kernel.prepare(input->num_dimensions(), axes.size());
-
- if (!same_scale)
- {
- std::vector<int32_t> temp_sum(output->getShape().num_elements());
- bool result = reduce_kernel.QuantizedMeanOrSum<uint8_t, int32_t>(
- reinterpret_cast<const uint8_t *>(input->buffer()), input->data_offset(),
- input->data_scale(), getTensorShape(input), reinterpret_cast<uint8_t *>(output->buffer()),
- output->data_offset(), output->data_scale(), getTensorShape(output), axes, keep_dims,
- temp_sum.data(), true, [](const int32_t current, const uint8_t in) -> int32_t {
- const int32_t actual_in = static_cast<int32_t>(in);
- return current + actual_in;
- });
-
- if (!result)
- {
- throw std::runtime_error{"Reduce: Fail to run"};
- }
-
- return;
- }
-
- const auto kernel = generateKernelGeneric(input, keep_dims, reduce_kernel, ReduceType::kSum);
- kernel(input, output, axes);
-}
-
-} // namespace
-
-ReduceLayer::ReduceLayer()
- : _input(nullptr), _axes(nullptr), _output(nullptr), _reduce_kernel(new nnfw::cker::Reduce()),
- _kernel(), _reduceType(ReduceType::kInvalid)
-{
- // DO NOTHING
-}
-
-ReduceLayer::~ReduceLayer() = default;
-
-void ReduceLayer::configure(const IPortableTensor *input, const IPortableTensor *axes,
- IPortableTensor *output, ReduceType reduceType, bool keep_dims)
-{
- _input = input;
- _axes = axes;
- _output = output;
- _reduceType = reduceType;
-
- switch (_reduceType)
- {
- case ReduceType::kSum:
- if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- _kernel = std::bind(&evalSumQuantized, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, keep_dims, *_reduce_kernel);
- return;
- }
- _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kSum);
- break;
- case ReduceType::kProd:
- _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kProd);
- break;
- case ReduceType::kMax:
- _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMax);
- break;
- case ReduceType::kMin:
- _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMin);
- break;
- case ReduceType::kAny:
- _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAny);
- break;
- case ReduceType::kAll:
- _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAll);
- break;
- default:
- throw std::runtime_error{"Reduce: Unsupported reduce type"};
- }
-}
-
-void ReduceLayer::run()
-{
- const auto axes = getReducerAxes(_axes);
-#ifdef USE_NEON
- int32_t rank = _input->num_dimensions();
- if (_input->data_type() == ir::DataType::FLOAT32 && _reduceType == ReduceType::kSum &&
- axes.size() == 1 && (axes[0] == -1 || axes[0] == rank - 1))
- {
- OptimizedReduceSum(reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_input),
- reinterpret_cast<float *>(_output->buffer()));
- return;
- }
-#endif // NEON
- _kernel(_input, _output, axes);
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ReduceLayer.h b/runtime/onert/backend/cpu/ops/ReduceLayer.h
deleted file mode 100644
index 8265dd41f..000000000
--- a/runtime/onert/backend/cpu/ops/ReduceLayer.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_REDUCESUMLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_REDUCESUMLAYER_H__
-
-#include "cker/neon/neon_check.h"
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-#include <memory>
-
-namespace nnfw
-{
-namespace cker
-{
-class Reduce;
-}
-} // namespace nnfw
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-enum class ReduceType
-{
- kSum,
- kProd,
- kMax,
- kMin,
- kAny,
- kAll,
- kInvalid // For debug and initialize
-};
-
-class ReduceLayer : public ::onert::exec::IFunction
-{
-public:
- ReduceLayer();
- ~ReduceLayer();
-
-public:
- void configure(const IPortableTensor *input, const IPortableTensor *axes, IPortableTensor *output,
- ReduceType reduceType, bool keep_dims);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_axes;
- IPortableTensor *_output;
-
- std::unique_ptr<nnfw::cker::Reduce> _reduce_kernel;
- std::function<void(const IPortableTensor *input, IPortableTensor *output,
- const std::vector<int> &axes)>
- _kernel;
-
- ReduceType _reduceType;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_REDUCESUMLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ReshapeLayer.cc b/runtime/onert/backend/cpu/ops/ReshapeLayer.cc
deleted file mode 100644
index 3c2b115f4..000000000
--- a/runtime/onert/backend/cpu/ops/ReshapeLayer.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ReshapeLayer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-ReshapeLayer::ReshapeLayer() : _input(nullptr), _shape(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void ReshapeLayer::reshapeGeneric()
-{
- size_t count = _input->total_size();
- memcpy(_output->buffer(), _input->buffer(), count);
-}
-
-void ReshapeLayer::configure(const IPortableTensor *input, const IPortableTensor *shape,
- IPortableTensor *output)
-{
- _input = input;
- /* note : shape is optional. If not provided from model, _shape is nullptr. */
- _shape = shape;
- _output = output;
-}
-
-void ReshapeLayer::run() { reshapeGeneric(); }
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ReshapeLayer.h b/runtime/onert/backend/cpu/ops/ReshapeLayer.h
deleted file mode 100644
index b49c0bf7d..000000000
--- a/runtime/onert/backend/cpu/ops/ReshapeLayer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_RESHAPELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_RESHAPELAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ReshapeLayer : public ::onert::exec::IFunction
-{
-public:
- ReshapeLayer();
-
-public:
- void reshapeGeneric();
-
- void configure(const IPortableTensor *input, const IPortableTensor *shape,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_shape;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_RESHAPELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc
deleted file mode 100644
index 1fe56cb99..000000000
--- a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "OperationUtils.h"
-#include "ResizeBilinearLayer.h"
-#include "cker/operation/ResizeBilinear.h"
-#include <cker/Types.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-ResizeBilinearLayer::ResizeBilinearLayer()
- : _input(nullptr), _output(nullptr), _size(nullptr), _output_height(0), _output_width(0),
- _align_corners(false), _half_pixel_centers(false)
-{
- // DO NOTHING
-}
-
-void ResizeBilinearLayer::configure(const IPortableTensor *input, IPortableTensor *output,
- const IPortableTensor *size, bool align_corners,
- bool half_pixel_centers)
-{
- assert(!size->is_constant());
- _input = input;
- _output = output;
- _size = size;
- _align_corners = align_corners;
- _half_pixel_centers = half_pixel_centers;
-}
-
-void ResizeBilinearLayer::configure(const IPortableTensor *input, IPortableTensor *output,
- int32_t output_height, int32_t output_width, bool align_corners,
- bool half_pixel_centers)
-{
- assert(_size == nullptr);
- if (output_height < 0)
- {
- throw std::runtime_error{"ResizeBilinear: size value must be positive value, output_height = " +
- std::to_string(output_height)};
- }
- if (output_width < 0)
- {
- throw std::runtime_error{"ResizeBilinear: size value must be positive value, output_width = " +
- std::to_string(output_width)};
- }
- _input = input;
- _output = output;
- _output_height = output_height;
- _output_width = output_width;
- _align_corners = align_corners;
- _half_pixel_centers = half_pixel_centers;
-}
-
-void ResizeBilinearLayer::run()
-{
- nnfw::cker::ResizeBilinearParams params;
- if (_size == nullptr)
- {
- params.output_height = _output_height;
- params.output_width = _output_width;
- }
- else
- {
- const auto size_buf = reinterpret_cast<const int32_t *>(_size->buffer());
- params.output_height = size_buf[0];
- params.output_width = size_buf[1];
- }
- params.align_corners = _align_corners;
- params.half_pixel_centers = _half_pixel_centers;
-
- switch (_input->data_type())
- {
- case OperandType::FLOAT32:
- nnfw::cker::ResizeBilinear(
- params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
- break;
-
- case OperandType::QUANT_UINT8_ASYMM:
- nnfw::cker::ResizeBilinear(
- params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
- break;
-
- case OperandType::UINT8:
- case OperandType::BOOL8:
- case OperandType::FLOAT16:
- case OperandType::INT32:
- case OperandType::INT64:
- case OperandType::QUANT_INT8_SYMM:
- std::runtime_error("ResizeBilinear NYI");
- break;
- default:
- std::runtime_error("ResizeBilinear unsupported data type");
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h
deleted file mode 100644
index d7ae1c620..000000000
--- a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__
-#define __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ResizeBilinearLayer : public ::onert::exec::IFunction
-{
-public:
- ResizeBilinearLayer();
-
-public:
- void configure(const IPortableTensor *input1, IPortableTensor *output,
- const IPortableTensor *size, bool align_corners, bool half_pixel_centers);
-
- void configure(const IPortableTensor *input, IPortableTensor *output, int32_t output_height,
- int32_t output_width, bool align_corners, bool half_pixel_centers);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
- const IPortableTensor *_size;
- int32_t _output_height;
- int32_t _output_width;
- bool _align_corners;
- bool _half_pixel_centers;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__
diff --git a/runtime/onert/backend/cpu/ops/ReverseLayer.cc b/runtime/onert/backend/cpu/ops/ReverseLayer.cc
deleted file mode 100644
index 7979e77a0..000000000
--- a/runtime/onert/backend/cpu/ops/ReverseLayer.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ReverseLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Reverse.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-void ReverseLayer::run()
-{
-
- if (_axis->total_size() != 4)
- {
- throw std::runtime_error{"Reverse: only support 1 axis"};
- }
- int32_t axis = *(reinterpret_cast<int32_t *>(_axis->buffer()));
- if (axis < 0)
- {
- axis += _input->num_dimensions();
- }
-
- switch (_input->data_type())
- {
- case OperandType::FLOAT32:
- nnfw::cker::Reverse<float>(
- axis, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
- break;
- default:
- throw std::runtime_error{"Reverse: unsupported data type"};
- }
-}
-
-void ReverseLayer::configure(const IPortableTensor *input, const IPortableTensor *axis,
- IPortableTensor *output)
-{
- _input = input;
- _axis = axis;
- _output = output;
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ReverseLayer.h b/runtime/onert/backend/cpu/ops/ReverseLayer.h
deleted file mode 100644
index 9591dae32..000000000
--- a/runtime/onert/backend/cpu/ops/ReverseLayer.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_REVERSE_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_REVERSE_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ReverseLayer : public ::onert::exec::IFunction
-{
-public:
- ReverseLayer() : _input{nullptr}, _axis{nullptr}, _output{nullptr}
- {
- // DO NOTHING
- }
-
-public:
- void configure(const IPortableTensor *input, const IPortableTensor *axis,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_axis;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_REVERSE_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SelectLayer.cc b/runtime/onert/backend/cpu/ops/SelectLayer.cc
deleted file mode 100644
index 95cfe1df0..000000000
--- a/runtime/onert/backend/cpu/ops/SelectLayer.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SelectLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Select.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-SelectLayer::SelectLayer()
- : _cond(nullptr), _input_true(nullptr), _input_false(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void SelectLayer::configure(const IPortableTensor *cond, const IPortableTensor *input_true,
- const IPortableTensor *input_false, IPortableTensor *output)
-{
- _cond = cond;
- _input_true = input_true;
- _input_false = input_false;
- _output = output;
-}
-
-void SelectLayer::run()
-{
-
-#define KERNEL_SELECT(type, op) \
- nnfw::cker::op(getTensorShape(_cond), reinterpret_cast<uint8_t *>(_cond->buffer()), \
- getTensorShape(_input_true), reinterpret_cast<type *>(_input_true->buffer()), \
- getTensorShape(_input_false), reinterpret_cast<type *>(_input_false->buffer()), \
- getTensorShape(_output), reinterpret_cast<type *>(_output->buffer()));
-
-#define KERNEL_SWITCH(type, op) \
- switch (type) \
- { \
- break; \
- case OperandType::FLOAT32: \
- KERNEL_SELECT(float, op); \
- break; \
- default: \
- throw std::runtime_error{"Select: unsupported data type"}; \
- }
-
- auto input_type = _input_true->data_type();
- bool require_broadcast =
- !HaveSameShapes(_input_true, _cond) || !HaveSameShapes(_input_false, _cond);
- bool rank_one_select = ((_input_true->num_dimensions() == 1) && !require_broadcast);
-
- if (rank_one_select)
- {
- KERNEL_SWITCH(input_type, RankOneSelect);
- }
- else if (require_broadcast)
- {
- KERNEL_SWITCH(input_type, BroadcastSelect4DSlow);
- }
- else
- {
- KERNEL_SWITCH(input_type, Select);
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SelectLayer.h b/runtime/onert/backend/cpu/ops/SelectLayer.h
deleted file mode 100644
index 2ef50f369..000000000
--- a/runtime/onert/backend/cpu/ops/SelectLayer.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SELECT_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SELECT_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class SelectLayer : public ::onert::exec::IFunction
-{
-public:
- SelectLayer();
-
-public:
- void configure(const IPortableTensor *cond, const IPortableTensor *input_true,
- const IPortableTensor *input_false, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_cond;
- const IPortableTensor *_input_true;
- const IPortableTensor *_input_false;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SELECT_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/ShapeLayer.cc b/runtime/onert/backend/cpu/ops/ShapeLayer.cc
deleted file mode 100644
index bffb04bc6..000000000
--- a/runtime/onert/backend/cpu/ops/ShapeLayer.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ShapeLayer.h"
-
-#include "OperationUtils.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-ShapeLayer::ShapeLayer() : _input(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-template <typename T> void GetRawShape(const IPortableTensor *input, T *output_data)
-{
- for (uint32_t i = 0; i < input->num_dimensions(); ++i)
- {
- output_data[i] = static_cast<T>(input->dimension(i));
- }
-}
-
-void ShapeLayer::shape()
-{
- if (_output->data_type() == OperandType::UINT32)
- {
- GetRawShape(_input, reinterpret_cast<uint32_t *>(_output->buffer()));
- }
- else if (_output->data_type() == OperandType::INT32)
- {
- GetRawShape(_input, reinterpret_cast<int32_t *>(_output->buffer()));
- }
- else
- {
- throw std::runtime_error{"NYI : not supported output type for ShapeLayer"};
- }
-}
-
-void ShapeLayer::configure(const IPortableTensor *input, IPortableTensor *output)
-{
- _input = input;
- _output = output;
-}
-
-void ShapeLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32 || _input->data_type() == OperandType::INT32 ||
- _input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- shape();
- }
- else
- {
- throw std::runtime_error{"Shape : unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/ShapeLayer.h b/runtime/onert/backend/cpu/ops/ShapeLayer.h
deleted file mode 100644
index fb358c7a4..000000000
--- a/runtime/onert/backend/cpu/ops/ShapeLayer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SHAPELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SHAPELAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class ShapeLayer : public ::onert::exec::IFunction
-{
-public:
- ShapeLayer();
-
-public:
- void shape();
-
- void configure(const IPortableTensor *input, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SHAPELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SliceLayer.cc b/runtime/onert/backend/cpu/ops/SliceLayer.cc
deleted file mode 100644
index 449c073e6..000000000
--- a/runtime/onert/backend/cpu/ops/SliceLayer.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SliceLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Slice.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-SliceLayer::SliceLayer() : _input(nullptr), _begin(nullptr), _size(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-template <typename T>
-void SliceLayer::GetBeginAndSizeVectors(int dimensions, const IPortableTensor *begin,
- const IPortableTensor *size, std::vector<int> *begins,
- std::vector<int> *sizes)
-{
- for (int idx = dimensions - 1; idx >= 0; --idx)
- {
- begins->push_back(reinterpret_cast<T *>(begin->buffer())[idx]);
- sizes->push_back(reinterpret_cast<T *>(size->buffer())[idx]);
- }
-}
-
-template <typename T> void SliceLayer::sliceImpl()
-{
- const int kMaxDim = nnfw::cker::Shape::kMaxSmallSize;
-
- std::vector<int> begins;
- std::vector<int> sizes;
- begins.reserve(kMaxDim);
- sizes.reserve(kMaxDim);
-
- GetBeginAndSizeVectors<int32_t>(_input->num_dimensions(), _begin, _size, &begins, &sizes);
-
- // begins : 0-based, sizes : 1-based
- for (int i = _input->num_dimensions(); i < kMaxDim; ++i)
- {
- begins.push_back(0);
- sizes.push_back(1);
- }
-
- nnfw::cker::SliceParams op_params;
- op_params.begin_count = 4;
- op_params.size_count = 4;
- for (int i = 0; i < 4; ++i)
- {
- op_params.begin[i] = begins[3 - i];
- op_params.size[i] = sizes[3 - i];
- }
-
- nnfw::cker::Slice(op_params, getExtendedTensorShape(_input),
- reinterpret_cast<const T *>(_input->buffer()),
- reinterpret_cast<T *>(_output->buffer()));
-}
-
-void SliceLayer::configure(const IPortableTensor *input, const IPortableTensor *begin,
- const IPortableTensor *size, IPortableTensor *output)
-{
- _input = input;
- _output = output;
- _begin = begin;
- _size = size;
-}
-
-void SliceLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- sliceImpl<float>();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- sliceImpl<uint8_t>();
- }
- else
- {
- throw std::runtime_error{"Slice: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SliceLayer.h b/runtime/onert/backend/cpu/ops/SliceLayer.h
deleted file mode 100644
index 650e2c97a..000000000
--- a/runtime/onert/backend/cpu/ops/SliceLayer.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SLICELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SLICELAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class SliceLayer : public ::onert::exec::IFunction
-{
-public:
- SliceLayer();
-
-public:
- void configure(const IPortableTensor *input, const IPortableTensor *begin,
- const IPortableTensor *size, IPortableTensor *output);
-
- void run() override;
-
-private:
- template <typename T> void sliceImpl();
-
- template <typename T>
- void GetBeginAndSizeVectors(int dimensions, const IPortableTensor *begin,
- const IPortableTensor *size, std::vector<int> *begins,
- std::vector<int> *sizes);
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_begin;
- const IPortableTensor *_size;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SLICELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SoftMaxLayer.cc b/runtime/onert/backend/cpu/ops/SoftMaxLayer.cc
deleted file mode 100644
index b42be3042..000000000
--- a/runtime/onert/backend/cpu/ops/SoftMaxLayer.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SoftMaxLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/SoftMax.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-SoftMaxLayer::SoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0)
-{
- // DO NOTHING
-}
-
-void SoftMaxLayer::softmaxFloat32()
-{
- if (getNumberOfDimensions(_input) == 1)
- {
- uint32_t input_size = getNumberOfElements(_input);
- nnfw::cker::Softmax(reinterpret_cast<const float *>(_input->buffer()), input_size, 1, _beta,
- reinterpret_cast<float *>(_output->buffer()));
- }
- else if (getNumberOfDimensions(_input) == 2)
- {
- uint32_t batch_size = getSizeOfDimension(_input, 0);
- if (batch_size == 0)
- throw std::runtime_error("batch_size should not be 0");
-
- uint32_t input_size = getNumberOfElements(_input) / batch_size;
- nnfw::cker::Softmax(reinterpret_cast<const float *>(_input->buffer()), input_size, batch_size,
- _beta, reinterpret_cast<float *>(_output->buffer()));
- }
- else if (getNumberOfDimensions(_input) == 4)
- {
- nnfw::cker::SoftmaxParams op_params;
- op_params.beta = _beta;
- nnfw::cker::Softmax(op_params, getTensorShape(_input),
- reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output),
- reinterpret_cast<float *>(_output->buffer()));
- }
- else
- {
- nnfw::cker::SoftmaxParams op_params;
- op_params.beta = _beta;
- nnfw::cker::reference::Softmax(
- op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
- }
-}
-
-void SoftMaxLayer::softmaxQuant8()
-{
- nnfw::cker::Shape descrIn4D(4);
-
- if (getNumberOfDimensions(_input) == 2)
- {
- auto batch_size = getSizeOfDimension(_input, 0);
- if (batch_size == 0)
- throw std::runtime_error("batch_size should not be 0");
-
- auto input_size = getNumberOfElements(_input) / batch_size;
- descrIn4D.SetDim(0, batch_size);
- descrIn4D.SetDim(1, 1);
- descrIn4D.SetDim(2, 1);
- descrIn4D.SetDim(3, input_size);
- }
- else if (getNumberOfDimensions(_input) == 4)
- {
- descrIn4D.SetDim(0, _input->dimension(0));
- descrIn4D.SetDim(1, _input->dimension(1));
- descrIn4D.SetDim(2, _input->dimension(2));
- descrIn4D.SetDim(3, _input->dimension(3));
- }
- else
- {
- throw std::runtime_error{"only 2D and 4D tensors supported"};
- }
- if (_output->data_offset() != 0 || _output->data_scale() != 1.f / 256)
- {
- throw std::runtime_error{"incorrect scale / offset for output"};
- }
- static const int32_t kScaledDiffIntegerBits = 5;
- const double input_beta_real_multiplier = std::min(
- 1.0 * _beta * _input->data_scale() * (1 << (31 - kScaledDiffIntegerBits)), (1ll << 31) - 1.0);
- int32_t input_multiplier = 0;
- int32_t input_left_shift = 0;
- QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier, &input_multiplier,
- &input_left_shift);
- float diff_min = -1.0f * CalculateInputRadius(kScaledDiffIntegerBits, input_left_shift);
-
- nnfw::cker::SoftmaxParams op_params;
- op_params.input_multiplier = input_multiplier;
- op_params.input_left_shift = input_left_shift;
- op_params.diff_min = diff_min;
- nnfw::cker::Softmax(op_params, descrIn4D, reinterpret_cast<const uint8_t *>(_input->buffer()),
- descrIn4D, reinterpret_cast<uint8_t *>(_output->buffer()));
-}
-
-void SoftMaxLayer::configure(const IPortableTensor *input, const float beta,
- IPortableTensor *output)
-{
- _input = input;
- _output = output;
- _beta = beta;
-}
-
-void SoftMaxLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- softmaxFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- softmaxQuant8();
- }
- else
- {
- throw std::runtime_error{"SoftMax: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SoftMaxLayer.h b/runtime/onert/backend/cpu/ops/SoftMaxLayer.h
deleted file mode 100644
index d0c704c2c..000000000
--- a/runtime/onert/backend/cpu/ops/SoftMaxLayer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class SoftMaxLayer : public ::onert::exec::IFunction
-{
-public:
- SoftMaxLayer();
-
-public:
- void softmaxFloat32();
-
- void softmaxQuant8();
-
- void configure(const IPortableTensor *input, const float beta, IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- IPortableTensor *_output;
-
- float _beta;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc
deleted file mode 100644
index 896e262ba..000000000
--- a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SpaceToBatchNDLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/SpaceToBatchND.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-SpaceToBatchNDLayer::SpaceToBatchNDLayer()
- : _input(nullptr), _block_shape(nullptr), _padding(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-// TO DO : move into shape inferer
-void SpaceToBatchNDLayer::checkDimension()
-{
- const int kSpatialDimensionNum = 2;
- if (_block_shape->dimension(0) != kSpatialDimensionNum)
- {
- throw std::runtime_error("SpaceToBatchND : block_shape(block_size) tensor's rank is wrong\n");
- }
-
- // Ensures the input height and width (with padding) is a multiple of block
- // shape height and width.
- for (int dim = 0; dim < kSpatialDimensionNum; ++dim)
- {
- int final_dim_size =
- (_input->dimension(dim + 1) + reinterpret_cast<int32_t *>(_padding->buffer())[dim * 2] +
- reinterpret_cast<int32_t *>(_padding->buffer())[dim * 2 + 1]);
-
- if (final_dim_size % reinterpret_cast<int32_t *>(_block_shape->buffer())[dim] != 0)
- {
- throw std::runtime_error(
- "SpaceToBatchND : padded input's dimension is not a multiple of block size\n");
- }
-
- if ((int32_t)_output->dimension(dim + 1) !=
- final_dim_size / reinterpret_cast<int32_t *>(_block_shape->buffer())[dim])
- {
- throw std::runtime_error("SpaceToBatchND : wrong output dimension\n");
- }
- }
-}
-
-template <> uint32_t SpaceToBatchNDLayer::getPad<float>() { return 0; }
-template <> uint32_t SpaceToBatchNDLayer::getPad<uint8_t>() { return _output->data_offset(); }
-
-template <typename T> void SpaceToBatchNDLayer::spaceToBatchND()
-{
- checkDimension();
-
- nnfw::cker::SpaceToBatchParams params;
- params.output_offset = getPad<T>();
-
- nnfw::cker::SpaceToBatchND(
- params, getTensorShape(_input), reinterpret_cast<const T *>(_input->buffer()),
- getTensorShape(_block_shape), reinterpret_cast<const int32_t *>(_block_shape->buffer()),
- getTensorShape(_padding), reinterpret_cast<const int32_t *>(_padding->buffer()),
- getTensorShape(_output), reinterpret_cast<T *>(_output->buffer()));
-}
-
-void SpaceToBatchNDLayer::configure(const IPortableTensor *input,
- const IPortableTensor *block_shape,
- const IPortableTensor *padding, IPortableTensor *output)
-{
- _input = input;
- _block_shape = block_shape;
- _padding = padding;
- _output = output;
-}
-
-void SpaceToBatchNDLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- spaceToBatchND<float>();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- spaceToBatchND<uint8_t>();
- }
- else
- {
- throw std::runtime_error{"SpaceToBatchND: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.h b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.h
deleted file mode 100644
index 6f4638719..000000000
--- a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-class SpaceToBatchNDLayer : public ::onert::exec::IFunction
-{
-public:
- SpaceToBatchNDLayer();
-
- void configure(const IPortableTensor *input, const IPortableTensor *block_shape,
- const IPortableTensor *padding, IPortableTensor *output);
-
- void run() override;
-
-private:
- void checkDimension();
-
- template <typename T> uint32_t getPad();
-
- template <typename T> void spaceToBatchND();
-
- const IPortableTensor *_input;
- const IPortableTensor *_block_shape;
- const IPortableTensor *_padding;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc
deleted file mode 100644
index a0869aed8..000000000
--- a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SpaceToDepthLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/SpaceToDepth.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-SpaceToDepthLayer::SpaceToDepthLayer() : _input(nullptr), _block_size(0), _output(nullptr)
-{
- // DO NOTHING
-}
-
-template <typename T> void SpaceToDepthLayer::spaceToDepth()
-{
-
- nnfw::cker::SpaceToDepthParams params;
- params.block_size = _block_size;
-
- nnfw::cker::SpaceToDepth(params, getTensorShape(_input),
- reinterpret_cast<const T *>(_input->buffer()), getTensorShape(_output),
- reinterpret_cast<T *>(_output->buffer()));
-}
-
-void SpaceToDepthLayer::configure(const IPortableTensor *input, const int32_t block_size,
- IPortableTensor *output)
-{
- _input = input;
- _block_size = block_size;
- _output = output;
-}
-
-void SpaceToDepthLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- spaceToDepth<float>();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- spaceToDepth<uint8_t>();
- }
- else
- {
- throw std::runtime_error{"SpaceToDepth: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h
deleted file mode 100644
index c11ef2b0a..000000000
--- a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in riting, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SPACE_TO_DEPTH_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SPACE_TO_DEPTH_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-class SpaceToDepthLayer : public ::onert::exec::IFunction
-{
-public:
- SpaceToDepthLayer();
-
- void configure(const IPortableTensor *input, const int32_t block_size, IPortableTensor *output);
-
- void run() override;
-
-private:
- template <typename T> void spaceToDepth();
-
- const IPortableTensor *_input;
- int32_t _block_size;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SplitLayer.cc b/runtime/onert/backend/cpu/ops/SplitLayer.cc
deleted file mode 100644
index 922cde2e3..000000000
--- a/runtime/onert/backend/cpu/ops/SplitLayer.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SplitLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Split.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-SplitLayer::SplitLayer() : _input(nullptr), _axis(nullptr), _num_splits(0), _outputs()
-{
- // DO NOTHING
-}
-
-template <typename T> void SplitLayer::split(void)
-{
- nnfw::cker::SplitParams op_params;
- if (_axis->total_size() != sizeof(int32_t))
- {
- throw std::runtime_error("ArgMinMax: wrong shape of axis");
- }
- auto axis = *reinterpret_cast<const int32_t *>(_axis->buffer());
- if (axis < 0)
- {
- axis += _input->num_dimensions();
- }
- op_params.axis = axis;
- op_params.num_split = _num_splits;
-
- std::vector<T *> outputPtrs;
-
- for (const auto output : _outputs)
- {
- assert(output->total_size() == sizeOfData(output->data_type(), output->getShape().dims()));
- outputPtrs.emplace_back(reinterpret_cast<T *>(output->buffer()));
- }
-
- assert(_input->total_size() == sizeOfData(_input->data_type(), _input->getShape().dims()));
- nnfw::cker::Split<T>(op_params, getTensorShape(_input), reinterpret_cast<T *>(_input->buffer()),
- getTensorShape(_outputs[0]), outputPtrs.data());
-}
-
-void SplitLayer::configure(const IPortableTensor *input, const IPortableTensor *axis,
- uint16_t num_splits, std::vector<IPortableTensor *> &outputs)
-{
- assert(input != nullptr);
-
- _num_splits = num_splits;
- _input = input;
- _axis = axis;
- _outputs = outputs;
-}
-
-void SplitLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- split<float>();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- split<uint8_t>();
- }
- else if (_input->data_type() == OperandType::INT32)
- {
- split<int32_t>();
- }
- else if (_input->data_type() == OperandType::INT64)
- {
- split<int64_t>();
- }
- else
- {
- throw std::runtime_error{"Split: unsupported input type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SplitLayer.h b/runtime/onert/backend/cpu/ops/SplitLayer.h
deleted file mode 100644
index 090f87166..000000000
--- a/runtime/onert/backend/cpu/ops/SplitLayer.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SPLITLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SPLITLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class SplitLayer : public ::onert::exec::IFunction
-{
-public:
- SplitLayer();
-
-public:
- template <typename T> void split(void);
-
- void configure(const IPortableTensor *input, const IPortableTensor *axis, uint16_t num_splits,
- std::vector<IPortableTensor *> &outputs);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_axis;
- uint16_t _num_splits;
- std::vector<IPortableTensor *> _outputs;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SPLITLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SplitVLayer.cc b/runtime/onert/backend/cpu/ops/SplitVLayer.cc
deleted file mode 100644
index d6ca12442..000000000
--- a/runtime/onert/backend/cpu/ops/SplitVLayer.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SplitVLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/SplitV.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-SplitVLayer::SplitVLayer()
- : _input(nullptr), _size_splits(nullptr), _split_dim(nullptr), _num_splits(0), _outputs()
-{
- // DO NOTHING
-}
-
-template <typename T> void SplitVLayer::splitV(void)
-{
- nnfw::cker::SplitVParams op_params;
- op_params.axis = *(reinterpret_cast<const int32_t *>(_split_dim->buffer()));
- op_params.num_split = _num_splits;
-
- std::vector<T *> outputPtrs;
- std::vector<nnfw::cker::Shape> outshape;
-
- for (const auto output : _outputs)
- {
- assert(output->total_size() == sizeOfData(output->data_type(), output->getShape().dims()));
- outputPtrs.emplace_back(reinterpret_cast<T *>(output->buffer()));
- outshape.emplace_back(getTensorShape(output));
- }
-
- assert(_input->total_size() == sizeOfData(_input->data_type(), _input->getShape().dims()));
- nnfw::cker::SplitV<T>(op_params, getTensorShape(_input), reinterpret_cast<T *>(_input->buffer()),
- outshape, outputPtrs.data());
-}
-
-void SplitVLayer::configure(const IPortableTensor *input, const IPortableTensor *size_splits,
- const IPortableTensor *split_dim, uint16_t num_splits,
- std::vector<IPortableTensor *> &outputs)
-{
- assert(input != nullptr);
-
- _num_splits = num_splits;
- _size_splits = size_splits;
- _input = input;
- _split_dim = split_dim;
- _outputs = outputs;
-}
-
-void SplitVLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- splitV<float>();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- splitV<uint8_t>();
- }
- else if (_input->data_type() == OperandType::INT32)
- {
- splitV<int32_t>();
- }
- else if (_input->data_type() == OperandType::INT64)
- {
- splitV<int64_t>();
- }
- else
- {
- throw std::runtime_error{"SplitV: unsupported input type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SplitVLayer.h b/runtime/onert/backend/cpu/ops/SplitVLayer.h
deleted file mode 100644
index 98f2f4406..000000000
--- a/runtime/onert/backend/cpu/ops/SplitVLayer.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class SplitVLayer : public ::onert::exec::IFunction
-{
-public:
- SplitVLayer();
-
-public:
- template <typename T> void splitV(void);
-
- void configure(const IPortableTensor *input, const IPortableTensor *size_splits,
- const IPortableTensor *size_dim, uint16_t num_splits,
- std::vector<IPortableTensor *> &outputs);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_size_splits;
- const IPortableTensor *_split_dim;
- uint16_t _num_splits;
- std::vector<IPortableTensor *> _outputs;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.cc b/runtime/onert/backend/cpu/ops/SquaredDiffLayer.cc
deleted file mode 100644
index cf67a5c00..000000000
--- a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "SquaredDiffLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/SqDiff.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-SqDiffLayer::SqDiffLayer() : _input1(nullptr), _input2(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void SqDiffLayer::SqDiffFloat32()
-{
- nnfw::cker::SqDiff(getTensorShape(_input1), reinterpret_cast<const float *>(_input1->buffer()),
- getTensorShape(_input2), reinterpret_cast<const float *>(_input2->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
-}
-
-void SqDiffLayer::configure(const IPortableTensor *input1, const IPortableTensor *input2,
- IPortableTensor *output)
-{
- _input1 = input1;
- _input2 = input2;
- _output = output;
-}
-
-void SqDiffLayer::run()
-{
- if (_input1->data_type() == OperandType::FLOAT32)
- {
- SqDiffFloat32();
- }
- else
- {
- throw std::runtime_error{"SquaredDiff: unsupported data type"};
- }
-}
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.h b/runtime/onert/backend/cpu/ops/SquaredDiffLayer.h
deleted file mode 100644
index 386eea9ae..000000000
--- a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_SQDIFFLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_SQDIFFLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class SqDiffLayer : public ::onert::exec::IFunction
-{
-public:
- SqDiffLayer();
-
-public:
- void SqDiffFloat32();
-
- void configure(const IPortableTensor *input1, const IPortableTensor *input2,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input1;
- const IPortableTensor *_input2;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_SQDIFFLAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc
deleted file mode 100644
index b8dfcb4b5..000000000
--- a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StatelessRandomUniformLayer.h"
-
-#include <cker/operation/StatelessRandomUniform.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-StatelessRandomUniformLayer::StatelessRandomUniformLayer()
- : _shape(nullptr), _seed(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void StatelessRandomUniformLayer::configure(const IPortableTensor *shape,
- const IPortableTensor *seed, IPortableTensor *output)
-{
- _shape = shape;
- _seed = seed;
- _output = output;
-}
-
-void StatelessRandomUniformLayer::StatelessRandomUniformFloat32()
-{
- nnfw::cker::StatelessRandomUniform(
- getTensorShape(_shape), reinterpret_cast<const int *>(_shape->buffer()),
- getTensorShape(_seed), reinterpret_cast<const int *>(_seed->buffer()),
- getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
-}
-
-void StatelessRandomUniformLayer::run()
-{
- switch (_output->data_type())
- {
- // ToDo : It need to support INT8 and UINT8 also when will be applied quantization.
- case OperandType::FLOAT32:
- StatelessRandomUniformFloat32();
- break;
- default:
- throw std::runtime_error{"StatelessRandomUniformLayer: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h
deleted file mode 100644
index ef11d623d..000000000
--- a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__
-#define __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class StatelessRandomUniformLayer : public ::onert::exec::IFunction
-{
-public:
- StatelessRandomUniformLayer();
-
-public:
- void configure(const IPortableTensor *shape, const IPortableTensor *seed,
- IPortableTensor *output);
-
- void StatelessRandomUniformFloat32();
-
- void run() override;
-
-private:
- const IPortableTensor *_shape;
- const IPortableTensor *_seed;
-
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__
diff --git a/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc b/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc
deleted file mode 100644
index f77f4d691..000000000
--- a/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StridedSliceLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/StridedSlice.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-StridedSliceLayer::StridedSliceLayer()
- : _input(nullptr), _begin(nullptr), _end(nullptr), _strides(nullptr), _output(nullptr),
- _begin_mask(0), _ellipsis_mask(0), _end_mask(0), _new_axis_mask(0), _shrink_axis_mask(0)
-{
-}
-
-template <typename T> void StridedSliceLayer::stridedSliceImpl()
-{
- const auto input_shape = getTensorShape(_input);
- const auto output_shape = getTensorShape(_output);
- auto op_params = nnfw::cker::buildStridedSliceParams(
- reinterpret_cast<uint32_t *>(_begin->buffer()), reinterpret_cast<uint32_t *>(_end->buffer()),
- reinterpret_cast<uint32_t *>(_strides->buffer()), _begin_mask, _end_mask, _shrink_axis_mask,
- input_shape.DimensionsCount());
-
- nnfw::cker::checkOutputSize(op_params, input_shape, output_shape, input_shape.DimensionsCount());
-
- nnfw::cker::StridedSlice(op_params, input_shape, reinterpret_cast<const T *>(_input->buffer()),
- output_shape, reinterpret_cast<T *>(_output->buffer()));
-}
-
-void StridedSliceLayer::configure(const IPortableTensor *input, const IPortableTensor *begin,
- const IPortableTensor *end, const IPortableTensor *strides,
- IPortableTensor *output, const int32_t begin_mask,
- const int32_t end_mask, const int32_t shrink_axis_mask)
-{
- _input = input;
- _begin = begin;
- _end = end;
- _strides = strides;
- _output = output;
-
- _begin_mask = begin_mask;
- _ellipsis_mask = 0;
- _end_mask = end_mask;
- _new_axis_mask = 0;
- _shrink_axis_mask = shrink_axis_mask;
-}
-
-void StridedSliceLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- stridedSliceImpl<float>();
- }
- else if (_input->data_type() == OperandType::INT32)
- {
- stridedSliceImpl<int32_t>();
- }
- else if (_input->data_type() == OperandType::INT64)
- {
- stridedSliceImpl<int64_t>();
- }
- else
- {
- throw std::runtime_error{"StridedSlice: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/StridedSliceLayer.h b/runtime/onert/backend/cpu/ops/StridedSliceLayer.h
deleted file mode 100644
index 468408152..000000000
--- a/runtime/onert/backend/cpu/ops/StridedSliceLayer.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_STRIDEDSLICELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_STRIDEDSLICELAYER_H__
-
-#include <backend/IPortableTensor.h>
-#include "OperationUtils.h"
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class StridedSliceLayer : public ::onert::exec::IFunction
-{
-public:
- StridedSliceLayer();
-
-public:
- void configure(const IPortableTensor *input, const IPortableTensor *begin,
- const IPortableTensor *end, const IPortableTensor *strides,
- IPortableTensor *output, const int32_t begin_mask, const int32_t end_mask,
- const int32_t shrink_axis_mask);
- void run() override;
-
-private:
- template <typename T> void stridedSliceImpl();
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_begin;
- const IPortableTensor *_end;
- const IPortableTensor *_strides;
- IPortableTensor *_output;
-
- int32_t _begin_mask;
- int32_t _ellipsis_mask;
- int32_t _end_mask;
- int32_t _new_axis_mask;
- int32_t _shrink_axis_mask;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_STRIDEDSLICELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/TileLayer.cc b/runtime/onert/backend/cpu/ops/TileLayer.cc
deleted file mode 100644
index bfc371972..000000000
--- a/runtime/onert/backend/cpu/ops/TileLayer.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TileLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Tile.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-TileLayer::TileLayer() : _input(nullptr), _multipliers(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-void TileLayer::tileFloat32()
-{
- TileOneDimension(getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
- reinterpret_cast<const int *>(_multipliers->buffer()),
- reinterpret_cast<float *>(_output->buffer()), 0);
-}
-
-void TileLayer::tileQuant8()
-{
- // cker quant8 tile is not implemented yet
- throw std::runtime_error{"NYI"};
-}
-
-void TileLayer::configure(const IPortableTensor *input, const IPortableTensor *multipliers,
- IPortableTensor *output)
-{
- _input = input;
- _multipliers = multipliers;
- _output = output;
-}
-
-void TileLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- tileFloat32();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- tileQuant8();
- }
- else
- {
- throw std::runtime_error{"Tile: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/TileLayer.h b/runtime/onert/backend/cpu/ops/TileLayer.h
deleted file mode 100644
index d7b793ecc..000000000
--- a/runtime/onert/backend/cpu/ops/TileLayer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_TILELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_TILELAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class TileLayer : public ::onert::exec::IFunction
-{
-public:
- TileLayer();
-
-public:
- void tileFloat32();
-
- void tileQuant8();
-
- void configure(const IPortableTensor *input, const IPortableTensor *_multipliers,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_multipliers;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_TILELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/TransposeLayer.cc b/runtime/onert/backend/cpu/ops/TransposeLayer.cc
deleted file mode 100644
index 3362c3396..000000000
--- a/runtime/onert/backend/cpu/ops/TransposeLayer.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TransposeLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Transpose.h>
-#include <numeric>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-TransposeLayer::TransposeLayer() : _input(nullptr), _perm(nullptr), _output(nullptr)
-{
- // DO NOTHING
-}
-
-template <typename T> void TransposeLayer::transpose()
-{
- nnfw::cker::TransposeParams param;
- assert(_perm->num_dimensions() == 1);
-
- param.perm_count = _input->num_dimensions();
- if (_perm->dimension(0) == 0) // This means _perm is (n-1...0)
- {
- const auto begin = param.perm;
- const auto end = param.perm + _input->num_dimensions();
- std::iota(begin, end, 0);
- std::reverse(begin, end);
- }
- else
- {
- assert(param.perm_count == static_cast<int>(_perm->dimension(0)));
- for (auto i = 0; i < param.perm_count; i++)
- {
- param.perm[i] = *(reinterpret_cast<const int32_t *>(_perm->buffer()) + i);
- }
- }
-
- nnfw::cker::Transpose(param, getTensorShape(_input),
- reinterpret_cast<const T *>(_input->buffer()), getTensorShape(_output),
- reinterpret_cast<T *>(_output->buffer()));
-}
-
-void TransposeLayer::transposeQuant8()
-{
- if (_input->data_offset() != _output->data_offset())
- {
- throw std::runtime_error("TransposeLayer : qassym8 input and output offsets unmatched");
- }
-
- if (_input->data_scale() != _output->data_scale())
- {
- throw std::runtime_error("TransposeLayer : qassym8 input and output scales unmatched");
- }
-
- transpose<uint8_t>();
-}
-
-void TransposeLayer::configure(const IPortableTensor *input, const IPortableTensor *perm,
- IPortableTensor *output)
-{
- _input = input;
- _perm = perm;
- _output = output;
-}
-
-void TransposeLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- {
- transpose<float>();
- }
- else if (_input->data_type() == OperandType::INT32)
- {
- transpose<int32_t>();
- }
- else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
- {
- transposeQuant8();
- }
- else
- {
- throw std::runtime_error{"Transpose: unsupported data type"};
- }
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/TransposeLayer.h b/runtime/onert/backend/cpu/ops/TransposeLayer.h
deleted file mode 100644
index c8e9f8ae7..000000000
--- a/runtime/onert/backend/cpu/ops/TransposeLayer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_TRANSPOSELAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_TRANSPOSELAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class TransposeLayer : public ::onert::exec::IFunction
-{
-public:
- TransposeLayer();
-
-public:
- template <typename T> void transpose();
-
- void transposeQuant8();
-
- void configure(const IPortableTensor *input, const IPortableTensor *perm,
- IPortableTensor *output);
-
- void run() override;
-
-private:
- const IPortableTensor *_input;
- const IPortableTensor *_perm;
- IPortableTensor *_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_TRANSPOSELAYER_H__
diff --git a/runtime/onert/backend/cpu/ops/UnpackLayer.cc b/runtime/onert/backend/cpu/ops/UnpackLayer.cc
deleted file mode 100644
index 428b38588..000000000
--- a/runtime/onert/backend/cpu/ops/UnpackLayer.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "UnpackLayer.h"
-
-#include "OperationUtils.h"
-
-#include <cker/operation/Unpack.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-UnpackLayer::UnpackLayer() : _input(nullptr), _outputs(), _axis(0), _num_output(0)
-{
- // DO NOTHING
-}
-
-template <typename T> void UnpackLayer::unpackImpl()
-{
- nnfw::cker::UnpackParams op_params;
- op_params.axis = _axis;
- op_params.num_split = _num_output;
-
- std::vector<nnfw::cker::Shape *> outputDimsPtr;
- std::vector<nnfw::cker::Shape> outputDims;
- outputDimsPtr.reserve(_num_output);
- outputDims.reserve(_num_output);
-
- for (int32_t i = 0; i < _num_output; i++)
- {
- outputDims.push_back(getTensorShape(_outputs[i]));
- outputDimsPtr.push_back(&outputDims[i]);
- }
-
- std::vector<T *> outputPtrs;
-
- for (const auto output : _outputs)
- {
- outputPtrs.emplace_back(reinterpret_cast<T *>(output->buffer()));
- }
-
- nnfw::cker::Unpack<T>(op_params, getTensorShape(_input), reinterpret_cast<T *>(_input->buffer()),
- getTensorShape(_outputs[0]), outputPtrs.data());
-}
-
-void UnpackLayer::configure(const IPortableTensor *input, uint32_t axis, int32_t num,
- std::vector<IPortableTensor *> &outputs)
-{
- assert(input != nullptr);
- assert(outputs.size() > 0);
- assert(outputs.size() == (size_t)num);
-
- _input = input;
- _axis = axis;
- _num_output = num;
- _outputs = outputs;
-}
-
-void UnpackLayer::run()
-{
- if (_input->data_type() == OperandType::FLOAT32)
- unpackImpl<float>();
- else if (_input->data_type() == OperandType::INT32)
- unpackImpl<int32_t>();
- else
- throw std::runtime_error{"Unpack: Unsupported data type"};
-}
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/backend/cpu/ops/UnpackLayer.h b/runtime/onert/backend/cpu/ops/UnpackLayer.h
deleted file mode 100644
index a185b31a0..000000000
--- a/runtime/onert/backend/cpu/ops/UnpackLayer.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_OPS_UNPACKLAYER_H__
-#define __ONERT_BACKEND_CPU_OPS_UNPACKLAYER_H__
-
-#include <backend/IPortableTensor.h>
-
-#include <exec/IFunction.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu
-{
-namespace ops
-{
-
-class UnpackLayer : public ::onert::exec::IFunction
-{
-public:
- UnpackLayer();
-
-public:
- void configure(const IPortableTensor *input, uint32_t axis, int32_t num_output,
- std::vector<IPortableTensor *> &output);
- void run() override;
-
-private:
- template <typename T> void unpackImpl();
-
-private:
- const IPortableTensor *_input;
- std::vector<IPortableTensor *> _outputs;
- uint32_t _axis;
- int32_t _num_output;
-};
-
-} // namespace ops
-} // namespace cpu
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_OPS_UNPACKLAYER_H__
diff --git a/runtime/onert/core/CMakeLists.txt b/runtime/onert/core/CMakeLists.txt
deleted file mode 100644
index 344b2a972..000000000
--- a/runtime/onert/core/CMakeLists.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-file(GLOB_RECURSE SOURCES "src/*.cc")
-file(GLOB_RECURSE TESTS "*.test.cc")
-list(REMOVE_ITEM SOURCES ${TESTS})
-
-add_library(onert_core SHARED ${SOURCES})
-set_target_properties(onert_core PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(onert_core PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_include_directories(onert_core PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
-target_link_libraries(onert_core PUBLIC nnfw_lib_misc half)
-target_link_libraries(onert_core PRIVATE nnfw_lib_cker)
-target_link_libraries(onert_core PRIVATE nnfw_common)
-target_link_libraries(onert_core PRIVATE nnfw_coverage)
-target_link_libraries(onert_core PRIVATE dl ${LIB_PTHREAD})
-target_link_libraries(onert_core PRIVATE jsoncpp)
-target_link_libraries(onert_core INTERFACE ruy_instrumentation)
-# NOTE Below line is added to remove warning for android build
-# It will be removed after android build uses gold linker
-if (ANDROID)
- target_link_libraries(onert_core INTERFACE log)
-endif (ANDROID)
-
-if(ENVVAR_ONERT_CONFIG)
- target_compile_definitions(onert_core PRIVATE ENVVAR_FOR_DEFAULT_CONFIG)
-endif(ENVVAR_ONERT_CONFIG)
-
-install(TARGETS onert_core LIBRARY DESTINATION lib)
-install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/"
- DESTINATION "include/onert"
- FILES_MATCHING PATTERN "*.h" PATTERN "*.lst"
- )
-
-if(NOT ENABLE_TEST)
- return()
-endif(NOT ENABLE_TEST)
-
-# Unit Tests
-set(TEST_ONERT_BACKEND_CPU_COMMON test_onert_backend_cpu_common)
-
-add_executable(${TEST_ONERT_BACKEND_CPU_COMMON} ${TESTS})
-
-target_link_libraries(${TEST_ONERT_BACKEND_CPU_COMMON} onert_core)
-target_link_libraries(${TEST_ONERT_BACKEND_CPU_COMMON} gtest gtest_main dl ${LIB_PTHREAD})
-
-add_test(${TEST_ONERT_BACKEND_CPU_COMMON} ${TEST_ONERT_BACKEND_CPU_COMMON})
-install(TARGETS ${TEST_ONERT_BACKEND_CPU_COMMON} DESTINATION unittest_standalone)
diff --git a/runtime/onert/core/include/backend/Backend.h b/runtime/onert/core/include/backend/Backend.h
deleted file mode 100644
index 4f6ebbba7..000000000
--- a/runtime/onert/core/include/backend/Backend.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_BACKEND_H__
-#define __ONERT_BACKEND_BACKEND_H__
-
-#include <memory>
-
-#include "ir/Graph.h"
-#include "backend/IConfig.h"
-#include "backend/BackendContext.h"
-
-namespace onert
-{
-namespace backend
-{
-
-namespace custom
-{
-class IKernelBuilder;
-}
-
-class Backend
-{
-public:
- virtual ~Backend() = default;
- virtual std::shared_ptr<onert::backend::IConfig> config() const = 0;
-
- virtual std::unique_ptr<BackendContext>
- newContext(const ir::Graph &graph, const std::shared_ptr<backend::custom::IKernelBuilder> &kb,
- bool is_linear_executor) const = 0;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_BACKEND_H__
diff --git a/runtime/onert/core/include/backend/BackendContext.h b/runtime/onert/core/include/backend/BackendContext.h
deleted file mode 100644
index 1eba29550..000000000
--- a/runtime/onert/core/include/backend/BackendContext.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_BACKEND_CONTEXT_H__
-#define __ONERT_BACKEND_BACKEND_CONTEXT_H__
-
-#include <memory>
-#include "ir/Graph.h"
-
-namespace onert
-{
-namespace backend
-{
-
-class Backend;
-class IConstantInitializer;
-class IKernelGenerator;
-class ITensorRegister;
-struct ITensorRegistry;
-struct ITensorBuilder;
-struct IOptimizer;
-
-class BackendContext
-{
-public:
- struct OperationInfo
- {
- ir::OperationIndex index;
- ir::Layout layout;
-
- OperationInfo(ir::OperationIndex index, ir::Layout layout) : index{index}, layout{layout} {}
- };
-
-public:
- BackendContext(const Backend *backend, const ir::Graph *graph,
- std::shared_ptr<ITensorRegistry> tensor_registry = nullptr,
- std::shared_ptr<ITensorBuilder> tensor_builder = nullptr,
- std::shared_ptr<IConstantInitializer> constant_initializer = nullptr,
- std::shared_ptr<IKernelGenerator> kernel_gen = nullptr,
- std::shared_ptr<ITensorRegister> tensor_register = nullptr,
- std::shared_ptr<IOptimizer> optimizer = nullptr)
- : _backend{backend}, _graph{graph}, tensor_registry{tensor_registry},
- tensor_builder{tensor_builder}, constant_initializer{constant_initializer},
- kernel_gen{kernel_gen}, tensor_register{tensor_register}, optimizer{optimizer}
- {
- }
-
- virtual ~BackendContext() = default;
-
- void initialize(const std::vector<OperationInfo> &operation_list,
- const std::vector<ir::OperandIndex> &operand_list);
- void initConsts();
-
- const Backend *backend() const { return _backend; }
- const ir::Graph *graph() const { return _graph; }
- const std::vector<OperationInfo> &operation_list() { return _operation_list; }
- const std::vector<ir::OperandIndex> &operand_list() { return _operand_list; }
-
-private:
- const Backend *_backend{nullptr};
- const ir::Graph *_graph{nullptr};
- std::vector<OperationInfo> _operation_list;
- std::vector<ir::OperandIndex> _operand_list;
-
-public:
- std::shared_ptr<ITensorRegistry> tensor_registry;
- std::shared_ptr<ITensorBuilder> tensor_builder;
- std::shared_ptr<IConstantInitializer> constant_initializer;
- std::shared_ptr<IKernelGenerator> kernel_gen;
- std::shared_ptr<ITensorRegister> tensor_register;
- std::shared_ptr<IOptimizer> optimizer;
-};
-
-using BackendContexts = std::unordered_map<const Backend *, std::unique_ptr<BackendContext>>;
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_BACKEND_CONTEXT_H__
diff --git a/runtime/onert/core/include/backend/CustomKernelBuilder.h b/runtime/onert/core/include/backend/CustomKernelBuilder.h
deleted file mode 100644
index cae2fc1a3..000000000
--- a/runtime/onert/core/include/backend/CustomKernelBuilder.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CUSTOM_KERNEL_BUILDER_H__
-#define __ONERT_BACKEND_CUSTOM_KERNEL_BUILDER_H__
-
-#include "backend/IPortableTensor.h"
-#include "ir/Shape.h"
-#include "ir/DataType.h"
-
-#include <vector>
-#include <memory>
-
-namespace onert
-{
-namespace exec
-{
-
-class IFunction;
-
-} // namespace exec
-} // namespace onert
-
-namespace onert
-{
-namespace backend
-{
-namespace custom
-{
-
-struct TypeInfo
-{
- ir::Shape shape;
- ir::DataType dtype;
-};
-
-struct CustomKernelConfigParams
-{
- std::vector<backend::IPortableTensor *> input_tensors;
- std::vector<TypeInfo> input_types;
-
- std::vector<backend::IPortableTensor *> output_tensors;
- std::vector<TypeInfo> output_types;
-
- char *userdata;
- size_t userdata_size;
-};
-
-class IKernelBuilder
-{
-public:
- virtual ~IKernelBuilder() = default;
- virtual std::unique_ptr<exec::IFunction> buildKernel(const std::string &id,
- CustomKernelConfigParams &&params) const = 0;
-};
-
-} // namespace custom
-
-} // namespace backend
-
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CUSTOM_KERNEL_BUILDER_H__
diff --git a/runtime/onert/core/include/backend/IConfig.h b/runtime/onert/core/include/backend/IConfig.h
deleted file mode 100644
index ef9c5cdb2..000000000
--- a/runtime/onert/core/include/backend/IConfig.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ICONFIG_H__
-#define __ONERT_BACKEND_ICONFIG_H__
-
-#include "ir/Layout.h"
-#include "ir/Operation.h"
-#include "util/ITimer.h"
-
-#include <memory>
-#include <string>
-
-namespace onert
-{
-namespace backend
-{
-
-struct IConfig
-{
- virtual ~IConfig() = default;
- /**
- * @brief Returns ID of the backend
- *
- * @return std::string ID of this backend
- */
- virtual std::string id() = 0;
- /**
- * @brief Initialize the backend. This is called as soon as the backend is loaded.
- *
- * @return true Initialization succeeded
- * @return false Initialization failed, so it cannot use this backend
- */
- virtual bool initialize() = 0;
- /**
- * @brief Returns supported layout for the given \p node and \p frontend_layout
- *
- * @param node Operation
- * @param frontend_layout The layout defined in the model
- * @return ir::Layout The layout that the backend kernel actually uses
- */
- virtual ir::Layout supportLayout(const ir::Operation &node, ir::Layout frontend_layout) = 0;
- /**
- * @brief The function that is called after each OpSequence run on profiling mode.
- * This may be useful for profiling GPU-based or special computing units.
- */
- virtual void sync() const {}
- /**
- * @brief Returns Timer object for this backend. For some computing units, it may need its own
- * Timer implementation.
- *
- * @return std::unique_ptr<util::ITimer> Timer object for this backend
- */
- virtual std::unique_ptr<util::ITimer> timer() { return nullptr; }
-
- virtual bool supportPermutation() = 0;
- virtual bool supportDynamicTensor() = 0;
- virtual bool supportFP16() = 0;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ICONFIG_H__
diff --git a/runtime/onert/core/include/backend/IConstantInitializer.h b/runtime/onert/core/include/backend/IConstantInitializer.h
deleted file mode 100644
index 149acecb4..000000000
--- a/runtime/onert/core/include/backend/IConstantInitializer.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ICONSTANT_INITIALIZER_H__
-#define __ONERT_BACKEND_ICONSTANT_INITIALIZER_H__
-
-#include <unordered_map>
-#include <functional>
-
-#include "ITensorBuilder.h"
-#include "ir/Coordinates.h"
-#include "ir/Layout.h"
-#include "ir/Operand.h"
-#include "ir/Operands.h"
-#include "ir/OperationVisitor.h"
-#include "ir/OpSequence.h"
-#include "util/logging.h"
-
-namespace
-{
-template <typename T>
-static void Init(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj, const bool copy,
- const onert::ir::Layout frontend_layout = onert::ir::Layout::UNKNOWN)
-{
- const auto shape = model_obj.shape();
- assert(model_obj.data());
- auto base = reinterpret_cast<const T *>(model_obj.data()->base());
-
- obj.access([&](::onert::backend::ITensor &tensor) {
- switch (shape.rank())
- {
- case 0:
- {
- assert(model_obj.data()->size() == sizeof(T));
- const auto value = *reinterpret_cast<const T *>(base);
- T *into = reinterpret_cast<T *>(tensor.buffer());
- *into = value;
- break;
- }
- case 1:
- {
- auto vec_size = shape.dim(0);
- for (int32_t n = 0; n < vec_size; ++n)
- {
- const T *from = reinterpret_cast<const T *>(base) + n;
- const auto value = *from;
-
- T *into = reinterpret_cast<T *>(tensor.buffer()) + n;
-
- *into = value;
- }
- break;
- }
- case 2:
- {
- const int32_t copy_len = shape.dim(1);
-
- for (auto i = 0; i < shape.dim(0); ++i)
- {
- ::onert::ir::Coordinates coords{i, 0};
- memcpy(tensor.buffer() + tensor.calcOffset(coords), base + i * copy_len,
- copy_len * sizeof(T));
- }
- break;
- }
- case 3:
- {
- const int32_t width = shape.dim(1);
- const int32_t copy_len = shape.dim(2);
-
- for (auto i = 0; i < shape.dim(0); ++i)
- {
- for (auto j = 0; j < shape.dim(1); ++j)
- {
- ::onert::ir::Coordinates coords{i, j, 0};
- memcpy(tensor.buffer() + tensor.calcOffset(coords),
- base + i * width * copy_len + j * copy_len, copy_len * sizeof(T));
- }
- }
- break;
- }
- case 4:
- {
- const int32_t height = shape.dim(1);
- const int32_t width = shape.dim(2);
- const int32_t copy_len = shape.dim(3);
- for (auto i = 0; i < shape.dim(0); ++i)
- {
- for (auto j = 0; j < shape.dim(1); ++j)
- {
- for (auto k = 0; k < shape.dim(2); ++k)
- {
- if (copy)
- {
- ::onert::ir::Coordinates coords{i, j, k, 0};
- memcpy(tensor.buffer() + tensor.calcOffset(coords),
- base + i * height * width * copy_len + j * width * copy_len + k * copy_len,
- copy_len * sizeof(T));
- }
- else
- {
- for (auto l = 0; l < shape.dim(3); ++l)
- {
- const auto coords = ::onert::ir::convertCoordinates({i, j, k, l}, frontend_layout,
- tensor.layout());
- T *into = reinterpret_cast<T *>(tensor.buffer() + tensor.calcOffset(coords));
- T value = *(base + i * height * width * copy_len + j * width * copy_len +
- k * copy_len + l);
- *into = value;
- }
- }
- }
- }
- }
- break;
- }
- default:
- throw std::runtime_error{"Not yet supported"};
- }
- });
-}
-
-template <typename T>
-void copyInit(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj)
-{
- Init<T>(model_obj, obj, true);
-}
-
-template <typename T>
-void permuteInit(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj,
- const onert::ir::Layout frontend_layout)
-{
- const bool copy = frontend_layout == obj.layout();
- Init<T>(model_obj, obj, copy, frontend_layout);
-}
-
-} // namespace
-
-namespace onert
-{
-namespace backend
-{
-
-class IConstantInitializer : public ir::OperationVisitor
-{
-public:
- virtual ~IConstantInitializer() = default;
-
-public:
- void run()
- {
- assert(tensor_registry());
- for (const auto &it : _init_map)
- {
- const auto &ind = it.first;
- const auto &fn = it.second;
-
- const auto &model_obj = _operands.at(ind);
- auto tensor_obj = tensor_registry()->getNativeITensor(ind);
- assert(tensor_obj != nullptr);
- fn(model_obj, *tensor_obj);
- VERBOSE(FillOperandData) << "Fill data for operand " << ind.value() << std::endl;
- }
- _init_map.clear();
- }
-
-public:
- IConstantInitializer(const ir::Operands &operands)
- : _operands{operands}, _current_op_seq_layout{ir::Layout::UNKNOWN}
- {
- }
-
-public:
- using Initializer = std::function<void(const ir::Operand &, backend::ITensor &)>;
-
- void setLayout(ir::Layout layout) { _current_op_seq_layout = layout; }
-
-protected:
- virtual std::shared_ptr<ITensorRegistry> tensor_registry() const = 0;
-
-public:
- virtual void registerDefaultInitializer(const ir::OperandIndex &index, const ir::Operand &obj)
- {
- registerPermuteInitializer(index, obj); // as default
- }
-
-public:
- void registerCopyInitializer(const ir::OperandIndex &index, const ir::Operand &obj);
- void registerPermuteInitializer(const ir::OperandIndex &index, const ir::Operand &obj);
-
-public:
- void registerCustomInitializer(const ir::OperandIndex &index, const ir::Operand &obj,
- void (*customInit)(const onert::ir::Operand &model_obj,
- onert::backend::ITensor &obj))
- {
- // For only CONSTANTS
- // TODO Add to check if tensor has been allocated
- if (!obj.isConstant())
- return;
-
- using namespace std::placeholders;
- _init_map[index] = std::bind(customInit, _1, _2);
- }
-
-public:
- bool exist(const ir::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
-
-protected:
- const ir::Operands &_operands;
- std::unordered_map<ir::OperandIndex, Initializer> _init_map;
- ir::Layout _current_op_seq_layout; // TODO Rename this to _current_layout
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ICONSTANT_INITIALIZER_H__
diff --git a/runtime/onert/core/include/backend/IDynamicTensorManager.h b/runtime/onert/core/include/backend/IDynamicTensorManager.h
deleted file mode 100644
index 67cfda24e..000000000
--- a/runtime/onert/core/include/backend/IDynamicTensorManager.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_IDYNAMICTENSOR_MANAGER_H__
-#define __ONERT_BACKEND_IDYNAMICTENSOR_MANAGER_H__
-
-#include "ITensorManager.h"
-
-#include <ir/Index.h>
-#include <ir/Operation.h>
-#include <ir/Shape.h>
-#include <backend/ITensor.h>
-
-namespace onert
-{
-namespace backend
-{
-
-/**
- * @brief Interface as an abstract tensor manager, providing ways to handle memory
- * for dynamic tensors.
- */
-struct IDynamicTensorManager : public ITensorManager
-{
- virtual ~IDynamicTensorManager() = default;
-
-public:
- /**
- * @brief Plan when to delete a tensor. Note this planning is done at compilation time.
- * @param op_ind operation index
- * @param tensor candidate ITensor to dealloc. Tensor can be static
- * or dynamic since tensor type may not be clearly known at compilation time.
- */
- virtual void planDealloc(ir::OperationIndex op_ind, backend::ITensor *tensor) = 0;
-
- /**
- * @brief Deallocate input tensors of op if an input tensor is a dynamic tensor and it won't
- * be used anymore
- * @note This will work after calling planDealloc
- */
- virtual void deallocInput(ir::OperationIndex op_ind) = 0;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_IDYNAMICTENSOR_MANAGER_H__
diff --git a/runtime/onert/core/include/backend/IExternalContext.h b/runtime/onert/core/include/backend/IExternalContext.h
deleted file mode 100644
index 88ffb502c..000000000
--- a/runtime/onert/core/include/backend/IExternalContext.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_IEXTERNAL_CONTEXT_H__
-#define __ONERT_BACKEND_IEXTERNAL_CONTEXT_H__
-
-namespace onert
-{
-namespace backend
-{
-
-struct IExternalContext
-{
- virtual ~IExternalContext() = default;
- virtual void setMaxNumThreads(int) = 0;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_IEXTERNAL_CONTEXT__
diff --git a/runtime/onert/core/include/backend/IKernelGenerator.h b/runtime/onert/core/include/backend/IKernelGenerator.h
deleted file mode 100644
index afc34ec21..000000000
--- a/runtime/onert/core/include/backend/IKernelGenerator.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_IKERNEL_GENERATOR_H__
-#define __ONERT_BACKEND_IKERNEL_GENERATOR_H__
-
-#include <assert.h>
-#include <memory>
-#include <functional>
-
-#include "ITensorBuilder.h"
-#include "ir/OperationVisitor.h"
-#include "ir/OpSequence.h"
-#include <memory>
-#include "exec/FunctionSequence.h"
-
-namespace onert
-{
-namespace backend
-{
-
-class IKernelGenerator : public ir::OperationVisitor
-{
-public:
- virtual ~IKernelGenerator() = default;
-
- std::unique_ptr<exec::IFunction> releaseFunction()
- {
- assert(_return_fn);
- return std::move(_return_fn);
- }
-
- std::unique_ptr<exec::FunctionSequence> generate(const ir::OpSequence &op_seq)
- {
- op_seq.accept(*this);
- return std::move(_return_fn_seq);
- }
-
-protected:
- using OperationVisitor::visit;
-
- void visit(const ir::OpSequence &) override
- {
- throw std::runtime_error("KernelGenerator: NYI for operation 'OpSequence'");
- }
-
-#define OP(InternalName) \
- void visit(const ir::operation::InternalName &) override \
- { \
- throw std::runtime_error("KernelGenerator: NYI for operation '" #InternalName "'"); \
- }
-#include "ir/Operations.lst"
-#undef OP
-
-protected:
- std::unique_ptr<exec::IFunction> _return_fn;
- std::unique_ptr<exec::FunctionSequence> _return_fn_seq; // TODO Extract this out
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_IKERNEL_GENERATOR_H__
diff --git a/runtime/onert/core/include/backend/IMemoryManager.h b/runtime/onert/core/include/backend/IMemoryManager.h
deleted file mode 100644
index bad2fd51a..000000000
--- a/runtime/onert/core/include/backend/IMemoryManager.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_IMEMORY_MANAGER_H__
-#define __ONERT_BACKEND_IMEMORY_MANAGER_H__
-
-namespace onert
-{
-namespace backend
-{
-
-struct IMemoryManager
-{
- virtual ~IMemoryManager() = default;
-
- virtual void allocate(void) = 0;
- virtual void deallocate(void) = 0;
-};
-
-} // namespace backend
-} // namespace onert
-
-#include <unordered_set>
-#include <memory>
-
-namespace onert
-{
-namespace backend
-{
-
-using MemoryManagerSet = std::unordered_set<std::unique_ptr<backend::IMemoryManager>>;
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_IMEMORY_MANAGER_H__
diff --git a/runtime/onert/core/include/backend/IOptimizer.h b/runtime/onert/core/include/backend/IOptimizer.h
deleted file mode 100644
index 4844d21b9..000000000
--- a/runtime/onert/core/include/backend/IOptimizer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_I_OPTIMIZER_H__
-#define __ONERT_BACKEND_I_OPTIMIZER_H__
-
-namespace onert
-{
-namespace ir
-{
-class LoweredGraph;
-}
-} // namespace onert
-
-namespace onert
-{
-namespace backend
-{
-
-/**
- * @brief Class for backend optimizations. This is an optional class so not all backends must have
- * it.
- *
- */
-struct IOptimizer
-{
- virtual ~IOptimizer() = default;
- /**
- * @brief Run optimization
- *
- */
- virtual void optimize() = 0;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_I_OPTIMIZER_H__
diff --git a/runtime/onert/core/include/backend/IPortableTensor.h b/runtime/onert/core/include/backend/IPortableTensor.h
deleted file mode 100644
index 1b1f05fe1..000000000
--- a/runtime/onert/core/include/backend/IPortableTensor.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_I_PORTABLE_TENSOR_H__
-#define __ONERT_BACKEND_I_PORTABLE_TENSOR_H__
-
-#include "backend/ITensor.h"
-#include "ir/OperandInfo.h"
-#include "ir/Sparsity.h"
-
-namespace onert
-{
-namespace backend
-{
-
-/**
- * @brief A tensor class that is portable for other backends
- *
- * Backends that use derivatives of this interface can reuse each other's tensors without copying.
- * Here's criterion to be a portable tensor:
- * - it must not have any paddings
- * - No special operations on @c access method
- * - e.g. CL memory must map/unmap to use it from CPU, the memory so it cannot be portable
- */
-class IPortableTensor : public ITensor
-{
-public:
- IPortableTensor(const ir::OperandInfo &info) : _info(info) {}
-
- virtual ~IPortableTensor();
- virtual const ir::Sparsity *sparsity() const { return nullptr; }
- const ir::OperandInfo &get_info() const { return _info; }
-
-public:
- bool has_padding() const final { return false; }
- void access(const std::function<void(ITensor &tensor)> &fn) final { fn(*this); }
-
-protected:
- ir::OperandInfo _info;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_I_PORTABLE_TENSOR_H__
diff --git a/runtime/onert/core/include/backend/IStaticTensorManager.h b/runtime/onert/core/include/backend/IStaticTensorManager.h
deleted file mode 100644
index cef1f8a0a..000000000
--- a/runtime/onert/core/include/backend/IStaticTensorManager.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ISTATICTENSOR_MANAGER_H__
-#define __ONERT_BACKEND_ISTATICTENSOR_MANAGER_H__
-
-#include "ITensorManager.h"
-
-namespace onert
-{
-namespace backend
-{
-
-struct IStaticTensorManager : public ITensorManager
-{
- virtual ~IStaticTensorManager() = default;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ISTATICTENSOR_MANAGER_H__
diff --git a/runtime/onert/core/include/backend/ITensor.h b/runtime/onert/core/include/backend/ITensor.h
deleted file mode 100644
index b18dd30a2..000000000
--- a/runtime/onert/core/include/backend/ITensor.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_OPERAND_I_TENSOR_H__
-#define __ONERT_BACKEND_OPERAND_I_TENSOR_H__
-
-#include <cstring>
-#include <cstdint>
-#include <functional>
-
-#include "ir/DataType.h"
-#include "ir/Layout.h"
-#include "ir/Shape.h"
-#include "ir/Coordinates.h"
-#include "util/Utils.h"
-
-namespace onert
-{
-namespace backend
-{
-
-struct IDynamicTensorManager;
-
-class ITensor
-{
-public:
- virtual ~ITensor() = default;
-
-public:
- virtual uint8_t *buffer() const = 0;
- virtual size_t total_size() const = 0;
- virtual size_t dimension(size_t index) const = 0;
- virtual size_t num_dimensions() const = 0;
- virtual size_t calcOffset(const ir::Coordinates &coords) const = 0;
- virtual ir::Layout layout() const = 0;
- virtual ir::DataType data_type() const = 0;
- virtual float data_scale() const = 0;
- virtual int32_t data_offset() const = 0;
- virtual bool has_padding() const = 0;
- virtual void access(const std::function<void(ITensor &tensor)> &fn) = 0;
-
- /**
- * @brief Set the shape to @c shape and possibly re-allocate the buffer
- *
- * If a tensor is dynamic tensor and previously allocated memory exists,
- * it will be deallocated.
- * If a tensor is static tensor (with previously allocated memory by StaticTensorManager),
- * @c buffer() will be overwriten
- *
- * @param shape tensor's new shape. While allocating memory for this new_shape,
- * tensor's shape is set to new_shape
- * @return true If applying shape is successful
- * @return false If not applying shape is not supported (it throws for other errors)
- */
- virtual bool applyShape(const ir::Shape &) { return false; }
-
- /**
- * @brief Return true if the tensor is constant
- */
- virtual bool is_constant() const
- {
- throw std::runtime_error("This backend does not support checking constant");
- }
-
- /**
- * @brief Return true if the tensor needs dynamic allocation, meaning that during compile-time
- * the outpus shape cannot be known and the output shape is calculated during
- * kernel execution-time.
- */
- virtual bool is_dynamic() const = 0;
-
- /// @brief set this tensor dynamic
- virtual void set_dynamic()
- {
- throw std::runtime_error("This backend does not support dynamic tensor");
- }
-
- /**
- * @brief Set the shape of tenser to new_shape
- * @note Higer dimension will be placed on front.
- */
- virtual void setShape(const ir::Shape &new_shape)
- {
- UNUSED_RELEASE(new_shape);
- throw std::runtime_error("This backend does not support dynamic setShape");
- }
-
- /**
- * @brief Get ir::Shape of tensor
- * @note Higer dimension will be placed on front.
- */
- virtual ir::Shape getShape() const;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_OPERAND_I_TENSOR_H__
diff --git a/runtime/onert/core/include/backend/ITensorBuilder.h b/runtime/onert/core/include/backend/ITensorBuilder.h
deleted file mode 100644
index 97721cf19..000000000
--- a/runtime/onert/core/include/backend/ITensorBuilder.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ITENSOR_BUILDER_H__
-#define __ONERT_BACKEND_ITENSOR_BUILDER_H__
-
-#include <map>
-
-#include "ir/Index.h"
-#include "ir/OperandInfo.h"
-#include "ir/Operation.h"
-#include "ir/Layout.h"
-#include "ITensor.h"
-#include "ITensorManager.h"
-#include "ITensorRegistry.h"
-#include "IDynamicTensorManager.h"
-
-namespace onert
-{
-namespace backend
-{
-
-struct ITensorBuilder
-{
- using IterateFunction = std::function<void(const ir::OperandIndex &)>;
-
- virtual ~ITensorBuilder(void) = default;
-
- /**
- * @brief Register tensor information to allocate on backend
- *
- * @param ind Index
- * @param info Info
- * @param backend_layout Backend layout
- * @param as_const Whether this tensor is constant
- */
- virtual void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
- ir::Layout backend_layout) = 0;
-
- /**
- * @brief Check if the tensor has been registered with @c registerTensorInfo
- *
- * @return true If the tensor has been registered
- * @return false Otherwise
- */
- virtual bool isRegistered(const ir::OperandIndex &) const = 0;
-
-public: // methods for static tensor allocation
- /**
- * @brief Let the tensor builder know first use(start of lifetime) of a tensor
- * Must be called before calling @c prepare
- * Must be run up to once for each tensor before calling @c notifyLastUse
- * NOTE: Useful only for static models
- */
- virtual void notifyFirstUse(const ir::OperandIndex &) = 0;
- /**
- * @brief Let the tensor builder know last use(end of lifetime) of a tensor
- * Must be run up to once for each tensor after calling @c notifyFirstUse
- * NOTE: Useful only for static models
- */
- virtual void notifyLastUse(const ir::OperandIndex &) = 0;
- /**
- * @brief Prepare the tensors
- * Before calling this, all the tensors must be registered
- */
- virtual void prepare(void) = 0;
- /**
- * @brief Allocate the tensors
- * Before calling this, @c prepare must be called
- */
- virtual void allocate() = 0;
- /**
- * @brief Some actions after functions' @c IFunction::prepare method.
- * This is called right after each function's @c IFunction::prepare function has been
- * called.
- */
- virtual void postFunctionPrepare() = 0;
-
-public: // methods for dynamic tensor allocation
- /**
- * @brief Get dynamicTensorManager. If a backend does not support dynamic tensor, exception
- * will be thrown.
- *
- * @return pointer of IDynamicTensorManager object
- *
- * @note Since it is a pointer, its life time is from the cration of TensorBuilder
- * to the end of execution
- */
- virtual IDynamicTensorManager *dynamicTensorManager(void) { return nullptr; }
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ITENSOR_BUILDER_H__
diff --git a/runtime/onert/core/include/backend/ITensorManager.h b/runtime/onert/core/include/backend/ITensorManager.h
deleted file mode 100644
index 4974b6645..000000000
--- a/runtime/onert/core/include/backend/ITensorManager.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ITENSOR_MANAGER_H__
-#define __ONERT_BACKEND_ITENSOR_MANAGER_H__
-
-namespace onert
-{
-namespace backend
-{
-
-// NOTE This name ITensorManager has been discussed whether or not the name is proper.
-// Anyone can argue with any better name.
-/**
- * @brief Interface as an abstract tensor manager which has MemoryManager
- * This is used as a base class for IStaticTensorManager and IDynamicTensorManager
- */
-struct ITensorManager
-{
- virtual ~ITensorManager() = default;
-};
-
-} // namespace backend
-} // namespace onert
-
-#include <unordered_set>
-#include <memory>
-
-namespace onert
-{
-namespace backend
-{
-
-using TensorManagerSet = std::unordered_set<std::unique_ptr<backend::ITensorManager>>;
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ITENSOR_MANAGER_H__
diff --git a/runtime/onert/core/include/backend/ITensorRegister.h b/runtime/onert/core/include/backend/ITensorRegister.h
deleted file mode 100644
index b8e521ce3..000000000
--- a/runtime/onert/core/include/backend/ITensorRegister.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ITENSOR_REGISTER_H__
-#define __ONERT_BACKEND_ITENSOR_REGISTER_H__
-
-#include "ir/LowerInfoMap.h"
-#include "ITensorBuilder.h"
-#include "ir/Layout.h"
-#include "ir/OperandIndexSequence.h"
-#include "ir/OperandInfo.h"
-#include "ir/Operands.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace backend
-{
-
-class ITensorRegister : public ir::OperationVisitor
-{
-public:
- virtual ~ITensorRegister() = default;
-
-public:
- void registerTensors(const ir::OpSequence &op_seq, const ir::LowerInfoMap *lower_info_map)
- {
- _current_op_seq_layout = op_seq.getLayout();
- _lower_info_map = lower_info_map;
- assert(_lower_info_map != nullptr);
- assert(tensor_builder().get() != nullptr);
- op_seq.accept(*this);
- }
-
-protected:
- virtual const ir::Operands &operands() const = 0;
- virtual std::shared_ptr<ITensorBuilder> tensor_builder() const = 0;
-
-protected:
-#define OP(InternalName) \
- void visit(const ir::operation::InternalName &node) override \
- { \
- for (const auto &ind : (node.getInputs() | ir::Remove::UNDEFINED) + node.getOutputs()) \
- { \
- defaultRegisterTensorInfo(ind); \
- } \
- }
-#include "ir/Operations.lst"
-#undef OP
-
-protected:
- void defaultRegisterTensorInfo(const ir::OperandIndex &index) const
- {
- if (tensor_builder()->isRegistered(index))
- {
- return;
- }
-
- const auto &obj = operands().at(index);
- const auto frontend_layout = frontendLayout();
- const auto backend_layout = backendLayout(index);
- ir::OperandInfo backend_info{permuteShape(obj.shape(), frontend_layout, backend_layout),
- obj.typeInfo(), obj.info().memAllocType(), obj.isConstant()};
- tensor_builder()->registerTensorInfo(index, backend_info, backend_layout);
- }
-
-protected:
- ir::Layout frontendLayout() const { return _current_op_seq_layout; }
- ir::Layout backendLayout(const ir::OperandIndex &index) const
- {
- assert(_lower_info_map != nullptr);
- const auto lower_info = _lower_info_map->operand.at(index).get();
- return lower_info->def_factors().getOnlyElement().layout();
- }
-
-private:
- ir::Layout _current_op_seq_layout;
- const ir::LowerInfoMap *_lower_info_map{nullptr};
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ITENSOR_REGISTER_H__
diff --git a/runtime/onert/core/include/backend/ITensorRegistry.h b/runtime/onert/core/include/backend/ITensorRegistry.h
deleted file mode 100644
index b256a1fb8..000000000
--- a/runtime/onert/core/include/backend/ITensorRegistry.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_ITENSOR_REGISTRY__
-#define __ONERT_BACKEND_ITENSOR_REGISTRY__
-
-#include <memory>
-
-#include "ir/Index.h"
-#include "backend/ITensor.h"
-#include "backend/IPortableTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-
-struct ITensorRegistry
-{
- /**
- * @brief Deconstruct itself
- */
- virtual ~ITensorRegistry() = default;
-
- /**
- * @brief Returns pointer of ITensor among native and migrant tensors
- *
- * Native Tensor is a tensor that is managed by this backend
- * Migrant Tensor is a tensor that is imported from another backend
- *
- * @note Return tensor cannot be used longer than dynamic tensor manager
- */
- virtual ITensor *getITensor(const ir::OperandIndex &) = 0;
- /**
- * @brief Returns pointer of ITensor among native tensors
- *
- * Unlike @c getITensor , this function only searches from native tensors
- *
- * @note Returned tensor cannot be used longer than dynamic tensor manager
- */
- virtual ITensor *getNativeITensor(const ir::OperandIndex &) = 0;
- /**
- * @brief Set the Migrant Tensor which are from other backends
- *
- * @return true if supported
- * @return false if not supported
- */
- virtual bool setMigrantTensor(const ir::OperandIndex &, IPortableTensor *) { return false; }
-};
-
-} // namespace backend
-} // namespace onert
-
-#include "ir/OperandIndexMap.h"
-
-namespace onert
-{
-namespace backend
-{
-
-/**
- * @brief TensorRegistry template class for the convenience of backend implementations
- *
- * If a backend uses @c IPortableTensor , and there is no special reason to implement @c
- * ITensorRegistry on your own, you may just use this default implementation.
- *
- * @tparam T_Tensor Tensor type. Must be a subclass of @c onert::backend::IPortableTensor .
- */
-template <typename T_Tensor> class PortableTensorRegistryTemplate : public ITensorRegistry
-{
-public:
- ITensor *getITensor(const ir::OperandIndex &ind) override
- {
- static_assert(std::is_base_of<ITensor, T_Tensor>::value, "T_Tensor must derive from ITensor.");
- auto _migrant_tensor = _migrant.find(ind);
- if (_migrant_tensor != _migrant.end())
- return _migrant_tensor->second;
- return getNativeTensor(ind);
- }
-
- ITensor *getNativeITensor(const ir::OperandIndex &ind) override { return getNativeTensor(ind); }
-
- IPortableTensor *getPortableTensor(const ir::OperandIndex &ind)
- {
- auto _migrant_tensor = _migrant.find(ind);
- if (_migrant_tensor != _migrant.end())
- {
- if (_migrant_tensor->second)
- return _migrant_tensor->second;
- }
- return getNativeTensor(ind);
- }
-
- T_Tensor *getNativeTensor(const ir::OperandIndex &ind)
- {
- auto tensor = _native.find(ind);
- if (tensor != _native.end())
- return tensor->second.get();
- return nullptr;
- }
-
- bool setMigrantTensor(const ir::OperandIndex &ind, IPortableTensor *tensor) override
- {
- assert(tensor != nullptr);
- auto itr = _native.find(ind);
- if (itr != _native.end())
- throw std::runtime_error{"Tried to set a migrant tensor but a native tensor already exists."};
- _migrant[ind] = tensor;
- return true;
- }
-
- void setNativeTensor(const ir::OperandIndex &ind, std::unique_ptr<T_Tensor> &&tensor)
- {
- assert(tensor != nullptr);
- auto itr = _migrant.find(ind);
- if (itr != _migrant.end())
- throw std::runtime_error{"Tried to set a native tensor but a migrant tensor already exists."};
- _native[ind] = std::move(tensor);
- }
-
- const ir::OperandIndexMap<std::unique_ptr<T_Tensor>> &native_tensors() { return _native; }
-
- const ir::OperandIndexMap<IPortableTensor *> &migrant_tensors() { return _migrant; }
-
-private:
- ir::OperandIndexMap<IPortableTensor *> _migrant;
- ir::OperandIndexMap<std::unique_ptr<T_Tensor>> _native;
-};
-
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_ITENSOR_REGISTRY__
diff --git a/runtime/onert/core/include/backend/cpu_common/Allocator.h b/runtime/onert/core/include/backend/cpu_common/Allocator.h
deleted file mode 100644
index fa67fc7c4..000000000
--- a/runtime/onert/core/include/backend/cpu_common/Allocator.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file        Allocator.h
- * @brief       This file contains Allocator related classes
- */
-
-#ifndef __ONERT_BACKEND_CPU_COMMON_ALLOCATOR_H__
-#define __ONERT_BACKEND_CPU_COMMON_ALLOCATOR_H__
-
-#include <memory>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-/**
- * @brief Class to allocate memory
- */
-class Allocator
-{
-public:
- Allocator(uint32_t capacity);
- /**
- * @brief Get memory base pointer
- * @return base pointer
- */
- uint8_t *base() const { return _base.get(); }
- void release() { _base.reset(); }
-
-private:
- std::unique_ptr<uint8_t[]> _base;
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_COMMON_ALLOCATOR_H__
diff --git a/runtime/onert/core/include/backend/cpu_common/DynamicTensorManager.h b/runtime/onert/core/include/backend/cpu_common/DynamicTensorManager.h
deleted file mode 100644
index c4e06aa82..000000000
--- a/runtime/onert/core/include/backend/cpu_common/DynamicTensorManager.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_COMMON_DYNAMICTENSOR_MANAGER_H__
-#define __ONERT_BACKEND_CPU_COMMON_DYNAMICTENSOR_MANAGER_H__
-
-#include "MemoryManager.h"
-#include "TensorRegistry.h"
-
-#include <backend/IDynamicTensorManager.h>
-#include <ir/OperandInfo.h>
-#include <ir/Operation.h>
-#include <ir/Index.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-// TODO Find optimized algorithm to manage memory.
-
-/**
- * @brief Class to manage dynamic tensor and its memory
- */
-class DynamicTensorManager : public backend::IDynamicTensorManager
-{
-public:
- DynamicTensorManager(const std::shared_ptr<TensorRegistry> &reg);
-
- virtual ~DynamicTensorManager() = default;
-
- void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info,
- ir::Layout backend_layout);
-
- void planDealloc(ir::OperationIndex op_ind, backend::ITensor *tensor) override;
- void deallocInput(ir::OperationIndex op_ind) override;
-
- std::shared_ptr<DynamicMemoryManager> dynamic_mem_mgr() { return _dynamic_mem_mgr; }
-
-private:
- const ITensor *getRawITensor(ir::OperandIndex ind);
-
-private:
- /**
- * @brief Memory manager for dynamic tensor.
- * @todo DynamicMemoryManager is not optimized. Optimized one is needed
- */
- std::shared_ptr<DynamicMemoryManager> _dynamic_mem_mgr;
- const std::shared_ptr<TensorRegistry> _tensors;
-
- // contains list of dynamic tensor index, which can be deallocated after running operation
- // note: this map could contain static tensor index too. Careful use is required.
- std::unordered_map<ir::OperationIndex, std::unordered_set<backend::ITensor *>>
- _dealloc_tensor_map;
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_COMMON_DYNAMICTENSOR_MANAGER_H__
diff --git a/runtime/onert/core/include/backend/cpu_common/IMemoryPlanner.h b/runtime/onert/core/include/backend/cpu_common/IMemoryPlanner.h
deleted file mode 100644
index 335f8f5c0..000000000
--- a/runtime/onert/core/include/backend/cpu_common/IMemoryPlanner.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_IMEMORY_PLANNER_H__
-#define __ONERT_BACKEND_IMEMORY_PLANNER_H__
-
-#include "ir/OperandIndexMap.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-/**
- * @brief Structure to have memory offset and size
- */
-struct Block
-{
- uint32_t offset;
- size_t size;
-};
-
-/**
- * @brief Interface to plan memory
- */
-struct IMemoryPlanner
-{
- using MemoryPlans = ir::OperandIndexMap<Block>;
-
- /**
- * @brief Claim memory for operand
- * @param[in] index The operand index
- * @param[in] size The size of the memory
- */
- virtual void claim(const ir::OperandIndex &, size_t) = 0;
- /**
- * @brief Release memory for operand
- * @param[in] index The operand index
- */
- virtual void release(const ir::OperandIndex &) = 0;
- /**
- * @brief Get capacity for memory planning
- * @return The value of capacity
- */
- virtual uint32_t capacity() = 0;
- /**
- * @brief Get MemoryPlans
- * @return MemoryPlans
- */
- virtual MemoryPlans &memory_plans() = 0;
-
- virtual ~IMemoryPlanner() = default;
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_IMEMORY_PLANNER_H__
diff --git a/runtime/onert/core/include/backend/cpu_common/MemoryManager.h b/runtime/onert/core/include/backend/cpu_common/MemoryManager.h
deleted file mode 100644
index 28ec6b803..000000000
--- a/runtime/onert/core/include/backend/cpu_common/MemoryManager.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_MEMORY_MANAGER_H__
-#define __ONERT_BACKEND_CPU_MEMORY_MANAGER_H__
-
-#include "Allocator.h"
-#include "backend/IMemoryManager.h"
-#include "IMemoryPlanner.h"
-
-namespace onert
-{
-namespace backend
-{
-
-class ITensor;
-
-namespace cpu_common
-{
-
-class MemoryManager : public backend::IMemoryManager
-{
-public:
- MemoryManager();
- MemoryManager(const std::string);
- virtual ~MemoryManager() = default;
-
- void allocate(void) override;
- uint8_t *getBuffer(const ir::OperandIndex &ind) const;
- void deallocate(void) override { _mem_alloc->release(); }
-
- void claimPlan(const ir::OperandIndex &ind, uint32_t size);
- void releasePlan(const ir::OperandIndex &ind);
-
-private:
- IMemoryPlanner *createMemoryPlanner();
- IMemoryPlanner *createMemoryPlanner(const std::string);
-
-private:
- ir::OperandIndexMap<Block> _tensor_mem_map;
- std::shared_ptr<IMemoryPlanner> _mem_planner;
- std::shared_ptr<Allocator> _mem_alloc;
-};
-
-class DynamicMemoryManager
-{
-public:
- DynamicMemoryManager() = default;
- virtual ~DynamicMemoryManager() = default;
-
- std::shared_ptr<Allocator> allocate(const ITensor *tensor, uint32_t capacity);
- void deallocate(const ITensor *tensor);
- void deallocate(void);
-
-private:
- std::unordered_map<const ITensor *, std::shared_ptr<Allocator>> _mem_alloc_map;
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_MEMORY_MANAGER_H__
diff --git a/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h b/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h
deleted file mode 100644
index fa50b551e..000000000
--- a/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_COMMON_STATICTENSOR_MANAGER_H__
-#define __ONERT_BACKEND_CPU_COMMON_STATICTENSOR_MANAGER_H__
-
-#include "MemoryManager.h"
-
-#include "backend/IStaticTensorManager.h"
-#include "ir/OperandIndexMap.h"
-#include "ir/OperandInfo.h"
-#include "TensorRegistry.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-class DynamicTensorManager;
-
-class StaticTensorManager : public backend::IStaticTensorManager
-{
-public:
- StaticTensorManager(const std::shared_ptr<TensorRegistry> &reg,
- DynamicMemoryManager *dynamic_mem_mgr);
- virtual ~StaticTensorManager() = default;
-
- void allocateConsts(void);
- void allocateNonconsts(void);
- void deallocateConsts(void);
- void deallocateNonconsts(void);
-
- void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info,
- ir::Layout backend_layout, bool as_const);
-
- void claimPlan(const ir::OperandIndex &ind, uint32_t size);
- void releasePlan(const ir::OperandIndex &ind);
-
- void iterate(const std::function<void(const ir::OperandIndex &)> &fn);
-
-private:
- std::unique_ptr<DynamicMemoryManager> _const_mgr;
- std::unique_ptr<MemoryManager> _nonconst_mgr;
- const std::shared_ptr<TensorRegistry> _tensors;
- ir::OperandIndexMap<bool> _as_constants;
- DynamicMemoryManager *_dynamic_mem_mgr;
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_COMMON_STATICTENSOR_MANAGER_H__
diff --git a/runtime/onert/core/include/backend/cpu_common/Tensor.h b/runtime/onert/core/include/backend/cpu_common/Tensor.h
deleted file mode 100644
index 5fa20e15d..000000000
--- a/runtime/onert/core/include/backend/cpu_common/Tensor.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_COMMON_TENSOR_H__
-#define __ONERT_BACKEND_CPU_COMMON_TENSOR_H__
-
-#include "Allocator.h"
-
-#include <backend/IPortableTensor.h>
-#include <ir/OperandInfo.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-class DynamicMemoryManager;
-
-class Tensor : public IPortableTensor
-{
-public:
- Tensor() = delete;
- virtual ~Tensor();
-
-public:
- Tensor(const ir::OperandInfo &info, const ir::Layout layout,
- DynamicMemoryManager *dynamic_mem_mgr)
- : IPortableTensor(info), _layout(layout), _buffer(nullptr), _num_references(0),
- _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
- {
- // DO NOTHING
- }
-
-public:
- // Only one of two method 'setBuffer' must be called once
-
- /**
- * @brief Set the Buffer object. This method is called for static and non-const tensor
- */
- void setBuffer(uint8_t *buffer)
- {
- assert(_buffer == nullptr);
- _buffer = buffer;
- }
-
- /**
- * @brief Set the Buffer object. This method is called for dynamic or const tensor
- */
- void setBuffer(const std::shared_ptr<Allocator> &alloc)
- {
- assert(_buffer == nullptr);
- _allocator = alloc;
- _buffer = alloc->base();
- }
-
- // This works just as setBuffer but it simply overwrite existing Allocator without nullptr check
- void overwriteBuffer(const std::shared_ptr<Allocator> &alloc)
- {
- _allocator = alloc;
- _buffer = alloc->base();
- }
-
- /**
- * @brief Mark this tensor does not have memory.
- * Real memory deallocation should be done by caller.
- */
- void resetBuffer()
- {
- _allocator.reset();
- _buffer = nullptr;
- }
-
-public:
- uint8_t *buffer() const override { return _buffer; }
- /**
- * @brief Get dimension by index
- *
- * @param index Index to get diemension
- * @return size_t Dimension at index
- * @note N : dimension(0)
- * H : dimension(1)
- * W : dimension(2)
- * C : dimension(3)
- */
- size_t dimension(size_t index) const final override { return _info.shape().dim(index); }
- size_t num_dimensions() const override { return _info.shape().rank(); }
- size_t total_size() const override { return _info.total_size(); }
- size_t calcOffset(const ir::Coordinates &coords) const override;
- ir::Layout layout() const override { return _layout; }
- ir::DataType data_type() const override { return _info.typeInfo().type(); }
- float data_scale() const override { return _info.typeInfo().scale(); }
- int32_t data_offset() const override { return _info.typeInfo().offset(); }
- bool is_constant() const override { return _info.isConstant(); }
- bool is_dynamic() const override { return _info.isDynamic(); }
- void set_dynamic() override { _info.setDynamic(); }
- bool applyShape(const ir::Shape &new_shape) override;
- const ir::Sparsity *sparsity() const override { return _info.typeInfo().sparsity(); }
-
- virtual void increase_ref()
- {
- assert(is_dynamic() ||
- // when not dynamic
- (_buffer != nullptr));
-
- ++_num_references;
- }
-
- virtual void decrease_ref()
- {
- assert(_buffer != nullptr || _allocator != nullptr);
- assert(_num_references > 0);
- --_num_references;
- // constant tensor and dynamic tensor has _allocator
- if (_num_references == 0)
- {
- if (_buffer != nullptr)
- _buffer = nullptr;
- if (_allocator != nullptr)
- {
- _allocator->release();
- _allocator = nullptr;
- }
- }
- }
-
- /**
- * @brief Reset reference count to zero and release data
- */
- virtual void reset_ref()
- {
- assert(_buffer != nullptr || _allocator != nullptr);
- assert(_num_references > 0);
- _num_references = 0;
-
- // Only constant tensor has allocator pointer
- if (_buffer != nullptr)
- _buffer = nullptr;
- else
- {
- _allocator->release();
- _allocator = nullptr;
- }
- }
-
- virtual int32_t num_references() { return _num_references; }
-
- void setShape(const ir::Shape &new_shape) override;
-
-protected:
- ir::Layout _layout;
- uint8_t *_buffer;
- int32_t _num_references;
- DynamicMemoryManager *_dynamic_mem_mgr;
-
-private:
- /**
- * @brief Memory allocator for dynamic tensor and const tensor
- * Since maintaing _allocator and also _buffer makes confusion,
- * we will mainly use _buffer (not _allocator.base()) for memory pointer in this code.
- * _allocator(shared_ptr) is used to guarantee that we have valid _buffer.
- */
- std::shared_ptr<Allocator> _allocator;
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_COMMON_TENSOR_H__
diff --git a/runtime/onert/core/include/backend/cpu_common/TensorRegistry.h b/runtime/onert/core/include/backend/cpu_common/TensorRegistry.h
deleted file mode 100644
index 5896fb7ad..000000000
--- a/runtime/onert/core/include/backend/cpu_common/TensorRegistry.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_COMMON_TENSOR_REGISTRY__
-#define __ONERT_BACKEND_CPU_COMMON_TENSOR_REGISTRY__
-
-#include "backend/ITensorRegistry.h"
-#include "Tensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-using TensorRegistry = PortableTensorRegistryTemplate<cpu_common::Tensor>;
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_COMMON_TENSOR_REGISTRY__
diff --git a/runtime/onert/core/include/compiler/BackendManager.h b/runtime/onert/core/include/compiler/BackendManager.h
deleted file mode 100644
index af13d13f7..000000000
--- a/runtime/onert/core/include/compiler/BackendManager.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_BACKEND_MANAGER_H__
-#define __ONERT_COMPILER_BACKEND_MANAGER_H__
-
-#include <memory>
-#include <map>
-
-#include "ir/Operands.h"
-#include "backend/Backend.h"
-#include "backend/controlflow/Backend.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-class BackendManager
-{
-public:
- using backend_create_t = backend::Backend *(*)();
- using backend_destroy_t = void (*)(backend::Backend *);
- using dlhandle_destroy_t = void (*)(void *);
-
- static BackendManager &get();
-
-public:
- backend::Backend *get(const std::string &key);
- const backend::Backend *get(const std::string &key) const;
- const backend::controlflow::Backend *getControlflow() const;
- const std::vector<const backend::Backend *> getAll() const
- {
- std::vector<const backend::Backend *> v;
- for (const auto &p : _gen_map)
- v.emplace_back(p.second.get());
- return v;
- }
- size_t num_backends() const { return _gen_map.size(); }
- /**
- * @brief load backend plugin
- *
- * @param backend backend to be loaded
- *
- * @return
- */
- void loadBackend(const std::string &backend);
-
-private:
- BackendManager();
-
-private:
- std::map<std::string, std::unique_ptr<void, dlhandle_destroy_t>> _handle_map;
- std::map<std::string, std::unique_ptr<backend::Backend, backend_destroy_t>> _gen_map;
- backend::controlflow::Backend *_controlflow{nullptr};
- /**
- * @brief load controlflow backend
- *
- * @param backend backend to be loaded
- *
- * @return
- */
- void loadControlflowBackend();
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_BACKEND_MANAGER_H__
diff --git a/runtime/onert/core/include/compiler/BackendResolver.h b/runtime/onert/core/include/compiler/BackendResolver.h
deleted file mode 100644
index a316b4335..000000000
--- a/runtime/onert/core/include/compiler/BackendResolver.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_BACKEND_RESOLVER_H__
-#define __ONERT_COMPILER_BACKEND_RESOLVER_H__
-
-#include <unordered_map>
-#include <typeindex>
-
-#include "backend/Backend.h"
-#include "ir/OperationIndexMap.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-class BackendResolver
-{
-public:
- const backend::Backend *getBackend(const ir::OperationIndex &index) const
- {
- return _gen_map.at(index);
- }
-
- void setBackend(const ir::OperationIndex &index, const backend::Backend *backend)
- {
- _gen_map[index] = backend;
- }
-
- void
- iterate(const std::function<void(const ir::OperationIndex &, const backend::Backend &)> &fn) const
- {
- for (const auto &e : _gen_map)
- {
- fn(e.first, *e.second);
- }
- }
-
-private:
- ir::OperationIndexMap<const backend::Backend *> _gen_map;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_BACKEND_RESOLVER_H__
diff --git a/runtime/onert/core/include/compiler/CodeMap.h b/runtime/onert/core/include/compiler/CodeMap.h
deleted file mode 100644
index e13d3334c..000000000
--- a/runtime/onert/core/include/compiler/CodeMap.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_CODE_MAP_H__
-#define __ONERT_COMPILER_CODE_MAP_H__
-
-#include <unordered_map>
-
-namespace onert
-{
-namespace compiler
-{
-
-struct CodeAndInfo
-{
- const ir::OpSequence *op_seq;
- const ir::operation::LowerInfo *lower_info;
- std::unique_ptr<exec::FunctionSequence> fn_seq;
-
- CodeAndInfo(const ir::OpSequence *op_seq, const ir::operation::LowerInfo *lower_info,
- std::unique_ptr<exec::FunctionSequence> &&fn_seq)
- : op_seq{op_seq}, lower_info{lower_info}, fn_seq{std::move(fn_seq)}
- {
- }
-};
-
-using CodeMap = std::unordered_map<ir::OpSequenceIndex, CodeAndInfo>;
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_CODE_MAP_H__
diff --git a/runtime/onert/core/include/compiler/Compiler.h b/runtime/onert/core/include/compiler/Compiler.h
deleted file mode 100644
index 3098be7ba..000000000
--- a/runtime/onert/core/include/compiler/Compiler.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Compiler.h
- * @brief This file contains Compiler class to define and run compilation phase
- */
-
-#ifndef __ONERT_COMPILER_COMPILE_H_
-#define __ONERT_COMPILER_COMPILE_H_
-
-#include "ir/Graph.h"
-#include "exec/IExecutor.h"
-
-namespace onert
-{
-
-namespace compiler
-{
-
-enum class State
-{
- CREATED, // Before compilation
- COMPILED // Success compilation
-};
-
-struct ManualSchedulerOptions
-{
- std::string backend_for_all;
- std::unordered_map<ir::OpCode, std::string> opcode_to_backend;
- std::unordered_map<ir::OperationIndex, std::string> index_to_backend;
-};
-
-struct CompilerOptions
-{
- // GENERAL OPTIONS
- std::vector<std::string> backend_list;
- bool is_primary_subgraph; // TODO Remove this out of this struct as it is not user-given option
-
- // OPTIONS ONLY FOR DEBUGGING/PROFILING
- std::string trace_filepath; //< File path to save trace records
- int graph_dump_level; //< Graph dump level, values between 0 and 2 are valid
- int op_seq_max_node; //< Number of nodes that can be
- std::string executor; //< Executor name to use
- ManualSchedulerOptions manual_scheduler_options; //< Options for ManualScheduler
- bool he_scheduler; //< HEScheduler if true, ManualScheduler otherwise
- bool he_profiling_mode; //< Whether HEScheduler profiling mode ON/OFF
- bool disable_compile; //< Run with Interpreter if true, try compilation otherwise
- bool fp16_enable; //< Whether fp16 mode ON/OFF
-};
-
-CompilerOptions fetchCompilerOptionsFromGlobalConfig(const ir::Subgraphs &subgs);
-
-/**
- * @brief Class to compile graph model
- */
-class Compiler
-{
-public:
- /**
- * @brief Construct a new Compiler object
- * @param[in] subgs All subgraphs of a model
- */
- Compiler(const std::shared_ptr<ir::Subgraphs> &subgs);
-
-public:
- /**
- * @brief Do compilation with the options
- *
- * @return std::shared_ptr<exec::ExecutorMap> Executors as a result of compilation
- */
- std::shared_ptr<exec::ExecutorMap> compile(void);
-
- State state(void) const { return _state; }
-
- /**
- * @brief Check if model can compile
- * @return @c true if model can compile, otherwise @c false
- * @note This method don't check model correctness,\n
- * so model verification should be done before calling this method
- */
- bool checkCompilable();
- CompilerOptions &options() { return _options; }
-
- /**
- * @brief Allow to compute float32 using float16 data type
- */
- void enableToFp16();
-
-private:
- void checkProfilerConditions();
- std::shared_ptr<ir::Graph> &primary_subgraph() { return _subgraphs->at(ir::SubgraphIndex{0}); }
-
-private:
- std::shared_ptr<ir::Subgraphs> _subgraphs;
- // NOTE These executors does not have duplicated subgraph. This mean they do not allow support
- // subgraphs being called recursively because data of non-constant tensor of parent executor will
- // be updated by child executor. If you want to support subgraphs being called recursively, you
- // have to add allocate non-constant tensor memory of executors in execution time when each
- // subgraph is called.
- State _state;
- CompilerOptions _options;
-};
-
-} // namespace compiler
-
-} // namespace onert
-
-#endif // __ONERT_COMPILER_COMPILE_H_
diff --git a/runtime/onert/core/include/compiler/ExecutionBuilder.h b/runtime/onert/core/include/compiler/ExecutionBuilder.h
deleted file mode 100644
index d54d9d046..000000000
--- a/runtime/onert/core/include/compiler/ExecutionBuilder.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_EXECUTION_BUILDER_H__
-#define __ONERT_COMPILER_EXECUTION_BUILDER_H__
-
-#include <memory>
-
-#include "ir/operation/LowerInfo.h"
-#include "ir/OpSequence.h"
-#include "exec/FunctionSequence.h"
-#include "CodeMap.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-class ExecutionBuilder
-{
-public:
- void append(const ir::OpSequenceIndex index, CodeAndInfo &&code_and_info)
- {
- _code_map.emplace(index, std::move(code_and_info));
- }
-
- CodeMap releaseCodeMap() { return std::move(_code_map); }
-
-private:
- CodeMap _code_map;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_EXECUTION_BUILDER_H__
diff --git a/runtime/onert/core/include/compiler/LoweredGraph.h b/runtime/onert/core/include/compiler/LoweredGraph.h
deleted file mode 100644
index aadba6857..000000000
--- a/runtime/onert/core/include/compiler/LoweredGraph.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_LOWERED_GRAPH_H__
-#define __ONERT_IR_LOWERED_GRAPH_H__
-
-#include "ir/Graph.h"
-#include "ir/LowerInfoMap.h"
-#include "ir/OpSequences.h"
-#include "compiler/BackendResolver.h"
-#include "compiler/Compiler.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-/**
- * @brief Class that contains lowering information on graph.
- * In addition, after lowering, operands in graph will be set to "dynamic"
- * if the shape of output of an operation cannot be decided at compilation time.
- */
-class LoweredGraph
-{
-public:
- LoweredGraph(const ir::Graph &graph, const compiler::CompilerOptions &options);
-
- ir::Graph &graph() { return _graph; }
- const ir::Graph &graph() const { return _graph; }
- const ir::LowerInfoMap *getLowerInfo() const { return &_lower_info_map; }
- const ir::operation::LowerInfo *getLowerInfo(const ir::OpSequenceIndex &op_seq_index) const;
- void setLowerInfo(const ir::OpSequenceIndex &op_seq_index,
- std::unique_ptr<ir::operation::LowerInfo> &&lower_info);
- void removeLowerInfo(const ir::OpSequenceIndex &op_seq_index);
- const ir::operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index) const;
- ir::operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index);
- void setLowerInfo(const ir::OperandIndex &index,
- std::unique_ptr<ir::operand::LowerInfo> &&lower_info);
- void removeLowerInfo(const ir::OperandIndex &index);
- ir::OpSequences &op_seqs() { return _op_seqs; }
- const ir::OpSequences &op_seqs() const { return _op_seqs; }
- void iterateTopolOpSeqs(
- const std::function<void(const ir::OpSequenceIndex &, const ir::OpSequence &)> &fn) const;
- void
- iterateTopolOpSeqs(const std::function<void(const ir::OpSequenceIndex &, ir::OpSequence &)> &fn);
- const backend::BackendContexts &backend_contexts() { return _backend_contexts; }
- const backend::BackendContexts &backend_contexts() const { return _backend_contexts; }
- std::shared_ptr<ir::OperationIndexMap<int64_t>> indexed_ranks() { return _indexed_ranks; }
-
-private:
- void
- makeOpSequences(ir::OperandIndexMap<std::unique_ptr<ir::operand::LowerInfo>> &operands_lower_info,
- const compiler::CompilerOptions &options,
- const compiler::BackendResolver &backend_resolver);
-
- void manipulateLowerInfo(
- ir::OperandIndexMap<std::unique_ptr<ir::operand::LowerInfo>> &operands_lower_info,
- bool is_primary);
- void dumpLowerInfo();
- bool mergeable(const ir::OpSequenceIndex &op_seq_index, const ir::OperationIndex &node_index,
- ir::Layout layout, const compiler::BackendResolver &backend_resolver);
- ir::OpSequenceIndex appendFreshSingleOpSequence(const ir::OperationIndex &node_index,
- const ir::Operation &node);
-
-private:
- ir::Graph _graph;
- backend::BackendContexts _backend_contexts;
- std::shared_ptr<ir::OperationIndexMap<int64_t>> _indexed_ranks;
- ir::LowerInfoMap _lower_info_map;
- // Pass(for Perm) can accept only graph so that Graph has OpSequences as a member
- ir::OpSequences _op_seqs;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_IR_LOWERED_GRAPH_H__
diff --git a/runtime/onert/core/include/compiler/StaticShapeInference.h b/runtime/onert/core/include/compiler/StaticShapeInference.h
deleted file mode 100644
index 5af11074e..000000000
--- a/runtime/onert/core/include/compiler/StaticShapeInference.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_STATIC_SHAPE_INFERENCE_H__
-#define __ONERT_COMPILER_STATIC_SHAPE_INFERENCE_H__
-
-#include "ir/OperationVisitor.h"
-#include "ir/OpSequence.h"
-#include "compiler/LoweredGraph.h"
-#include "ir/Index.h"
-
-#include <memory>
-#include <unordered_map>
-
-namespace onert
-{
-namespace compiler
-{
-
-/**
- * @brief Class to infer shape before running kernels. It does the following:
- * - re-calculate and set output shape at compile time (before running kernels)
- * - if calculation cannot be done at compile time, mark the outputs to be dynamic, meaning
- * shapes of outputs will be calculated during running kernels
- */
-class StaticShapeInferer : public ir::OperationVisitor
-{
-public:
- StaticShapeInferer(
- const ir::SubgraphIndex &subg_idx,
- const std::unordered_map<ir::SubgraphIndex, std::unique_ptr<compiler::LoweredGraph>>
- &lowered_subgs)
- : _lowered_subgs(lowered_subgs), _operands(lowered_subgs.at(subg_idx)->graph().operands()),
- _operations(lowered_subgs.at(subg_idx)->graph().operations()),
- _return_has_dynamic_tensor(false)
- { /* empty */
- }
- virtual ~StaticShapeInferer() = default;
-
-public:
- /**
- * @brief Infer shape of operands beloning to ops and set the output shape.
- * If output shape cannot be known without running op, mark it so that it can be allocated
- * when running kernel.
- * @param op_seq sequence of operations
- * @return @c true if op_seq's input or output has any dynamic tensor; @c false otherwise.
- */
- bool infer(const ir::OpSequence &op_seq);
-
- void dump();
-
-private:
- bool checkDynamicInput(const ir::Operation &op);
- void setDynamicOutput(const ir::Operation &op);
-
-private:
- // TODO Define visitors for operations. List them in alphabetic order.
- void visit(const ir::operation::ArgMax &op) override;
- void visit(const ir::operation::BatchMatMul &op) override;
- void visit(const ir::operation::BCQFullyConnected &op) override;
- void visit(const ir::operation::BCQGather &op) override;
- void visit(const ir::operation::BinaryArithmetic &op) override;
- void visit(const ir::operation::BroadcastTo &op) override;
- void visit(const ir::operation::Comparison &op) override;
- void visit(const ir::operation::Concat &op) override;
- void visit(const ir::operation::Conv2D &op) override;
- void visit(const ir::operation::ElementwiseActivation &op) override;
- void visit(const ir::operation::ElementwiseBinary &op) override;
- void visit(const ir::operation::ElementwiseUnary &op) override;
- void visit(const ir::operation::ExpandDims &op) override;
- void visit(const ir::operation::Fill &op) override;
- void visit(const ir::operation::FullyConnected &op) override;
- void visit(const ir::operation::FusedBatchNorm &op) override;
- void visit(const ir::operation::Gather &op) override;
- void visit(const ir::operation::If &op) override;
- void visit(const ir::operation::L2Normalization &op) override;
- void visit(const ir::operation::LSTM &op) override;
- void visit(const ir::operation::MatrixBandPart &op) override;
- void visit(const ir::operation::OneHot &op) override;
- void visit(const ir::operation::Pack &op) override;
- void visit(const ir::operation::Pad &op) override;
- void visit(const ir::operation::Permute &op) override;
- void visit(const ir::operation::Pow &op) override;
- void visit(const ir::operation::Range &op) override;
- void visit(const ir::operation::Reduce &op) override;
- void visit(const ir::operation::Reshape &op) override;
- void visit(const ir::operation::ResizeBilinear &op) override;
- void visit(const ir::operation::Reverse &op) override;
- void visit(const ir::operation::Select &op) override;
- void visit(const ir::operation::Shape &op) override;
- void visit(const ir::operation::Slice &op) override;
- void visit(const ir::operation::Softmax &op) override;
- void visit(const ir::operation::SpaceToBatchND &op) override;
- void visit(const ir::operation::Split &op) override;
- void visit(const ir::operation::Squeeze &op) override;
- void visit(const ir::operation::StridedSlice &op) override;
- void visit(const ir::operation::SquaredDifference &op) override;
- void visit(const ir::operation::Tile &op) override;
- void visit(const ir::operation::Transpose &op) override;
- void visit(const ir::operation::Unpack &op) override;
- void visit(const ir::operation::While &op) override;
-
-private:
- /**
- * @brief Performs shape inference for arithmetic operation
- */
- void handleBinaryArithmeticOp(const ir::Operation &op, const ir::OperandIndex lhs_idx,
- const ir::OperandIndex rhs_idx);
-
- /**
- * @brief Performs shape inference for unary op whose output shape is
- * always same with input shape
- */
- void handleSimpleUnaryOp(const ir::Operation &op, const ir::OperandIndex input_idx);
-
-private:
- const std::unordered_map<ir::SubgraphIndex, std::unique_ptr<compiler::LoweredGraph>>
- &_lowered_subgs;
- // _operands and _operations can be changed by controlflow operation
- ir::Operands &_operands; // operands of current subgraph
- ir::Operations &_operations; // operations of current subgraph
- bool _return_has_dynamic_tensor;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_STATIC_SHAPE_INFERENCE_H__
diff --git a/runtime/onert/core/include/exec/DynamicShapeInference.h b/runtime/onert/core/include/exec/DynamicShapeInference.h
deleted file mode 100644
index 4a86708d0..000000000
--- a/runtime/onert/core/include/exec/DynamicShapeInference.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_DYNAMIC_SHAPE_INFERENCE_H__
-#define __ONERT_EXEC_DYNAMIC_SHAPE_INFERENCE_H__
-
-#include "ir/Operands.h"
-#include "ir/OperationVisitor.h"
-#include "ir/Index.h"
-#include "backend/IDynamicTensorManager.h"
-#include "backend/ITensorManager.h"
-#include "backend/ITensorRegistry.h"
-
-#include <map>
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief Class to infer shape of output tensor at execution time and
- * allocate memory fo output tensor if needed
- */
-class DynamicShapeInferer : public ir::OperationVisitor
-{
-public:
- DynamicShapeInferer(const ir::Operands &operands,
- const std::shared_ptr<backend::ITensorRegistry> &tensor_registry)
- : _operands(operands), _tensor_registry(tensor_registry)
- {
- UNUSED_RELEASE(_operands);
- UNUSED_RELEASE(_tensor_registry);
- }
-
-public:
- // TODO Define visitors for operations. List them in alphabetic order.
- // Remove TODO when any op starting from the alphabet is added
- void visit(const ir::operation::ArgMax &op) override;
- void visit(const ir::operation::BatchMatMul &op) override;
- void visit(const ir::operation::BCQFullyConnected &op) override;
- void visit(const ir::operation::BCQGather &op) override;
- void visit(const ir::operation::BinaryArithmetic &op) override;
- void visit(const ir::operation::BroadcastTo &op) override;
- void visit(const ir::operation::Comparison &op) override;
- void visit(const ir::operation::Concat &op) override;
- void visit(const ir::operation::Conv2D &op) override;
- void visit(const ir::operation::ElementwiseActivation &op) override;
- void visit(const ir::operation::ElementwiseBinary &op) override;
- void visit(const ir::operation::ElementwiseUnary &op) override;
- void visit(const ir::operation::ExpandDims &op) override;
- void visit(const ir::operation::Fill &op) override;
- void visit(const ir::operation::FullyConnected &op) override;
- void visit(const ir::operation::FusedBatchNorm &op) override;
- void visit(const ir::operation::Gather &op) override;
- void visit(const ir::operation::L2Normalization &op) override;
- void visit(const ir::operation::LSTM &op) override;
- void visit(const ir::operation::MatrixBandPart &op) override;
- void visit(const ir::operation::OneHot &op) override;
- void visit(const ir::operation::Pack &op) override;
- void visit(const ir::operation::Pad &op) override;
- void visit(const ir::operation::Permute &op) override;
- void visit(const ir::operation::Pow &op) override;
- // TODO write op starting from Q
- void visit(const ir::operation::Range &op) override;
- void visit(const ir::operation::Reduce &op) override;
- void visit(const ir::operation::Reshape &op) override;
- void visit(const ir::operation::ResizeBilinear &op) override;
- void visit(const ir::operation::Reverse &op) override;
- void visit(const ir::operation::Select &op) override;
- void visit(const ir::operation::Shape &op) override;
- void visit(const ir::operation::Slice &op) override;
- void visit(const ir::operation::Softmax &op) override;
- void visit(const ir::operation::SpaceToBatchND &op) override;
- void visit(const ir::operation::Split &op) override;
- void visit(const ir::operation::Squeeze &op) override;
- void visit(const ir::operation::StridedSlice &op) override;
- void visit(const ir::operation::SquaredDifference &op) override;
- void visit(const ir::operation::Tile &op) override;
- void visit(const ir::operation::Transpose &op) override;
- void visit(const ir::operation::Unpack &op) override;
- // TODO write op starting from V
-
-private:
- /**
- * @brief Performs shape inference and memory allocation for arithmetic operation
- */
- void handleBinaryArithmeticOp(const ir::Operation &op, const ir::OperandIndex lhs_idx,
- const ir::OperandIndex rhs_idx);
- /**
- * @brief Performs shape inference and memory allocation for unary op whose output shape is
- * always same with input shape
- */
- void handleSimpleUnaryOp(const ir::Operation &op, const ir::OperandIndex input_idx);
-
-private:
- /**
- * @brief To get operand-level info, e.g., ir::Operand::isConstant()
- */
- const ir::Operands &_operands;
- /**
- * @brief To get tensor object and access tensor-level info, e.g., ITensor::buffer()
- */
- std::shared_ptr<backend::ITensorRegistry> _tensor_registry;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_DYNAMIC_SHAPE_INFERENCE_H__
diff --git a/runtime/onert/core/include/exec/Execution.h b/runtime/onert/core/include/exec/Execution.h
deleted file mode 100644
index d3c5b6dda..000000000
--- a/runtime/onert/core/include/exec/Execution.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Execution.h
- * @brief This file defines execution
- */
-#ifndef __ONERT_EXEC_EXECUTION_H__
-#define __ONERT_EXEC_EXECUTION_H__
-
-#include "ir/Layout.h"
-#include "exec/IExecutor.h"
-#include "IODescription.h"
-
-#include <thread>
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief Class to define execution instance to collect input/output information for inference
- * and prepare executor run (TODO)
- */
-class Execution
-{
-
-public:
- /**
- * @brief Construct a new Execution object
- * @param[in] executor Model executor
- */
- Execution(const std::shared_ptr<ExecutorMap> &executors);
-
-public:
- /**
- * @brief Returns primary graph object
- * @return Graph object
- */
- const ir::Graph &primary_subgraph() const { return primary_executor()->graph(); }
-
- /**
- * @brief Change input shape
- * @param[in] index Input index
- * @param[in] new_shape shape to change
- */
- void changeInputShape(const ir::IOIndex &index, const ir::Shape &new_shape);
-
- /**
- * @brief Set input data's information
- * @param[in] index Input index
- * @param[in] buffer Input data's buffer pointer
- * @param[in] length Input data's length
- * @param[in] layout Input data's data format
- */
- void setInput(const ir::IOIndex &index, const void *buffer, size_t length,
- ir::Layout layout = ir::Layout::NHWC);
- /**
- * @brief Set input data's information, especially to specify unknown dimensions on model
- * build time.
- * @param[in] index Input index
- * @param[in] type Input data's type info
- * @param[in] shape Input data's shape
- * @param[in] buffer Input data's buffer pointer
- * @param[in] length Input data's length
- * @param[in] layout Input data's data format
- */
- void setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
- const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC);
- /**
- * @brief Set output data's information
- * @param[in] index Output index
- * @param[in] buffer Output data's buffer pointer
- * @param[in] length Output data's length
- * @param[in] layout Output data's data format
- */
- void setOutput(const ir::IOIndex &index, void *buffer, size_t length,
- ir::Layout layout = ir::Layout::NHWC);
- /**
- * @brief Set output data's information, especially to specify unknown dimensions on model
- * build time.
- * @param[in] index Output index
- * @param[in] type Output data's type info
- * @param[in] shape Output data's shape
- * @param[in] buffer Output data's buffer pointer
- * @param[in] length Output data's length
- * @param[in] layout Output data's data format
- */
- void setOutput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
- void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC);
- /**
- * @brief Set input data's data format
- * @param[in] index Input index
- * @param[in] layout Input data's data format
- */
- void setInputLayout(const ir::IOIndex &index, ir::Layout layout);
- /**
- * @brief Set output data's data format
- * @param[in] index Output index
- * @param[in] layout Output data's data format
- */
- void setOutputLayout(const ir::IOIndex &index, ir::Layout layout);
- /**
- * @brief Execution
- * @note It should be called after setting input and output buffer
- */
- void execute();
-
- /**
- * @brief Start asynchronous execution
- * @note It returns after execution thread is started
- * It should be called after setting input and output buffer
- */
- void startExecute(void);
-
- /**
- * @brief Return when execution is finished
- * @note It waits until execution is finished
- */
- void waitFinish(void);
-
- /**
- * @brief Check execution is finished
- * @return @c true if execution is finished, otherwise @c false
- */
- bool isFinished(void) const;
-
- ir::Shape getInputShape(ir::IOIndex ind) const;
- ir::Shape getOutputShape(ir::IOIndex ind) const;
-
-private:
- const std::unique_ptr<IExecutor> &primary_executor() const
- {
- return _executors->at(ir::SubgraphIndex{0});
- };
- std::unique_ptr<IExecutor> &primary_executor() { return _executors->at(ir::SubgraphIndex{0}); };
-
-private:
- const std::shared_ptr<ExecutorMap> _executors;
- IODescription _io_desc;
- std::unique_ptr<std::thread> _exec_thread;
- bool finished{false};
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_EXECUTION_H__
diff --git a/runtime/onert/core/include/exec/FunctionSequence.h b/runtime/onert/core/include/exec/FunctionSequence.h
deleted file mode 100644
index 49f00dba1..000000000
--- a/runtime/onert/core/include/exec/FunctionSequence.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_FUNCTION_SEQUENCE_H__
-#define __ONERT_EXEC_FUNCTION_SEQUENCE_H__
-
-#include <memory>
-#include <cassert>
-#include <vector>
-#include <functional>
-
-#include "exec/IFunction.h"
-#include "exec/DynamicShapeInference.h"
-#include "ir/Operations.h"
-#include "backend/ITensorRegistry.h"
-#include "backend/IDynamicTensorManager.h"
-
-namespace onert
-{
-namespace exec
-{
-
-class FunctionSequence : public IFunction
-{
-public:
- template <typename... Args> FunctionSequence(Args &&... args) { initialize(std::move(args)...); }
-
-private:
- void initialize()
- {
- // Template base case : do nothing
- }
-
- template <typename T, typename... Args> void initialize(std::unique_ptr<T> &&fn, Args &&... args)
- {
- _functions.emplace_back(std::move(fn));
- initialize(std::move(args)...);
- }
-
-public:
- virtual ~FunctionSequence() = default;
-
- void run() override;
- void prepare() override;
-
- /**
- * @brief Appends an IFunction object to the function sequence
- *
- * @param function IFunction object to be appended
- */
- void append(std::unique_ptr<IFunction> &&function);
-
- void iterate(const std::function<void(IFunction &)> &fn);
-
- template <typename T, typename... Args> void wrap(Args &&... args)
- {
- for (auto &function : _functions)
- {
- function = std::make_unique<T>(std::move(function), args...);
- }
- }
-
-public: // methods related to dynamic tensor
- struct DynamicTensorCtx
- {
- const ir::OpSequence *op_seq = nullptr;
- const ir::Operations *operations = nullptr;
- std::shared_ptr<exec::DynamicShapeInferer> dynamic_shape_inferer = nullptr;
- backend::IDynamicTensorManager *dynamic_tensor_manager = nullptr;
- };
-
- /**
- * @brief Prepare to run FunctionSequence which "might" handle dynamic tensor
- * @note Calling this does not mean that run() will handle dynamic tensor.
- * enableDynamicShapeInferer(true) will make run() will handle dynamic tensor.
- */
- void dynamic_tensor_ctx(std::shared_ptr<DynamicTensorCtx> &dynamic_tensor_ctx)
- {
- _dynamic_tensor_ctx = dynamic_tensor_ctx;
- }
-
- std::shared_ptr<DynamicTensorCtx> &dynamic_tensor_ctx() { return _dynamic_tensor_ctx; }
-
- /**
- * @brief Call this function by passing @c true if this FunctionSequence handles dynamic tensors
- * and should run DynamicShapeInferer. This function can be called multiple times and
- * if @c false is passed during multiple calls, DynamicShapeInfere will not be run.
- * @note This must be called before run(). If not called, run() assumes that all tensors are
- * dynamic and DynamicShapeInferer will be run.
- */
- void enableDynamicShapeInferer(bool enable)
- {
- _enable_dynamic_shape_inferer = _enable_dynamic_shape_inferer || enable;
- }
-
- /**
- * @brief Call this function to initialize vars before running
- * @note When we run a model with static tensor input and then run with dynamic tensor input,
- * _enable_dynamic_shape_inferer is set to @c false at first run.
- * Once _enable_dynamic_shape_inferer is set to @c true it cannot be changed to @c false
- * only with calling enableDynamicShapeInferer(). So initializing it to @c false is
- * necessary.
- * @todo This is a quick fix. Adding this will increase time for run(). Find way to optimize.
- */
- void initRunning() { _enable_dynamic_shape_inferer = false; }
-
-protected:
- std::vector<std::unique_ptr<IFunction>> _functions;
-
-protected:
- bool _enable_dynamic_shape_inferer = false;
-
- std::shared_ptr<DynamicTensorCtx> _dynamic_tensor_ctx = nullptr;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_FUNCTION_SEQUENCE_H__
diff --git a/runtime/onert/core/include/exec/IExecutor.h b/runtime/onert/core/include/exec/IExecutor.h
deleted file mode 100644
index 1d2831dd0..000000000
--- a/runtime/onert/core/include/exec/IExecutor.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file IExecutor.h
- * @brief This file defines interface of Executor
- */
-#ifndef __ONERT_EXEC_I_EXECUTOR_H_
-#define __ONERT_EXEC_I_EXECUTOR_H_
-
-#include "ir/Graph.h"
-#include "IFunction.h"
-#include "IODescription.h"
-#include "ir/OperationIndexMap.h"
-#include "backend/IDynamicTensorManager.h"
-
-namespace onert
-{
-namespace exec
-{
-class IExecutionObserver;
-/**
- * @brief Struct to define interface of Executor
- */
-struct IExecutor
-{
- /**
- * @brief Construct a new IExecutor object
- */
- IExecutor() = default;
- /**
- * @brief Destroy the IExecutor object
- */
- virtual ~IExecutor() = default;
-
- /**
- * @brief Returns graph object
- *
- * @return Graph object
- */
- virtual const ir::Graph &graph() = 0;
-
- /**
- * @brief Set an ordering on operations
- * @param[in] ranks The table encoding the ordering
- */
- virtual void setIndexedRanks(std::shared_ptr<ir::OperationIndexMap<int64_t>>) = 0;
-
- /**
- * @brief Start execution
- * @param[in] desc Input and output description
- * @note This method should be thread-safe
- */
- virtual void execute(const IODescription &desc) = 0;
-};
-
-using ExecutorMap = std::unordered_map<ir::SubgraphIndex, std::unique_ptr<IExecutor>>;
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_I_EXECUTOR_H_
diff --git a/runtime/onert/core/include/exec/IFunction.h b/runtime/onert/core/include/exec/IFunction.h
deleted file mode 100644
index 18ba2457a..000000000
--- a/runtime/onert/core/include/exec/IFunction.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_I_FUNCTION_H__
-#define __ONERT_EXEC_I_FUNCTION_H__
-
-namespace onert
-{
-namespace exec
-{
-
-class IFunction
-{
-public:
- virtual ~IFunction() = default;
- virtual void run() = 0;
- virtual void prepare() {}
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_I_FUNCTION_H__
diff --git a/runtime/onert/core/include/exec/IODescription.h b/runtime/onert/core/include/exec/IODescription.h
deleted file mode 100644
index d1810ec3b..000000000
--- a/runtime/onert/core/include/exec/IODescription.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_IO_DESCRIPTION_H__
-#define __ONERT_EXEC_IO_DESCRIPTION_H__
-
-#include <vector>
-#include <unordered_map>
-
-#include "ir/OperandInfo.h"
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace exec
-{
-
-struct InputDesc
-{
- const ir::OperandInfo info;
- const void *buffer;
- const size_t size;
- const ir::Layout layout;
-
- InputDesc(void) = delete;
- InputDesc(const ir::OperandInfo &info, const void *buffer, const size_t size, ir::Layout layout)
- : info(info), buffer(buffer), size(size), layout(layout)
- {
- }
-};
-
-struct OutputDesc
-{
- // not `const` because shape should be modified after execution in case when output is
- // a dynamic tensor
- ir::OperandInfo info;
- void *buffer;
- const size_t size;
- const ir::Layout layout;
-
- OutputDesc(void) = delete;
- OutputDesc(const ir::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout)
- : info(info), buffer(buffer), size(size), layout(layout)
- {
- }
-};
-
-struct IODescription
-{
- std::vector<std::unique_ptr<InputDesc>> inputs;
- std::vector<std::unique_ptr<OutputDesc>> outputs;
- // Contains shape of input set by nnfw_set_input_tensorinfo(..)
- std::unordered_map<ir::IOIndex, ir::Shape> dynamic_input_shapes;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_IO_DESCRIPTION_H__
diff --git a/runtime/onert/core/include/exec/NopFunction.h b/runtime/onert/core/include/exec/NopFunction.h
deleted file mode 100644
index d0ed55921..000000000
--- a/runtime/onert/core/include/exec/NopFunction.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file NopFunction.h
- * @brief This file defines NopFunction
- */
-#ifndef __ONERT_EXEC_NOP_FUNCTION_H_
-#define __ONERT_EXEC_NOP_FUNCTION_H_
-
-#include "IFunction.h"
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief A derivative of IFunction tha does nothing
- *
- */
-class NopFunction : public IFunction
-{
-public:
- NopFunction() = default;
- void run() override
- {
- // DO NOTHING
- }
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_NOP_FUNCTION_H_
diff --git a/runtime/onert/core/include/ir/Coordinates.h b/runtime/onert/core/include/ir/Coordinates.h
deleted file mode 100644
index 3849a5509..000000000
--- a/runtime/onert/core/include/ir/Coordinates.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_COORDINATES_H__
-#define __ONERT_IR_COORDINATES_H__
-
-#include <cassert>
-#include <stdint.h>
-#include <vector>
-
-#include "Layout.h"
-
-namespace onert
-{
-namespace ir
-{
-
-/**
- * @brief Class to represent position(offset) of tensor.\n
- * Assume that the front is higher dimensional.
- * i.g. N: 0, C: 1, H: 2, W: 3 for NCHW layout
- */
-class Coordinates final
-{
-public:
- static constexpr size_t num_max_dimensions = 4;
-
-public:
- /**
- * @brief Construct a new Coordinates object with zero dimension
- * @return N/A
- */
- Coordinates() = default;
- /**
- * @brief Construct a new Coordinates object
- * @param[in] init The initialzer_list with coordinates
- * @return
- */
- Coordinates(std::initializer_list<int32_t> init) : _coordinates{init}
- {
- assert(init.size() <= num_max_dimensions);
- }
- /**
- * @brief Construct a new Coordinates object
- * @param[in] init The initialzer_list with coordinates
- * @return
- */
- Coordinates(std::initializer_list<uint32_t> init) : _coordinates{init.begin(), init.end()}
- {
- assert(init.size() <= num_max_dimensions);
- }
-
-public:
- /**
- * @brief Set the coordinate of one of the coordinates.
- *
- * @param[in] dimension Dimension for which the coordinate is set.
- * @param[in] Coordinate Coordinate to be set for the dimension.
- */
- void set(size_t dimension, int32_t coordinate)
- {
- assert(dimension < num_max_dimensions);
- if (dimension >= _coordinates.size())
- {
- _coordinates.resize(dimension + 1, 0);
- }
- _coordinates[dimension] = coordinate;
- }
-
-public:
- /**
- * @brief Return size of coordinates
- *
- * @return size of coordinates
- */
- size_t size() const { return _coordinates.size(); }
-
-public:
- int32_t operator[](size_t dimension) const
- {
- assert(dimension < _coordinates.size());
- return _coordinates[dimension];
- }
-
-public:
- /**
- * @brief begin() of const_iterator for this class
- *
- * @return The first iterator of the coordinates
- */
- std::vector<int32_t>::const_iterator begin() const { return _coordinates.begin(); }
- /**
- * @brief end() of const_iterator for this class
- *
- * @return The last iterator of the coordinates
- */
- std::vector<int32_t>::const_iterator end() const { return _coordinates.end(); }
-
-private:
- std::vector<int32_t> _coordinates;
-};
-
-Coordinates convertCoordinates(const Coordinates &from_coordinates, Layout from_layout,
- Layout to_layout);
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_COORDINATES_H__
diff --git a/runtime/onert/core/include/ir/Data.h b/runtime/onert/core/include/ir/Data.h
deleted file mode 100644
index d31191b4f..000000000
--- a/runtime/onert/core/include/ir/Data.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_DATA_H__
-#define __ONERT_IR_DATA_H__
-
-#include <algorithm>
-#include <sys/mman.h>
-
-namespace onert
-{
-namespace ir
-{
-
-struct Data
-{
- virtual ~Data() = default;
-
- virtual size_t size(void) const = 0;
- virtual const uint8_t *base(void) const = 0;
-};
-
-class CachedData final : public Data
-{
-public:
- CachedData(const uint8_t *base, size_t size) : _base{new uint8_t[size]}, _size{size}
- {
- std::copy(base, base + size, _base);
- }
-
-public:
- ~CachedData() { delete[] _base; }
-
-public:
- size_t size(void) const override { return _size; }
- const uint8_t *base(void) const override { return _base; }
-
-private:
- uint8_t *_base;
- size_t _size;
-};
-
-class ExternalData : public Data
-{
-public:
- ExternalData(const uint8_t *base, size_t size) : _base{base}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- size_t size(void) const override { return _size; }
- const uint8_t *base(void) const override { return _base; }
-
-private:
- const uint8_t *_base;
- const size_t _size;
-};
-
-class MMapedData final : public ExternalData
-{
-public:
- MMapedData(int fd, const std::ptrdiff_t mmap_offset, const size_t mmap_size,
- const std::ptrdiff_t data_offset, const size_t data_size)
- : ExternalData(nullptr, data_size),
- _mmap_base(
- static_cast<uint8_t *>(mmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, mmap_offset))),
- _mmap_size(mmap_size), _offset(data_offset - mmap_offset)
- {
- // DO NOTHING
- }
-
-public:
- ~MMapedData()
- {
- if (_mmap_size > 0)
- {
- munmap(const_cast<uint8_t *>(_mmap_base), _mmap_size);
- }
- }
-
-public:
- const uint8_t *base(void) const override { return _mmap_base + _offset; }
-
-private:
- const uint8_t *_mmap_base;
- size_t _mmap_size;
- std::ptrdiff_t _offset;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_DATA_H__
diff --git a/runtime/onert/core/include/ir/DataType.h b/runtime/onert/core/include/ir/DataType.h
deleted file mode 100644
index fe10b9283..000000000
--- a/runtime/onert/core/include/ir/DataType.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_DATATYPE_H__
-#define __ONERT_IR_DATATYPE_H__
-
-#include <cstdlib>
-
-namespace onert
-{
-namespace ir
-{
-
-enum class DataType
-{
- FLOAT32 = 0,
- INT32 = 1,
- UINT32 = 2,
- QUANT_UINT8_ASYMM = 3,
- BOOL8 = 4,
- UINT8 = 5,
- QUANT_INT8_SYMM = 6,
- FLOAT16 = 7,
- INT64 = 8,
-};
-
-size_t sizeOfDataType(DataType data_type);
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_DATATYPE_H__
diff --git a/runtime/onert/core/include/ir/Graph.h b/runtime/onert/core/include/ir/Graph.h
deleted file mode 100644
index 2103e6e64..000000000
--- a/runtime/onert/core/include/ir/Graph.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_GRAPH_H__
-#define __ONERT_IR_GRAPH_H__
-
-#include <functional>
-#include <unordered_map>
-
-#include "ir/Operands.h"
-#include "ir/Operations.h"
-#include "ir/OpSequence.h"
-#include "ir/OpSequences.h"
-#include "ir/Subgraphs.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace custom
-{
-class IKernelBuilder;
-} // namespace custom
-} // namespace backend
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-
-class Graph
-{
-private:
- enum class Phase
- {
- BUILDING,
- MODEL
- };
-
-public:
- Graph(void);
- ~Graph(void);
-
- // Graph Building
-public:
- OperandIndex addOperand(const Shape &shape, const TypeInfo &type);
- OperationIndex addOperation(std::unique_ptr<Operation> &&node);
- void setOperandValue(const OperandIndex &ind, std::shared_ptr<Data> data);
- void addInput(const OperandIndex &ind, const std::string &name = "");
- void addOutput(const OperandIndex &ind, const std::string &name = "");
- void finishBuilding(void);
- void removeOperand(const OperandIndex &ind) { _operands.remove(ind); }
- bool isBuildingPhase(void) const { return _phase == Phase::BUILDING; }
- void setLayout(Layout layout) { _layout = layout; }
- void setSubgraphs(const std::shared_ptr<Subgraphs> &subgs) { _subgraphs = subgs; }
-
-private:
- void initializeUseDef();
- void sweepGarbageOperands();
-
- // Custom operations support
-public:
- void
- bindKernelBuilder(const std::shared_ptr<onert::backend::custom::IKernelBuilder> &kernel_builder)
- {
- _kernel_builder = kernel_builder;
- }
-
- const std::shared_ptr<backend::custom::IKernelBuilder> &getKernelBuilder() const
- {
- return _kernel_builder;
- }
-
-private:
- std::shared_ptr<backend::custom::IKernelBuilder> _kernel_builder;
-
- // Accessors
-public:
- const OperandIndexSequence &getInputs() const { return _inputs; }
- OperandIndexSequence &getInputs() { return _inputs; }
- const OperandIndexSequence &getOutputs() const { return _outputs; }
- OperandIndexSequence &getOutputs() { return _outputs; }
- IOIndex getInputIndex(const std::string &name) const;
- IOIndex getOutputIndex(const std::string &name) const;
- const Operands &operands() const { return _operands; }
- Operands &operands() { return _operands; } // TODO Remove this non-const accessor
- const Operations &operations() const { return _operations; }
- Operations &operations() { return _operations; }
- const std::shared_ptr<Subgraphs> &subgraphs() const { return _subgraphs; }
- std::shared_ptr<Subgraphs> &subgraphs() { return _subgraphs; }
- Layout layout() const { return _layout; }
-
-private:
- Phase _phase{Phase::BUILDING};
- Operations _operations;
- Operands _operands;
- OperandIndexSequence _inputs;
- OperandIndexSequence _outputs;
- std::unordered_map<std::string, IOIndex> _name_to_input;
- std::unordered_map<std::string, IOIndex> _name_to_output;
- // Child subgraphs
- std::shared_ptr<Subgraphs> _subgraphs;
- // TFLite and circle's default layout is NHWC;
- Layout _layout{Layout::NHWC};
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_GRAPH_H__
diff --git a/runtime/onert/core/include/ir/Index.h b/runtime/onert/core/include/ir/Index.h
deleted file mode 100644
index 2538301a4..000000000
--- a/runtime/onert/core/include/ir/Index.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_INDEX_H__
-#define __ONERT_IR_INDEX_H__
-
-#include "util/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-
-struct OperationIndexTag;
-using OperationIndex = ::onert::util::Index<uint32_t, OperationIndexTag>;
-
-struct OperandIndexTag;
-using OperandIndex = ::onert::util::Index<uint32_t, OperandIndexTag>;
-
-struct IOIndexTag;
-using IOIndex = ::onert::util::Index<uint32_t, IOIndexTag>;
-
-struct OpSequenceIndexTag;
-using OpSequenceIndex = ::onert::util::Index<uint32_t, OpSequenceIndexTag>;
-
-struct SubgraphIndexTag;
-using SubgraphIndex = ::onert::util::Index<uint32_t, SubgraphIndexTag>;
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_INDEX_H__
diff --git a/runtime/onert/core/include/ir/InternalType.h b/runtime/onert/core/include/ir/InternalType.h
deleted file mode 100644
index 1d962c185..000000000
--- a/runtime/onert/core/include/ir/InternalType.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_INTERNAL_TYPE_H__
-#define __ONERT_IR_INTERNAL_TYPE_H__
-
-#include <cstdint>
-
-namespace onert
-{
-namespace ir
-{
-
-enum class Activation
-{
- NONE = 0,
- RELU = 1,
- RELU1 = 2,
- RELU6 = 3,
- TANH = 4,
- SIGMOID = 5
-};
-
-struct Stride
-{
- uint32_t vertical;
- uint32_t horizontal;
-};
-
-struct Dilation
-{
- uint32_t width_factor;
- uint32_t height_factor;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_INTERNAL_TYPE_H__
diff --git a/runtime/onert/core/include/ir/Layout.h b/runtime/onert/core/include/ir/Layout.h
deleted file mode 100644
index 082810172..000000000
--- a/runtime/onert/core/include/ir/Layout.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_LAYOUT_H__
-#define __ONERT_IR_LAYOUT_H__
-
-#include <functional>
-#include <string>
-
-namespace onert
-{
-namespace ir
-{
-
-enum class Layout
-{
- UNKNOWN = 0,
- NHWC,
- NCHW
-};
-
-inline std::string to_string(Layout layout)
-{
- switch (layout)
- {
- case Layout::NHWC:
- return std::string{"NHWC"};
- case Layout::NCHW:
- return std::string{"NCHW"};
- case Layout::UNKNOWN:
- return std::string{"UNKNOWN"};
- default:
- throw std::runtime_error("WRONG LAYOUT");
- }
-}
-
-} // namespace ir
-} // namespace onert
-
-namespace std
-{
-
-template <> struct hash<onert::ir::Layout>
-{
- size_t operator()(onert::ir::Layout value) const noexcept
- {
- using type = typename std::underlying_type<onert::ir::Layout>::type;
- return hash<type>()(static_cast<type>(value));
- }
-};
-
-} // namespace std
-
-#endif // __ONERT_IR_LAYOUT_H__
diff --git a/runtime/onert/core/include/ir/LowerInfoMap.h b/runtime/onert/core/include/ir/LowerInfoMap.h
deleted file mode 100644
index fbabaf39d..000000000
--- a/runtime/onert/core/include/ir/LowerInfoMap.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_LOWER_INFO_MAP_H__
-#define __ONERT_IR_LOWER_INFO_MAP_H__
-
-#include <memory>
-#include <unordered_map>
-
-#include "ir/operand/LowerInfo.h"
-#include "ir/operation/LowerInfo.h"
-#include "ir/OperandIndexMap.h"
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-
-struct LowerInfoMap
-{
- std::unordered_map<OpSequenceIndex, std::unique_ptr<operation::LowerInfo>> op_seq;
- OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_LOWER_INFO_MAP_H__
diff --git a/runtime/onert/core/include/ir/OpCode.h b/runtime/onert/core/include/ir/OpCode.h
deleted file mode 100644
index 32e47796e..000000000
--- a/runtime/onert/core/include/ir/OpCode.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OP_CODE_H__
-#define __ONERT_IR_OP_CODE_H__
-
-#include <functional>
-#include <stdint.h>
-#include <string>
-
-namespace onert
-{
-namespace ir
-{
-
-enum class OpCode
-{
- Invalid, //< Unused
-#define OP(Name) Name, //< All operations
-#include "ir/Operations.lst"
-#undef OP
- COUNT
-};
-
-const char *toString(OpCode opcode);
-OpCode toOpCode(const std::string str);
-
-} // namespace ir
-} // namespace onert
-
-namespace std
-{
-
-template <> struct hash<onert::ir::OpCode>
-{
- size_t operator()(onert::ir::OpCode value) const noexcept
- {
- using type = typename std::underlying_type<onert::ir::OpCode>::type;
- return hash<type>()(static_cast<type>(value));
- }
-};
-
-} // namespace std
-
-#endif // __ONERT_IR_OP_CODE_H__
diff --git a/runtime/onert/core/include/ir/OpSequence.h b/runtime/onert/core/include/ir/OpSequence.h
deleted file mode 100644
index 754cf3b34..000000000
--- a/runtime/onert/core/include/ir/OpSequence.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OP_SEQUENCE_H__
-#define __ONERT_IR_OP_SEQUENCE_H__
-
-#include <vector>
-#include <string>
-#include <memory>
-
-#include "ir/Layout.h"
-#include "ir/Index.h"
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class Operations;
-
-class OpSequence
-{
-public:
- explicit OpSequence(Layout layout);
- OpSequence(const OpSequence &) = delete;
-
-public:
- void accept(OperationVisitor &v) const;
-
-public:
- const OperandIndexSequence &getInputs() const { return _inputs; }
- const OperandIndexSequence &getOutputs() const { return _outputs; }
- void setInputs(const OperandIndexSequence &indexes) { _inputs = indexes; }
- void setOutputs(const OperandIndexSequence &indexes) { _outputs = indexes; }
- void replaceInputs(const OperandIndex &from, const OperandIndex &to)
- {
- _inputs.replace(from, to);
- }
- void replaceOutputs(const OperandIndex &from, const OperandIndex &to)
- {
- _outputs.replace(from, to);
- }
-
- void appendOperation(const OperationIndex &index) { _operations.emplace_back(index); }
-
- std::vector<OperationIndex> &operations(void) { return _operations; }
-
- const std::vector<OperationIndex> &operations(void) const { return _operations; }
-
- uint32_t size(void) const { return _operations.size(); }
-
-public:
- void remove(const OperationIndex &index);
-
- bool exist(const OperationIndex &index) const;
-
-public:
- Layout getLayout() const { return _layout; }
-
-public:
- std::vector<OperationIndex>::const_iterator begin() const { return _operations.begin(); }
- std::vector<OperationIndex>::const_iterator end() const { return _operations.end(); }
-
-public:
- /**
- * @brief Set @c true if any operation in this opSequence has dynamic input
- * or dynamic output;
- * @c false if all operations' inputs and outputs are static tensors
- */
- void has_dynamic_tensor(bool has_dynamic_tensor) { _has_dynamic_tensor = has_dynamic_tensor; }
- bool has_dynamic_tensor() const { return _has_dynamic_tensor; }
-
-private:
- OperandIndexSequence _inputs;
- OperandIndexSequence _outputs;
- std::vector<OperationIndex> _operations;
-
-private:
- Layout _layout;
- bool _has_dynamic_tensor;
-};
-
-std::string getStrFromOpSeq(const OpSequence &op_seq, const Operations &operations);
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OP_SEQUENCE_H__
diff --git a/runtime/onert/core/include/ir/OpSequences.h b/runtime/onert/core/include/ir/OpSequences.h
deleted file mode 100644
index ab258f395..000000000
--- a/runtime/onert/core/include/ir/OpSequences.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OP_SEQUENCES_H__
-#define __ONERT_IR_OP_SEQUENCES_H__
-
-#include "ir/Index.h"
-#include "ir/OpSequence.h"
-#include "util/ObjectManager.h"
-
-namespace onert
-{
-namespace ir
-{
-
-/**
- * @brief Class that manages OpSequence objects
- */
-class OpSequences : public util::ObjectManager<OpSequenceIndex, OpSequence>
-{
-public:
- /**
- * @brief Create an instance of OpSequence with given op and push it to objects
- *
- * @param[in] op_idx Operation index that is emplaced
- * @param[in] layout OpSequence's layout
- * @return OpSequenceIndex
- */
- OpSequenceIndex emplace(const OperationIndex &op_index, Layout layout);
-
- /**
- * @brief Push an instance of OpSequence to objects
- *
- * @param[in] op_seq An instance of OpSequence
- * @return OpSequenceIndex
- */
- OpSequenceIndex emplace(std::unique_ptr<OpSequence> &&op_seq);
- /**
- * @brief Check if an operation does exist in any OpSequences
- *
- * @param operation_index Operation index to find
- * @return true If such operation exists in any OpSequences otherwise false
- */
- bool containsOperation(const OperationIndex &operation_index) const;
- /**
- * @brief Find an operation from all OpSequences
- *
- * @param operation_index Operation index to find
- * @return OpSequenceIndex Index of OpSequence that contains given operation index
- */
- OpSequenceIndex getOperation(const OperationIndex &operation_index) const;
- /**
- * @brief Remove an operation from OpSequence
- *
- * @param operation_index Operation index to be removed
- */
- void removeFromOpSequence(const OperationIndex &operation_index);
-
-private:
- void cacheSequenceIndex(const OpSequenceIndex &seq_index, const OperationIndex &op_index) const;
- OpSequenceIndex *findSequenceIndex(const OperationIndex &operation_index) const;
-
- OpSequenceIndex findOperation(const OperationIndex &operation_index) const;
- mutable std::unordered_map<OperationIndex, OpSequenceIndex> _seq_indexes;
-};
-
-/**
- * @brief Dump OpSequences
- *
- * @param op_seqs Operation Sequences
- * @param operations Operation context
- */
-void dumpOpSequences(const OpSequences &op_seqs, const Operations &operations);
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OP_SEQUENCES_H__
diff --git a/runtime/onert/core/include/ir/Operand.h b/runtime/onert/core/include/ir/Operand.h
deleted file mode 100644
index f149a744b..000000000
--- a/runtime/onert/core/include/ir/Operand.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERAND_H__
-#define __ONERT_IR_OPERAND_H__
-
-#include <cassert>
-#include <cstdint>
-#include <memory>
-#include <algorithm>
-
-#include "ir/Data.h"
-#include "ir/DataType.h"
-#include "ir/OperandInfo.h"
-#include "ir/OperationIndexSet.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class Operand
-{
-public:
- explicit Operand(const Shape &shape, const TypeInfo &type)
- : _info{shape, type, MemAllocType::STATIC}
- {
- // DO NOTHING
- }
- explicit Operand(const Operand &) = default;
-
-public:
- const Shape &shape(void) const { return _info.shape(); }
- const TypeInfo &typeInfo(void) const { return _info.typeInfo(); }
- const OperandInfo &info(void) const { return _info; }
- OperandInfo &info(void) { return _info; }
- size_t operandSize(void) const;
-
- const OperationIndexSet &getUses() const { return _uses; }
- OperationIndex getDef() const { return _def; }
- void insertUse(const OperationIndex &idx);
- void removeUse(const OperationIndex &idx);
- void setDef(const OperationIndex &idx);
- void unsetDef();
-
-public:
- void type(const DataType type) { _info.type(type); };
-
-public:
- void data(std::shared_ptr<Data> &&data)
- {
- _data = std::move(data);
- _info.setAsConstant();
- }
- const Data *data(void) const { return _data.get(); }
-
- void releaseData(void) { _data.reset(); }
-
- std::shared_ptr<Data> shareData(void) const { return _data; }
-
- /**
- * @brief Get true if Operand is const, otherwise @c false
- a @return @c true if Operand is const, otherwise @c false
- */
- bool isConstant(void) const { return _info.isConstant(); }
-
-public:
- template <typename T, typename... Args> void data(Args &&... args)
- {
- data(std::make_unique<T>(std::forward<Args>(args)...));
- }
-
-public:
- template <typename T> T asScalar(void) const
- {
- assert((shape().rank() == 0) || ((shape().rank() == 1) && (shape().dim(0) == 1)));
- assert(_data != nullptr);
- assert((_data->base() != nullptr) && (_data->size() == sizeof(T)));
-
- return *(reinterpret_cast<const T *>(_data->base()));
- }
-
- template <typename T> std::vector<T> asVector() const
- {
- assert(_data != nullptr);
- assert(_data->size() % sizeof(T) == 0);
-
- const auto *base = reinterpret_cast<const T *>(_data->base());
- const std::size_t size = _data->size() / sizeof(T);
- return std::vector<T>(base, base + size);
- }
-
-private:
- OperandInfo _info;
- std::shared_ptr<Data> _data;
-
- OperationIndexSet _uses;
- OperationIndex _def;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERAND_H__
diff --git a/runtime/onert/core/include/ir/OperandConstraint.h b/runtime/onert/core/include/ir/OperandConstraint.h
deleted file mode 100644
index 8da922bea..000000000
--- a/runtime/onert/core/include/ir/OperandConstraint.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_MODEL_OPERAND_CONSTRAINT_H__
-#define __ONERT_MODEL_OPERAND_CONSTRAINT_H__
-
-#include <stdint.h>
-#include <limits>
-#include <set>
-
-namespace onert
-{
-namespace ir
-{
-
-class OperandConstraint
-{
-private:
- static const uint32_t INF = std::numeric_limits<uint32_t>::max();
-
-public:
- static OperandConstraint createAny() { return OperandConstraint{0u, INF}; }
- static OperandConstraint createExact(uint32_t exact) { return OperandConstraint{exact, exact}; }
- static OperandConstraint createAtMost(uint32_t end) { return OperandConstraint{0u, end}; }
- static OperandConstraint createAtLeast(uint32_t begin) { return OperandConstraint{begin, INF}; }
- static OperandConstraint createInRange(uint32_t begin, uint32_t end)
- {
- return OperandConstraint{begin, end};
- }
-
-private:
- OperandConstraint(uint32_t begin, uint32_t end) : _begin{begin}, _end{end} {}
-
-public:
- bool check(uint32_t ind) const { return _begin <= ind && ind <= _end; }
-
-private:
- uint32_t _begin;
- uint32_t _end;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_MODEL_OPERAND_CONSTRAINT_H__
diff --git a/runtime/onert/core/include/ir/OperandIndexMap.h b/runtime/onert/core/include/ir/OperandIndexMap.h
deleted file mode 100644
index 468162ffb..000000000
--- a/runtime/onert/core/include/ir/OperandIndexMap.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERAND_INDEX_MAP_H__
-#define __ONERT_IR_OPERAND_INDEX_MAP_H__
-
-#include <unordered_map>
-
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-
-template <typename T> using OperandIndexMap = std::unordered_map<OperandIndex, T>;
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERAND_INDEX_MAP_H__
diff --git a/runtime/onert/core/include/ir/OperandIndexSequence.h b/runtime/onert/core/include/ir/OperandIndexSequence.h
deleted file mode 100644
index 2f78cc832..000000000
--- a/runtime/onert/core/include/ir/OperandIndexSequence.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_MODEL_OPERAND_INDEX_SEQUENCE_H__
-#define __ONERT_MODEL_OPERAND_INDEX_SEQUENCE_H__
-
-#include <initializer_list>
-#include <vector>
-
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-
-enum class Remove
-{
- DUPLICATED = 1,
- UNDEFINED = 2
-};
-
-class OperandIndexSequence
-{
-public:
- OperandIndexSequence(void) = default;
- OperandIndexSequence(std::initializer_list<OperandIndex> list);
- OperandIndexSequence(std::initializer_list<int32_t> list);
- OperandIndexSequence(std::initializer_list<uint32_t> list);
-
-public:
- void append(const OperandIndex &index) { _vec.emplace_back(index); }
- void append(const OperandIndexSequence &l) { _vec.insert(_vec.end(), l.begin(), l.end()); }
-
-public:
- uint32_t size() const { return static_cast<uint32_t>(_vec.size()); }
- const OperandIndex &at(IOIndex set_index) const { return _vec.at(set_index.value()); }
- const OperandIndex &at(uint32_t index) const { return _vec.at(index); }
- bool contains(const OperandIndex &index) const;
- void replace(const OperandIndex &from, const OperandIndex &to);
- OperandIndexSequence operator|(ir::Remove filter) const
- {
- switch (filter)
- {
- case ir::Remove::DUPLICATED:
- {
- ir::OperandIndexSequence seq;
- for (const auto &ind : _vec)
- if (!seq.contains(ind))
- seq.append(ind);
- return seq;
- }
- case ir::Remove::UNDEFINED:
- {
- ir::OperandIndexSequence seq;
- for (const auto &ind : _vec)
- if (!ind.undefined())
- seq.append(ind);
- return seq;
- }
- }
- return *this;
- }
-
-public:
- OperandIndexSequence operator+(const OperandIndexSequence &other) const;
- friend std::ostream &operator<<(std::ostream &o, const OperandIndexSequence &op_seq);
-
-public:
- std::vector<OperandIndex>::const_iterator begin(void) const { return _vec.begin(); }
- std::vector<OperandIndex>::const_iterator end(void) const { return _vec.end(); }
- std::vector<OperandIndex>::iterator begin(void) { return _vec.begin(); }
- std::vector<OperandIndex>::iterator end(void) { return _vec.end(); }
-
-private:
- std::vector<OperandIndex> _vec;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_MODEL_OPERAND_INDEX_SET_H__
diff --git a/runtime/onert/core/include/ir/OperandInfo.h b/runtime/onert/core/include/ir/OperandInfo.h
deleted file mode 100644
index 67aeb0e65..000000000
--- a/runtime/onert/core/include/ir/OperandInfo.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file OperandInfo.h
- * @brief This file contains OperandInfo class
- */
-#ifndef __ONERT_IR_OPERAND_INFO_H__
-#define __ONERT_IR_OPERAND_INFO_H__
-
-#include "ir/Shape.h"
-#include "ir/TypeInfo.h"
-#include "ir/Layout.h"
-
-namespace onert
-{
-namespace ir
-{
-
-/**
- * @brief enum class indicating when the memory for a tensor is allocated
- */
-enum class MemAllocType
-{
- /**
- * @brief At compile time, shape for a tensor is known, thus requried memory capacity can be
- * calculated
- */
- STATIC,
-
- /**
- * @brief At kernel execution time, shape for a tensor is known, thus requried memory capacity
- * can be calculated
- */
- DYNAMIC
-};
-
-/**
- * @brief Class to save tensor's shape and type
- */
-class OperandInfo
-{
-public:
- /**
- * @brief Construct a new OperandInfo object (deleted)
- */
- OperandInfo() = delete;
-
- /**
- * @brief Construct a new OperandInfo object
- * @param[in] shape Tensor shape
- * @param[in] typeInfo Tensor data type
- * @param[in] alloc_type When the thesor needs memory allocation
- */
- OperandInfo(const Shape &shape, const TypeInfo &typeInfo, MemAllocType alloc_type,
- bool is_const = false)
- : _shape(shape), _typeInfo(typeInfo), _alloc_type(alloc_type), _const(is_const)
- {
- // DO NOTHING
- }
- /**
- * @brief Construct a new OperandInfo object
- * @param[in] origin info for copy
- */
- OperandInfo(const OperandInfo &origin) = default;
-
- /**
- * @brief Create a static OperandInfo object
- */
- static OperandInfo createStaticInfo(const Shape &shape, const TypeInfo &typeInfo)
- {
- return OperandInfo(shape, typeInfo, MemAllocType::STATIC);
- }
-
-public:
- /**
- * @brief Return tensor shape
- * @return Tensor shape
- */
- const Shape &shape() const { return _shape; }
- /**
- * @brief Return mutable tensor shape
- * @return Tensor shape
- */
- Shape &shape() { return _shape; }
- /**
- * @brief set shape
- */
- void shape(const ir::Shape &new_shape) { _shape = new_shape; }
- /**
- * @brief Return tensor data type info
- * @return Tensor data type
- */
- const TypeInfo &typeInfo() const { return _typeInfo; }
- /**
- * @brief Set tensor data type
- */
- void type(const DataType type) { _typeInfo.type(type); }
- /**
- * @brief Return size of tensor (bytes)
- * @return Tensor size
- */
- size_t total_size() const { return _shape.num_elements() * sizeOfDataType(_typeInfo.type()); }
-
- MemAllocType memAllocType() const { return _alloc_type; }
- void setAsConstant() { _const = true; }
- void setAsNonConst() { _const = false; }
- bool isConstant() const
- {
- // Impossible case: constant and dynamic operand
- assert(!(isDynamic() && _const));
- return _const;
- }
- bool isDynamic() const { return _alloc_type == MemAllocType::DYNAMIC; }
- void setDynamic() { _alloc_type = MemAllocType::DYNAMIC; }
-
-private:
- Shape _shape;
- TypeInfo _typeInfo;
-
- MemAllocType _alloc_type;
- bool _const;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERAND_INFO_H__
diff --git a/runtime/onert/core/include/ir/Operands.h b/runtime/onert/core/include/ir/Operands.h
deleted file mode 100644
index be7b7061f..000000000
--- a/runtime/onert/core/include/ir/Operands.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERANDS_H__
-#define __ONERT_IR_OPERANDS_H__
-
-#include <memory>
-#include <unordered_map>
-
-#include "ir/Operand.h"
-#include "ir/Index.h"
-#include "util/ObjectManager.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class Operands : public util::ObjectManager<OperandIndex, Operand>
-{
-public:
- Operands() = default;
- Operands(const Operands &obj);
- Operands(Operands &&) = default;
- Operands &operator=(const Operands &) = delete;
- Operands &operator=(Operands &&) = default;
- ~Operands() = default;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_MODEL_OPERAND_SET_H__
diff --git a/runtime/onert/core/include/ir/Operation.h b/runtime/onert/core/include/ir/Operation.h
deleted file mode 100644
index 89f7e340d..000000000
--- a/runtime/onert/core/include/ir/Operation.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_H__
-#define __ONERT_IR_OPERATION_H__
-
-#include <memory>
-
-#include "ir/OpCode.h"
-#include "ir/Operand.h"
-#include "ir/OperandIndexSequence.h"
-#include "ir/OperandConstraint.h"
-
-namespace onert
-{
-namespace ir
-{
-
-struct OperationVisitor;
-
-class Operation
-{
-public:
- // TODO Remove default parameter
- Operation(OperandConstraint input_constr, const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs,
- OperandConstraint output_constr = OperandConstraint::createAny());
- explicit Operation(OperandConstraint input_constr,
- OperandConstraint output_constr = OperandConstraint::createAny());
-
- Operation(const Operation &) = default;
- Operation(Operation &&) = default;
- Operation &operator=(const Operation &) = default;
- Operation &operator=(Operation &&) = default;
-
- virtual ~Operation();
-
-public:
- virtual void accept(OperationVisitor &v) const = 0;
- virtual std::string name() const { return std::string{toString(opcode())}; }
- virtual OpCode opcode() const = 0;
-
-public:
- void replaceInputs(const OperandIndex &from, const OperandIndex &to);
- void replaceOutputs(const OperandIndex &from, const OperandIndex &to);
- OperandIndexSequence &getInputs() { return _inputs; }
- const OperandIndexSequence &getInputs() const { return _inputs; }
- const OperandIndexSequence &getOutputs() const { return _outputs; }
- // It's for only input/output tensors but const data.
- void setInputs(const OperandIndexSequence &indexes);
- void setOutputs(const OperandIndexSequence &indexes);
-
-private:
- OperandConstraint _input_constr;
- OperandConstraint _output_constr;
- OperandIndexSequence _inputs;
- OperandIndexSequence _outputs;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_H__
diff --git a/runtime/onert/core/include/ir/OperationIndexMap.h b/runtime/onert/core/include/ir/OperationIndexMap.h
deleted file mode 100644
index 50c21c0ab..000000000
--- a/runtime/onert/core/include/ir/OperationIndexMap.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_INDEX_MAP_H__
-#define __ONERT_IR_OPERATION_INDEX_MAP_H__
-
-#include <unordered_map>
-
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-
-template <typename T> using OperationIndexMap = std::unordered_map<OperationIndex, T>;
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_INDEX_MAP_H__
diff --git a/runtime/onert/core/include/ir/OperationIndexSet.h b/runtime/onert/core/include/ir/OperationIndexSet.h
deleted file mode 100644
index 067aa19e1..000000000
--- a/runtime/onert/core/include/ir/OperationIndexSet.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_MODEL_OPERATION_INDEX_SET_H__
-#define __ONERT_MODEL_OPERATION_INDEX_SET_H__
-
-#include <algorithm>
-#include <cassert>
-#include <initializer_list>
-#include <unordered_set>
-
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class OperationIndexSet
-{
-public:
- OperationIndexSet(void) = default;
- OperationIndexSet(std::initializer_list<OperationIndex> list);
-
-public:
- void insert(const OperationIndex &index) { _set.insert(index); }
- void clear(void) { _set.clear(); }
- void remove(const OperationIndex &index)
- {
- auto itr = std::find(_set.begin(), _set.end(), index);
- assert(itr != _set.end());
- _set.erase(itr);
- }
-
-public:
- uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
- bool contains(const OperationIndex &index) const;
-
-public:
- std::unordered_set<OperationIndex>::iterator begin(void) { return _set.begin(); }
- std::unordered_set<OperationIndex>::iterator end(void) { return _set.end(); }
- std::unordered_set<OperationIndex>::const_iterator begin(void) const { return _set.begin(); }
- std::unordered_set<OperationIndex>::const_iterator end(void) const { return _set.end(); }
-
-private:
- std::unordered_set<OperationIndex> _set;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_MODEL_OPERATION_INDEX_SET_H__
diff --git a/runtime/onert/core/include/ir/OperationVisitor.h b/runtime/onert/core/include/ir/OperationVisitor.h
deleted file mode 100644
index a27770744..000000000
--- a/runtime/onert/core/include/ir/OperationVisitor.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_VISITOR_H__
-#define __ONERT_IR_OPERATION_VISITOR_H__
-
-#include "ir/Operations.Include.h"
-#include "ir/OpSequence.h"
-
-namespace onert
-{
-namespace ir
-{
-
-struct OperationVisitor
-{
- virtual ~OperationVisitor() = default;
-
-#define OP(InternalName) \
- virtual void visit(const operation::InternalName &) {}
-#include "ir/Operations.lst"
-#undef OP
-
- // This OpSequence node should be handled specially so that
- // Op.lst doesn't have OpSequence
- // TODO Remove by pushing it down to derived classes.
- virtual void visit(const OpSequence &)
- {
- throw std::runtime_error{
- "OperationVisitor: This does not privide visit function in OpSequence"};
- }
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_VISITOR_H__
diff --git a/runtime/onert/core/include/ir/Operations.Include.h b/runtime/onert/core/include/ir/Operations.Include.h
deleted file mode 100644
index 17bbbc29c..000000000
--- a/runtime/onert/core/include/ir/Operations.Include.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// This file has no ifdef guard intentionally
-
-#include "ir/operation/BatchToSpaceND.h"
-#include "ir/operation/BinaryArithmetic.h"
-#include "ir/operation/BroadcastTo.h"
-#include "ir/operation/Conv2D.h"
-#include "ir/operation/Pool2D.h"
-#include "ir/operation/Concat.h"
-#include "ir/operation/Reshape.h"
-#include "ir/operation/Fill.h"
-#include "ir/operation/FullyConnected.h"
-#include "ir/operation/Softmax.h"
-#include "ir/operation/Transpose.h"
-#include "ir/operation/Permute.h"
-#include "ir/operation/Reduce.h"
-#include "ir/operation/DepthwiseConv2D.h"
-#include "ir/operation/Slice.h"
-#include "ir/operation/StridedSlice.h"
-#include "ir/operation/Squeeze.h"
-#include "ir/operation/ElementwiseActivation.h"
-#include "ir/operation/ElementwiseBinary.h"
-#include "ir/operation/ElementwiseUnary.h"
-#include "ir/operation/ExpandDims.h"
-#include "ir/operation/Comparison.h"
-#include "ir/operation/LSTM.h"
-#include "ir/operation/ResizeBilinear.h"
-#include "ir/operation/ResizeNearestNeighbor.h"
-#include "ir/operation/Reverse.h"
-#include "ir/operation/RNN.h"
-#include "ir/operation/SpaceToBatchND.h"
-#include "ir/operation/SpaceToDepth.h"
-#include "ir/operation/EmbeddingLookup.h"
-#include "ir/operation/L2Normalization.h"
-#include "ir/operation/HashtableLookup.h"
-#include "ir/operation/InstanceNorm.h"
-#include "ir/operation/PReLU.h"
-#include "ir/operation/TransposeConv.h"
-#include "ir/operation/SquaredDifference.h"
-#include "ir/operation/TopKV2.h"
-#include "ir/operation/Gather.h"
-#include "ir/operation/ArgMax.h"
-#include "ir/operation/LocalResponseNormalization.h"
-#include "ir/operation/DepthToSpace.h"
-#include "ir/operation/Pack.h"
-#include "ir/operation/Select.h"
-#include "ir/operation/Split.h"
-#include "ir/operation/SplitV.h"
-#include "ir/operation/Unpack.h"
-#include "ir/operation/Pad.h"
-#include "ir/operation/Custom.h"
-#include "ir/operation/Einsum.h"
-#include "ir/operation/OneHot.h"
-#include "ir/operation/Shape.h"
-#include "ir/operation/ConvertFp32ToFp16.h"
-#include "ir/operation/ConvertFp16ToFp32.h"
-#include "ir/operation/If.h"
-#include "ir/operation/While.h"
-#include "ir/operation/Pow.h"
-#include "ir/operation/Tile.h"
-#include "ir/operation/Range.h"
-#include "ir/operation/Rank.h"
-#include "ir/operation/BCQFullyConnected.h"
-#include "ir/operation/BCQGather.h"
-#include "ir/operation/MatrixBandPart.h"
-#include "ir/operation/BatchMatMul.h"
-#include "ir/operation/FusedBatchNorm.h"
-#include "ir/operation/LogSoftmax.h"
-#include "ir/operation/StatelessRandomUniform.h"
diff --git a/runtime/onert/core/include/ir/Operations.h b/runtime/onert/core/include/ir/Operations.h
deleted file mode 100644
index 0b5fbf529..000000000
--- a/runtime/onert/core/include/ir/Operations.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATIONS_H__
-#define __ONERT_IR_OPERATIONS_H__
-
-#include "ir/Index.h"
-#include "ir/Operation.h"
-#include "util/ObjectManager.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class Operations : public util::ObjectManager<OperationIndex, Operation>
-{
-public:
- Operations() = default;
- Operations(const Operations &obj);
- Operations(Operations &&) = default;
- Operations &operator=(const Operations &) = delete;
- Operations &operator=(Operations &&) = default;
- ~Operations() = default;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_MODEL_OPERATION_MANAGER_H__
diff --git a/runtime/onert/core/include/ir/Operations.lst b/runtime/onert/core/include/ir/Operations.lst
deleted file mode 100644
index ab2146821..000000000
--- a/runtime/onert/core/include/ir/Operations.lst
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OP
-#error Define OP before including this file
-#endif
-
-// Internal Name
-OP(BatchToSpaceND)
-OP(BinaryArithmetic)
-OP(BroadcastTo)
-OP(Conv2D)
-OP(DepthwiseConv2D)
-OP(Pool2D)
-OP(Concat)
-OP(Fill)
-OP(FullyConnected)
-OP(Reduce)
-OP(Reshape)
-OP(Softmax)
-OP(Squeeze)
-OP(Slice)
-OP(StridedSlice)
-OP(Transpose)
-OP(ElementwiseActivation)
-OP(ElementwiseBinary)
-OP(ElementwiseUnary)
-OP(ExpandDims)
-OP(Comparison)
-OP(LSTM)
-OP(ResizeBilinear)
-OP(ResizeNearestNeighbor)
-OP(Reverse)
-OP(RNN)
-OP(SpaceToBatchND)
-OP(SpaceToDepth)
-OP(EmbeddingLookup)
-OP(L2Normalization)
-OP(HashtableLookup)
-OP(InstanceNorm)
-OP(PReLU)
-OP(TransposeConv)
-OP(SquaredDifference)
-OP(TopKV2)
-OP(Gather)
-OP(ArgMax)
-OP(Einsum)
-OP(LocalResponseNormalization)
-OP(DepthToSpace)
-OP(Pack)
-OP(Select)
-OP(Split)
-OP(SplitV)
-OP(Unpack)
-OP(Pad)
-OP(Custom)
-OP(Permute)
-OP(OneHot)
-OP(Shape)
-OP(ConvertFp32ToFp16)
-OP(ConvertFp16ToFp32)
-OP(If)
-OP(While)
-OP(Pow)
-OP(Tile)
-OP(Range)
-OP(Rank)
-OP(BCQFullyConnected)
-OP(BCQGather)
-OP(MatrixBandPart)
-OP(BatchMatMul)
-OP(FusedBatchNorm)
-OP(LogSoftmax)
-OP(StatelessRandomUniform)
diff --git a/runtime/onert/core/include/ir/Padding.h b/runtime/onert/core/include/ir/Padding.h
deleted file mode 100644
index 8a7bcdbeb..000000000
--- a/runtime/onert/core/include/ir/Padding.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_PADDIGN_H__
-#define __ONERT_IR_PADDIGN_H__
-
-#include "Shape.h"
-#include "InternalType.h"
-
-#include <cstdint>
-#include <string>
-
-namespace onert
-{
-namespace ir
-{
-
-enum class PaddingType
-{
- EXPLICIT = 0,
- SAME = 1,
- VALID = 2
-};
-
-/**
- * @brief Converts a internal padding type to const char*
- * @param[in] type Padding type to be converted
- * @return A string holding the converted value
- */
-inline std::string to_string(const PaddingType type);
-
-struct ExplicitPadding
-{
- uint32_t left;
- uint32_t right;
- uint32_t top;
- uint32_t bottom;
-};
-
-// TODO Resolve explicit padding param at frontend and save in value field
-struct Padding
-{
- Padding(void);
- Padding(PaddingType paddingType);
- Padding(uint32_t left, uint32_t right, uint32_t top, uint32_t bottom);
-
- // TODO Change to private field
- PaddingType type;
- ExplicitPadding param;
-};
-
-// TODO Change to Padding struct's method
-const ExplicitPadding calculatePadding(const Padding &padding, const FeatureShape &ifm_shape,
- const FeatureShape &ofm_shape, const Stride &stride,
- uint32_t kw, uint32_t kh, uint32_t dwf = 1,
- uint32_t dhf = 1);
-
-} // namespace ir
-} // namespace onert
-
-#endif
diff --git a/runtime/onert/core/include/ir/Shape.h b/runtime/onert/core/include/ir/Shape.h
deleted file mode 100644
index a0b4bb196..000000000
--- a/runtime/onert/core/include/ir/Shape.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-
-#ifndef __ONERT_IR_SHAPE_H__
-#define __ONERT_IR_SHAPE_H__
-
-#include "ir/Layout.h"
-
-#include <cassert>
-#include <cstdint>
-#include <vector>
-#include <algorithm>
-
-namespace onert
-{
-namespace ir
-{
-
-/**
- * @brief Structure to have values of dimensions for feature
- */
-struct FeatureShape
-{
- int32_t N; /**< The batch value */
- int32_t C; /**< The depth value */
- int32_t H; /**< The height value */
- int32_t W; /**< The width value */
-
- /**
- * @brief Construct FeatureShape object using default constrcutor
- */
- FeatureShape() = default;
- /**
- * @brief Construct FeatureShape object with three values of dimensions
- * @param[in] depth The depth value
- * @param[in] height The height value
- * @param[in] width The width value
- */
- FeatureShape(int32_t depth, int32_t height, int32_t width) : N{1}, C{depth}, H{height}, W{width}
- {
- // DO NOTHING
- }
- /**
- * @brief Construct FeatureShape object with four values of dimensions
- * @param[in] batch The batch value
- * @param[in] depth The depth value
- * @param[in] height The height value
- * @param[in] width The width value
- */
- FeatureShape(int32_t batch, int32_t depth, int32_t height, int32_t width)
- : N{batch}, C{depth}, H{height}, W{width}
- {
- // DO NOTHING
- }
-};
-
-struct Shape
-{
-public:
- static int32_t const UNSPECIFIED_DIM;
- static int32_t const MAX_RANK;
-
- Shape() = default;
-
- explicit Shape(int rank) : _dimensions(rank) {}
-
- Shape(std::initializer_list<int32_t> dimensions) : _dimensions(dimensions) {}
-
- int rank() const { return _dimensions.size(); }
-
- const std::vector<int32_t> &dims() const { return _dimensions; }
-
- int32_t dim(int i) const
- {
- assert(rank() != 0 || i == 0);
- return rank() == 0 ? 1 : _dimensions.at(i);
- }
-
- int32_t &dim(int i) { return _dimensions.at(i); }
-
- /**
- * @brief Returns number of elements when rank or dim is specified
- */
- uint64_t num_elements() const;
-
-public:
- FeatureShape asFeature(Layout layout) const;
-
- /**
- * @brief Add dimension to the beginning
- * @param[in] d dimension to add to the beginning
- */
- void prepend(int32_t d) { _dimensions.insert(_dimensions.cbegin(), d); }
-
- /**
- * @brief Add dimension to the end
- * @param[in] d dimension to add to the end
- */
- void append(int32_t d) { _dimensions.emplace_back(d); }
-
- /**
- * @brief Extend rank of Shape object for operand with param.
- * @param[in] to_rank The rank value to be extended to
- */
- void extendRank(int to_rank);
-
- /**
- * @brief Find out if any dimension is unspecified. If the rank is not specified, it returns
- * false.
- * \see https://developer.android.com/ndk/reference/struct/a-neural-networks-operand-type
- * @note base_loader set dim to -1 when there is unknown dim in input tensor
- */
- bool hasUnspecifiedDims() const
- {
- return (std::find(_dimensions.begin(), _dimensions.end(), UNSPECIFIED_DIM) !=
- _dimensions.end());
- }
-
-private:
- std::vector<int32_t> _dimensions;
-};
-
-inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); }
-inline bool operator!=(const Shape &lhs, const Shape &rhs) { return lhs.dims() != rhs.dims(); }
-
-Shape permuteShape(const Shape &shape, Layout frontend_layout, Layout backend_layout);
-
-/**
-* @brief Find out if tha rank in this shape is "maybe" unspecified.
-* Note that when rank == 0, shape could represent scalar or unspecified rank
-* \see https://developer.android.com/ndk/reference/struct/a-neural-networks-operand-type
-*/
-inline bool rankMaybeUnspecified(const ir::Shape &shape) { return (shape.rank() == 0); }
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_SHAPE_H__
diff --git a/runtime/onert/core/include/ir/Sparsity.h b/runtime/onert/core/include/ir/Sparsity.h
deleted file mode 100644
index ad4d8259b..000000000
--- a/runtime/onert/core/include/ir/Sparsity.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-
-#ifndef __ONERT_IR_SPARSITY_H__
-#define __ONERT_IR_SPARSITY_H__
-
-#include <cassert>
-#include <cstdint>
-#include <vector>
-
-namespace onert
-{
-namespace ir
-{
-
-/**
- * @brief Structure for Sparse Tensor
- */
-struct Sparsity
-{
-public:
- Sparsity() = default;
- Sparsity(std::vector<uint16_t> &&w1_segments, std::vector<uint16_t> &&w1_indices,
- std::vector<int32_t> &&block_size)
- : _w1_segments(w1_segments), _w1_indices(w1_indices), _block_size(block_size)
- {
- }
-
- /**
- * @brief Returns segments array. See compressed sparse row format.
- */
- const uint16_t *w1_segments() const { return _w1_segments.data(); }
- /**
- * @brief Returns indices array. See compressed sparse row format.
- */
- const uint16_t *w1_indices() const { return _w1_indices.data(); }
- /**
- * @brief Returns block size which is used for block sparsity
- */
- const std::vector<int32_t> &block_size() const { return _block_size; }
-
-private:
- std::vector<uint16_t> _w1_segments;
- std::vector<uint16_t> _w1_indices;
- std::vector<int32_t> _block_size;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_SPARSITY_H__
diff --git a/runtime/onert/core/include/ir/Subgraphs.h b/runtime/onert/core/include/ir/Subgraphs.h
deleted file mode 100644
index 7b4c33b76..000000000
--- a/runtime/onert/core/include/ir/Subgraphs.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_SUBGRAPHS_H__
-#define __ONERT_IR_SUBGRAPHS_H__
-
-#include <memory>
-#include <unordered_map>
-
-#include "ir/Index.h"
-#include "util/ObjectManager.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class Graph;
-
-class Subgraphs
-{
-public:
- Subgraphs() = default;
- Subgraphs(const Subgraphs &obj) = default;
- Subgraphs(Subgraphs &&) = default;
- Subgraphs &operator=(const Subgraphs &) = default;
- Subgraphs &operator=(Subgraphs &&) = default;
- ~Subgraphs() = default;
-
- /**
- * @brief Put subgraph in the container with a new Index for that
- *
- * @param[in] subg Subgraph to be pushed
- * @param[in] index Index of subgraph to be pushed
- * @return Created
- */
- void push(SubgraphIndex index, const std::shared_ptr<Graph> &subg) { _subgraphs[index] = subg; }
-
- /**
- * @brief Remove the subgraph that is associated with the given index
- *
- * @param[in] index Index of the subgraph to be removed
- * @return N/A
- */
- void remove(const SubgraphIndex &index) { _subgraphs.erase(index); }
-
- /**
- * @brief Get the subgraph that is associated with the given index
- *
- * @param[in] index Index of the subgraph to be returned
- * @return Graph
- */
- const std::shared_ptr<Graph> &at(const SubgraphIndex &index) const
- {
- return _subgraphs.at(index);
- }
- /**
- * @brief Get the subgraph that is associated with the given index
- *
- * @param[in] index Index of the subgraph to be returned
- * @return Graph
- */
- std::shared_ptr<Graph> &at(const SubgraphIndex &index) { return _subgraphs.at(index); }
-
- /**
- * @brief Get the subgraph that is associated with the given index
- *
- * @param[in] index Index of the subgraph to be returned
- * @return true if such entry exists otherwise false
- */
- bool exist(const SubgraphIndex &index) const
- {
- auto it = _subgraphs.find(index);
- return it != _subgraphs.end();
- }
-
- /**
- * @brief Iterate over the container with given function
- *
- * @param[in] fn Function to be run for every container entry
- * @return N/A
- */
- void iterate(const std::function<void(const SubgraphIndex &, const Graph &)> &fn) const
- {
- for (const auto &e : _subgraphs)
- {
- fn(e.first, *e.second);
- }
- }
-
- /**
- * @brief Iterate over the container with given function
- *
- * @param[in] fn Function to be run for every container entry
- * @return N/A
- */
- void iterate(const std::function<void(const SubgraphIndex &, Graph &)> &fn)
- {
- for (const auto &e : _subgraphs)
- {
- fn(e.first, *e.second);
- }
- }
-
- /**
- * @brief Get count of Subgraphs
- *
- * @return count of Subgraphs
- */
- size_t count() { return _subgraphs.size(); }
-
- /**
- * @brief Return the primary subgraph
- *
- * @return std::shared_ptr<Graph> Primary sugraph
- */
- std::shared_ptr<Graph> primary() const { return _subgraphs.at(SubgraphIndex{0}); }
-
-private:
- std::unordered_map<SubgraphIndex, std::shared_ptr<Graph>> _subgraphs;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_SUBGRAPHS_H__
diff --git a/runtime/onert/core/include/ir/TypeInfo.h b/runtime/onert/core/include/ir/TypeInfo.h
deleted file mode 100644
index a1ae4d2e4..000000000
--- a/runtime/onert/core/include/ir/TypeInfo.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_TYPEINFO_H__
-#define __ONERT_IR_TYPEINFO_H__
-
-#include <cstdint>
-#include <memory>
-#include <vector>
-
-#include "ir/DataType.h"
-#include "ir/Sparsity.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class TypeInfo
-{
-public:
- TypeInfo() = delete;
-
- explicit TypeInfo(DataType type, float scale = 0, int32_t offset = 0)
- : _type(type), _scale(scale), _offset(offset), _sparsity(nullptr)
- {
- }
-
-public:
- DataType type() const { return _type; }
- float scale() const { return _scale; }
- int32_t offset() const { return _offset; }
- const ir::Sparsity *sparsity() const { return _sparsity.get(); }
- void sparsity(std::shared_ptr<ir::Sparsity> sparsity) { _sparsity = sparsity; }
-
-public:
- void type(const DataType type) { _type = type; }
-
-private:
- DataType _type;
- // for quantization
- float _scale;
- int32_t _offset;
- // for sparsity
- std::shared_ptr<ir::Sparsity> _sparsity;
-};
-
-bool operator==(const TypeInfo &lhs, const TypeInfo &rhs);
-bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs);
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_TYPEINFO_H__
diff --git a/runtime/onert/core/include/ir/operand/LowerInfo.h b/runtime/onert/core/include/ir/operand/LowerInfo.h
deleted file mode 100644
index b7f032b02..000000000
--- a/runtime/onert/core/include/ir/operand/LowerInfo.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERAND_LOWER_INFO_H__
-#define __ONERT_IR_OPERAND_LOWER_INFO_H__
-
-#include <functional>
-#include <stdint.h>
-
-#include "ir/operand/PermuteFactor.h"
-#include "util/Set.h"
-
-namespace onert
-{
-namespace backend
-{
-class Backend;
-} // namespace backend
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-namespace operand
-{
-using PermuteFactorSet = util::Set<PermuteFactor>;
-
-class LowerInfo
-{
-public:
- LowerInfo()
- {
- // DO NOTHING
- }
-
-public:
- const PermuteFactorSet &def_factors(void) const { return _def_factors; }
- const PermuteFactorSet &use_factors(void) const { return _use_factors; }
-
-public:
- void addDefPermuteFactor(const PermuteFactor &factor) { _def_factors.add(factor); }
- void addUsePermuteFactor(const PermuteFactor &factor) { _use_factors.add(factor); }
- void removeDefPermuteFactor(const PermuteFactor &factor) { _def_factors.remove(factor); }
- void removeUsePermuteFactor(const PermuteFactor &factor) { _use_factors.remove(factor); }
-
-private:
- PermuteFactorSet _def_factors;
- PermuteFactorSet _use_factors;
-};
-
-} // namespace operand
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERAND_LOWER_INFO_H__
diff --git a/runtime/onert/core/include/ir/operand/PermuteFactor.h b/runtime/onert/core/include/ir/operand/PermuteFactor.h
deleted file mode 100644
index d0bfed337..000000000
--- a/runtime/onert/core/include/ir/operand/PermuteFactor.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file PermuteFactor.h
- * @brief This file contains onert::ir::operand::PermuteFactor class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __ONERT_IR_OPERAND_PERMUTE_FACTOR_H__
-#define __ONERT_IR_OPERAND_PERMUTE_FACTOR_H__
-
-#include <functional>
-
-#include "ir/Layout.h"
-
-namespace onert
-{
-namespace backend
-{
-class Backend;
-} // namespace backend
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-namespace operand
-{
-
-/**
- * @brief Class that has factors of permutation
- */
-class PermuteFactor
-{
-public:
- /**
- * @brief Construct PermuteFactor object.
- * @param backend The backend factor
- * @param layout The layout factor
- */
- PermuteFactor(const backend::Backend *backend, Layout layout) : _backend{backend}, _layout{layout}
- {
- // DO NOTHING
- }
- /**
- * @brief Construct PermuteFactor object by copy semantics.
- */
- PermuteFactor(const PermuteFactor &f) : _backend{f._backend}, _layout{f._layout}
- {
- // DO NOTHING
- }
- /**
- * @brief Construct PermuteFactor object by move semantics.
- */
- PermuteFactor(PermuteFactor &&) = default;
-
-public:
- /**
- * @brief Get backend
- *
- * @return Backend factor
- */
- const backend::Backend *backend() const { return _backend; }
- /**
- * @brief Get layout
- *
- * @return Layout factor
- */
- Layout layout() const { return _layout; }
-
-public:
- /**
- * @brief operator overloading function for `==`
- *
- * @return Whether two PermuteFactor are the same
- */
- bool operator==(const PermuteFactor &other) const
- {
- return _backend == other.backend() && _layout == other.layout();
- }
- /**
- * @brief operator overloading function for `!=`
- *
- * @return Whether two PermuteFactor are differenct
- */
- bool operator!=(const PermuteFactor &other) const { return !(*this == other); }
-
-private:
- const backend::Backend *_backend{nullptr};
- Layout _layout{Layout::UNKNOWN};
-};
-
-} // namespace operand
-} // namespace ir
-} // namespace onert
-
-namespace std
-{
-
-/**
- * @brief Structure that provides hash value of PermuteFactor
- */
-template <> struct hash<onert::ir::operand::PermuteFactor>
-{
- size_t operator()(const onert::ir::operand::PermuteFactor &factor) const noexcept
- {
- hash<const onert::backend::Backend *> b_hash{};
- hash<onert::ir::Layout> l_hash{};
- return b_hash(factor.backend()) ^ (l_hash(factor.layout()) << 1);
- }
-};
-
-} // namespace std
-
-#endif // __ONERT_IR_OPERAND_PERMUTE_FACTOR_H__
diff --git a/runtime/onert/core/include/ir/operation/ArgMax.h b/runtime/onert/core/include/ir/operation/ArgMax.h
deleted file mode 100644
index ea7eabb83..000000000
--- a/runtime/onert/core/include/ir/operation/ArgMax.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_ARG_MAX_H__
-#define __ONERT_IR_OPERATION_ARG_MAX_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ArgMax : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- AXIS = 1
- };
-
- struct Param
- {
- DataType output_type;
- };
-
-public:
- ArgMax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::ArgMax; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_ARG_MAX_H__
diff --git a/runtime/onert/core/include/ir/operation/BCQFullyConnected.h b/runtime/onert/core/include/ir/operation/BCQFullyConnected.h
deleted file mode 100644
index 4bf3a0bdb..000000000
--- a/runtime/onert/core/include/ir/operation/BCQFullyConnected.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_BCQFULLYCONNECTED_H__
-#define __ONERT_IR_OPERATION_BCQFULLYCONNECTED_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class BCQFullyConnected : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- WEIGHTS_SCALES,
- WEIGHTS_BINARY,
- BIAS,
- WEIGHTS_CLUSTERS,
- };
-
- struct Param
- {
- uint32_t weights_hidden_size;
- Activation activation;
- };
-
-public:
- BCQFullyConnected(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::BCQFullyConnected; }
-
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_BCQFULLYCONNECTED_H__
diff --git a/runtime/onert/core/include/ir/operation/BCQGather.h b/runtime/onert/core/include/ir/operation/BCQGather.h
deleted file mode 100644
index 1349b8c6d..000000000
--- a/runtime/onert/core/include/ir/operation/BCQGather.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_BCQGATHER_H__
-#define __ONERT_IR_OPERATION_BCQGATHER_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class BCQGather : public Operation
-{
-public:
- enum Input
- {
- INPUT_SCALES = 0,
- INPUT_BINARY,
- INDICES,
- INPUT_CLUSTERS,
- };
-
- struct Param
- {
- uint32_t input_hidden_size;
- uint32_t axis;
- };
-
-public:
- BCQGather(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::BCQGather; }
-
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_BCQGATHER_H__
diff --git a/runtime/onert/core/include/ir/operation/BatchMatMul.h b/runtime/onert/core/include/ir/operation/BatchMatMul.h
deleted file mode 100644
index 183f60abe..000000000
--- a/runtime/onert/core/include/ir/operation/BatchMatMul.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_BATCH_MATMUL_H__
-#define __ONERT_IR_OPERATION_BATCH_MATMUL_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class BatchMatMul : public Operation
-{
-public:
- enum Input
- {
- LHS = 0,
- RHS
- };
-
- struct Param
- {
- bool adj_x;
- bool adj_y;
- };
-
-public:
- BatchMatMul(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::BatchMatMul; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_BATCH_MATMUL_H__
diff --git a/runtime/onert/core/include/ir/operation/BatchToSpaceND.h b/runtime/onert/core/include/ir/operation/BatchToSpaceND.h
deleted file mode 100644
index 3e69b42c7..000000000
--- a/runtime/onert/core/include/ir/operation/BatchToSpaceND.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_BATCH_TO_SPACE_ND_H__
-#define __ONERT_IR_OPERATION_BATCH_TO_SPACE_ND_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class BatchToSpaceND : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- BLOCK_SIZE = 1,
- CROPS_DATA = 2
- };
-
-public:
- BatchToSpaceND(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::BatchToSpaceND; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_BATCH_TO_SPACE_ND_H__
diff --git a/runtime/onert/core/include/ir/operation/BinaryArithmetic.h b/runtime/onert/core/include/ir/operation/BinaryArithmetic.h
deleted file mode 100644
index 110fff565..000000000
--- a/runtime/onert/core/include/ir/operation/BinaryArithmetic.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_BINARY_ARITHMETIC_H__
-#define __ONERT_IR_OPERATION_BINARY_ARITHMETIC_H__
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class BinaryArithmetic final : public Operation
-{
-public:
- enum Input
- {
- LHS = 0,
- RHS
- };
-
- enum class ArithmeticType
- {
- ADD,
- SUB,
- MUL,
- DIV
- };
-
- struct Param
- {
- ArithmeticType arithmetic_type;
- Activation activation;
- };
-
-public:
- BinaryArithmetic(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- std::string name() const override;
- OpCode opcode() const final { return OpCode::BinaryArithmetic; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_BINARY_ARITHMETIC_H__
diff --git a/runtime/onert/core/include/ir/operation/BroadcastTo.h b/runtime/onert/core/include/ir/operation/BroadcastTo.h
deleted file mode 100644
index 06c033497..000000000
--- a/runtime/onert/core/include/ir/operation/BroadcastTo.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_BROADCAST_TO_H__
-#define __ONERT_IR_OPERATION_BROADCAST_TO_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class BroadcastTo : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- SHAPE = 1
- };
-
-public:
- BroadcastTo(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::BroadcastTo; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_BROADCAST_TO_H__
diff --git a/runtime/onert/core/include/ir/operation/Comparison.h b/runtime/onert/core/include/ir/operation/Comparison.h
deleted file mode 100644
index 8b53f163b..000000000
--- a/runtime/onert/core/include/ir/operation/Comparison.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_COMPARISON_H__
-#define __ONERT_IR_OPERATION_COMPARISON_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Comparison : public Operation
-{
-public:
- enum Input
- {
- INPUT0 = 0,
- INPUT1
- };
-
- enum class ComparisonType
- {
- Equal,
- NotEqual,
- Greater,
- GreaterEqual,
- Less,
- LessEqual
- };
-
- struct Param
- {
- ComparisonType comparison_type;
- };
-
-public:
- Comparison(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Comparison; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_COMPARISON_H__
diff --git a/runtime/onert/core/include/ir/operation/Concat.h b/runtime/onert/core/include/ir/operation/Concat.h
deleted file mode 100644
index 2dff04e93..000000000
--- a/runtime/onert/core/include/ir/operation/Concat.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_CONCAT_H__
-#define __ONERT_IR_OPERATION_CONCAT_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Concat : public Operation
-{
-public:
- struct Param
- {
- int32_t axis;
- };
-
-public:
- Concat(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Concat; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_CONCAT_H__
diff --git a/runtime/onert/core/include/ir/operation/Conv2D.h b/runtime/onert/core/include/ir/operation/Conv2D.h
deleted file mode 100644
index d8c7b671b..000000000
--- a/runtime/onert/core/include/ir/operation/Conv2D.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_CONV2D_H__
-#define __ONERT_IR_OPERATION_CONV2D_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-#include "ir/Padding.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Conv2D : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- KERNEL,
- BIAS
- };
-
- struct Param
- {
- Stride stride;
- Padding padding;
- Activation activation;
- Dilation dilation;
- };
-
-public:
- Conv2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Conv2D; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_CONV2D_H__
diff --git a/runtime/onert/core/include/ir/operation/ConvertFp16ToFp32.h b/runtime/onert/core/include/ir/operation/ConvertFp16ToFp32.h
deleted file mode 100644
index 15c48357f..000000000
--- a/runtime/onert/core/include/ir/operation/ConvertFp16ToFp32.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_CONVERT_FP16_TO_FP32_H__
-#define __ONERT_IR_OPERATION_CONVERT_FP16_TO_FP32_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ConvertFp16ToFp32 : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
-public:
- ConvertFp16ToFp32(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::ConvertFp16ToFp32; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_CONVERT_FP16_TO_FP32_H__
diff --git a/runtime/onert/core/include/ir/operation/ConvertFp32ToFp16.h b/runtime/onert/core/include/ir/operation/ConvertFp32ToFp16.h
deleted file mode 100644
index 983ce4891..000000000
--- a/runtime/onert/core/include/ir/operation/ConvertFp32ToFp16.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_CONVERT_FP32_TO_FP16_H__
-#define __ONERT_IR_OPERATION_CONVERT_FP32_TO_FP16_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ConvertFp32ToFp16 : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
-public:
- ConvertFp32ToFp16(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::ConvertFp32ToFp16; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_CONVERT_FP32_TO_FP16_H__
diff --git a/runtime/onert/core/include/ir/operation/Custom.h b/runtime/onert/core/include/ir/operation/Custom.h
deleted file mode 100644
index c2a4b354a..000000000
--- a/runtime/onert/core/include/ir/operation/Custom.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __ONERT_IR_OPERATION_CUSTOM_H__
-#define __ONERT_IR_OPERATION_CUSTOM_H__
-
-#include "ir/Operation.h"
-
-#include <cstring>
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Custom : public Operation
-{
-public:
- struct Userdata
- {
- char *data;
- size_t size;
-
- Userdata() : data{nullptr}, size{0} {}
- Userdata(const Userdata &o)
- {
- size = o.size;
- data = new char[size];
- std::memcpy(data, o.data, size);
- }
- ~Userdata() { delete[] data; }
- };
-
- Custom(OperandConstraint input_constr, const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, std::string id, const Userdata &userdata);
-
- void accept(OperationVisitor &v) const override;
-
-public:
- /**
- * @return unique operation identifier
- */
- const std::string &id() const;
-
- std::string name() const override;
- OpCode opcode() const final { return OpCode::Custom; }
-
- /**
- * @return user-provided data
- */
- const Userdata &userdata() const;
-
-private:
- std::string _id;
- Userdata _userdata;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-#endif // __ONERT_IR_OPERATION_CUSTOM_H__
diff --git a/runtime/onert/core/include/ir/operation/DepthToSpace.h b/runtime/onert/core/include/ir/operation/DepthToSpace.h
deleted file mode 100644
index a5315051d..000000000
--- a/runtime/onert/core/include/ir/operation/DepthToSpace.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_DEPTH_TO_SPACE_H__
-#define __ONERT_IR_OPERATION_DEPTH_TO_SPACE_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class DepthToSpace : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- struct Param
- {
- std::int32_t block_size;
- };
-
-public:
- DepthToSpace(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::DepthToSpace; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_DEPTH_TO_SPACE_H__
diff --git a/runtime/onert/core/include/ir/operation/DepthwiseConv2D.h b/runtime/onert/core/include/ir/operation/DepthwiseConv2D.h
deleted file mode 100644
index b10bf708c..000000000
--- a/runtime/onert/core/include/ir/operation/DepthwiseConv2D.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_DEPTHWISECONV2D_H__
-#define __ONERT_IR_OPERATION_DEPTHWISECONV2D_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-#include "ir/Padding.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class DepthwiseConv2D : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- KERNEL,
- BIAS
- };
-
- struct Param
- {
- Stride stride;
- Padding padding;
- uint32_t multiplier;
- Activation activation;
- };
-
-public:
- DepthwiseConv2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::DepthwiseConv2D; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_DEPTHWISECONV2D_H__
diff --git a/runtime/onert/core/include/ir/operation/Einsum.h b/runtime/onert/core/include/ir/operation/Einsum.h
deleted file mode 100644
index 9892c24b8..000000000
--- a/runtime/onert/core/include/ir/operation/Einsum.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_EINSUM_H__
-#define __ONERT_IR_OPERATION_EINSUM_H__
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Einsum : public Operation
-{
-public:
- struct Param
- {
- std::string equation;
- };
-
-public:
- Einsum(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Einsum; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_EINSUM_H__
diff --git a/runtime/onert/core/include/ir/operation/ElementwiseActivation.h b/runtime/onert/core/include/ir/operation/ElementwiseActivation.h
deleted file mode 100644
index b2a1d3d2d..000000000
--- a/runtime/onert/core/include/ir/operation/ElementwiseActivation.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_ELEMENTWISE_ACTIVATION_H__
-#define __ONERT_IR_OPERATION_ELEMENTWISE_ACTIVATION_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ElementwiseActivation : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- enum class Type
- {
- ELU,
- LOGISTIC,
- RELU,
- TANH,
- LEAKY_RELU
- };
-
- struct Param
- {
- Type op_type;
- float alpha;
- float beta;
- Param() : op_type(Type::ELU), alpha(0.0f), beta(0.0f) {}
- };
-
-public:
- ElementwiseActivation(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- std::string name() const override;
- OpCode opcode() const final { return OpCode::ElementwiseActivation; }
-
-public:
- const Param &param() const { return _param; }
-
-public:
- static float infinity;
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_ELEMENTWISE_ACTIVATION_H__
diff --git a/runtime/onert/core/include/ir/operation/ElementwiseBinary.h b/runtime/onert/core/include/ir/operation/ElementwiseBinary.h
deleted file mode 100644
index dd07f6058..000000000
--- a/runtime/onert/core/include/ir/operation/ElementwiseBinary.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_ELEMENTWISEBINARY_H__
-#define __ONERT_IR_OPERATION_ELEMENTWISEBINARY_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ElementwiseBinary : public Operation
-{
-public:
- enum Input
- {
- LHS = 0,
- RHS
- };
-
- enum class ElementwiseBinaryType
- {
- LOGICAL_AND,
- LOGICAL_OR,
- MAX,
- MIN
- };
-
- struct Param
- {
- ElementwiseBinaryType op_type;
- };
-
-public:
- ElementwiseBinary(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- std::string name() const override;
- OpCode opcode() const final { return OpCode::ElementwiseBinary; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_ELEMENTWISEBINARY_H__
diff --git a/runtime/onert/core/include/ir/operation/ElementwiseUnary.h b/runtime/onert/core/include/ir/operation/ElementwiseUnary.h
deleted file mode 100644
index c40778a56..000000000
--- a/runtime/onert/core/include/ir/operation/ElementwiseUnary.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_ELEMENTWISEUNARY_H__
-#define __ONERT_IR_OPERATION_ELEMENTWISEUNARY_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ElementwiseUnary : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- enum class Type
- {
- ABS,
- CAST,
- COS,
- DEQUANTIZE,
- ERF,
- EXP,
- FLOOR,
- LOG,
- LOGICAL_NOT,
- NEG,
- QUANTIZE,
- ROUND,
- RSQRT,
- SIN,
- SQRT,
- SQURE,
- ZEROS_LIKE
- };
-
- struct Param
- {
- Type op_type;
- };
-
-public:
- ElementwiseUnary(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- std::string name() const override;
- OpCode opcode() const final { return OpCode::ElementwiseUnary; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_ELEMENTWISEUNARY_H__
diff --git a/runtime/onert/core/include/ir/operation/EmbeddingLookup.h b/runtime/onert/core/include/ir/operation/EmbeddingLookup.h
deleted file mode 100644
index 54064faf0..000000000
--- a/runtime/onert/core/include/ir/operation/EmbeddingLookup.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_EMBEDDING_LOOKUP_H__
-#define __ONERT_IR_OPERATION_EMBEDDING_LOOKUP_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class EmbeddingLookup : public Operation
-{
-public:
- enum Input
- {
- LOOKUPS = 0,
- VALUES = 1
- };
-
-public:
- EmbeddingLookup(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::EmbeddingLookup; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_EMBEDDING_LOOKUP_H__
diff --git a/runtime/onert/core/include/ir/operation/ExpandDims.h b/runtime/onert/core/include/ir/operation/ExpandDims.h
deleted file mode 100644
index 09669a40b..000000000
--- a/runtime/onert/core/include/ir/operation/ExpandDims.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_EXPANDDIMS_H__
-#define __ONERT_IR_OPERATION_EXPANDDIMS_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ExpandDims : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- AXIS = 1
- };
-
-public:
- ExpandDims(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::ExpandDims; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_EXPANDDIMS_H__
diff --git a/runtime/onert/core/include/ir/operation/Fill.h b/runtime/onert/core/include/ir/operation/Fill.h
deleted file mode 100644
index 524e41385..000000000
--- a/runtime/onert/core/include/ir/operation/Fill.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_FILL_H__
-#define __ONERT_IR_OPERATION_FILL_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Fill : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- VALUE,
- };
-
-public:
- Fill(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Fill; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_FILL_H__
diff --git a/runtime/onert/core/include/ir/operation/FullyConnected.h b/runtime/onert/core/include/ir/operation/FullyConnected.h
deleted file mode 100644
index b6484ae4d..000000000
--- a/runtime/onert/core/include/ir/operation/FullyConnected.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_FULLYCONNECTED_H__
-#define __ONERT_IR_OPERATION_FULLYCONNECTED_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class FullyConnected : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- WEIGHT,
- BIAS
- };
-
- struct Param
- {
- Activation activation;
- };
-
-public:
- FullyConnected(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::FullyConnected; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_FULLYCONNECTED_H__
diff --git a/runtime/onert/core/include/ir/operation/FusedBatchNorm.h b/runtime/onert/core/include/ir/operation/FusedBatchNorm.h
deleted file mode 100644
index 989ee2b98..000000000
--- a/runtime/onert/core/include/ir/operation/FusedBatchNorm.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_FUSEDBATCHNORM_H__
-#define __ONERT_IR_OPERATION_FUSEDBATCHNORM_H__
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class FusedBatchNorm : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- SCALE,
- OFFSET,
- MEAN,
- VARIANCE
- };
-
- struct Param
- {
- bool is_training;
- std::string data_format;
- float epsilon;
- };
-
-public:
- FusedBatchNorm(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::FusedBatchNorm; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_FUSEDBATCHNORM_H__
diff --git a/runtime/onert/core/include/ir/operation/Gather.h b/runtime/onert/core/include/ir/operation/Gather.h
deleted file mode 100644
index 544eb3b19..000000000
--- a/runtime/onert/core/include/ir/operation/Gather.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_GATHER_H__
-#define __ONERT_IR_OPERATION_GATHER_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Gather : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- INDICES,
- };
-
- struct Param
- {
- int32_t axis;
- };
-
-public:
- Gather(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Gather; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_GATHER_H__
diff --git a/runtime/onert/core/include/ir/operation/HashtableLookup.h b/runtime/onert/core/include/ir/operation/HashtableLookup.h
deleted file mode 100644
index 4b6cf9362..000000000
--- a/runtime/onert/core/include/ir/operation/HashtableLookup.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_HASHTABLE_LOOKUP_H__
-#define __ONERT_IR_OPERATION_HASHTABLE_LOOKUP_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class HashtableLookup : public Operation
-{
-public:
- enum Input
- {
- LOOKUPS = 0,
- KEYS = 1,
- VALUES = 2
- };
-
- enum Output
- {
- OUTPUT = 0,
- HITS = 1
- };
-
-public:
- HashtableLookup(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::HashtableLookup; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_HASHTABLE_LOOKUP_H__
diff --git a/runtime/onert/core/include/ir/operation/If.h b/runtime/onert/core/include/ir/operation/If.h
deleted file mode 100644
index 41cd4e239..000000000
--- a/runtime/onert/core/include/ir/operation/If.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_IF_H__
-#define __ONERT_IR_OPERATION_IF_H__
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class If : public Operation
-{
-public:
- struct Param
- {
- SubgraphIndex then_subg_index;
- SubgraphIndex else_subg_index;
- };
-
-public:
- If(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs, const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::If; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_IF_H__
diff --git a/runtime/onert/core/include/ir/operation/InstanceNorm.h b/runtime/onert/core/include/ir/operation/InstanceNorm.h
deleted file mode 100644
index 6a3bb5189..000000000
--- a/runtime/onert/core/include/ir/operation/InstanceNorm.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_INSTANCE_NORM_H__
-#define __ONERT_IR_OPERATION_INSTANCE_NORM_H__
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class InstanceNorm : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- GAMMA,
- BETA
- };
-
- struct Param
- {
- Activation activation;
- float epsilon;
- };
-
-public:
- InstanceNorm(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::InstanceNorm; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_INSTANCE_NORM_H__
diff --git a/runtime/onert/core/include/ir/operation/L2Normalization.h b/runtime/onert/core/include/ir/operation/L2Normalization.h
deleted file mode 100644
index abbd68c97..000000000
--- a/runtime/onert/core/include/ir/operation/L2Normalization.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_L2_NORMALIZATION_H__
-#define __ONERT_IR_OPERATION_L2_NORMALIZATION_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class L2Normalization : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
-public:
- L2Normalization(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::L2Normalization; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_L2_NORMALIZATION_H__
diff --git a/runtime/onert/core/include/ir/operation/LSTM.h b/runtime/onert/core/include/ir/operation/LSTM.h
deleted file mode 100644
index 027bc6b42..000000000
--- a/runtime/onert/core/include/ir/operation/LSTM.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __ONERT_IR_OPERATION_LSTM_H__
-#define __ONERT_IR_OPERATION_LSTM_H__
-
-#include "ir/InternalType.h"
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-// This operation supports only unidirectional sequence lstm
-class LSTM : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- INPUT_TO_INPUT_WEIGHTS = 1,
- INPUT_TO_FORGET_WEIGHTS = 2,
- INPUT_TO_CELL_WEIGHTS = 3,
- INPUT_TO_OUTPUT_WEIGHTS = 4,
- RECURRENT_TO_INPUT_WEIGHTS = 5,
- RECURRENT_TO_FORGET_WEIGHTS = 6,
- RECURRENT_TO_CELL_WEIGHTS = 7,
- RECURRENT_TO_OUTPUT_WEIGHTS = 8,
- CELL_TO_INPUT_WEIGHTS = 9,
- CELL_TO_FORGET_WEIGHTS = 10,
- CELL_TO_OUTPUT_WEIGHTS = 11,
- INPUT_GATE_BIAS = 12,
- FORGET_GATE_BIAS = 13,
- CELL_BIAS = 14,
- OUTPUT_GATE_BIAS = 15,
- PROJECTION_WEIGHTS = 16,
- PROJECTION_BIAS = 17,
- OUTPUT_STATE_IN = 18,
- CELL_STATE_IN = 19,
- INPUT_LAYER_NORMALIZATION_WEIGHTS = 20,
- FORGET_LAYER_NORMALIZATION_WEIGHTS = 21,
- CELL_LAYER_NORMALIZATION_WEIGHTS = 22,
- OUTPUT_LAYER_NORMALIZATION_WEIGHTS = 23,
- };
-
- enum Output
- {
- SCRATCH_BUFFER = 0,
- OUTPUT_STATE_OUT = 1,
- CELL_STATE_OUT = 2,
- OUTPUT = 3
- };
-
- struct Param
- {
- Activation activation;
- float cell_threshold;
- float projection_threshold;
- bool time_major;
- };
-
-public:
- LSTM(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs, const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- std::string name() const override;
- OpCode opcode() const final { return OpCode::LSTM; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_LSTM_H__
diff --git a/runtime/onert/core/include/ir/operation/LocalResponseNormalization.h b/runtime/onert/core/include/ir/operation/LocalResponseNormalization.h
deleted file mode 100644
index 2946cfbad..000000000
--- a/runtime/onert/core/include/ir/operation/LocalResponseNormalization.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_LOCAL_RESPONSE_NORMALIZATION_H__
-#define __ONERT_IR_OPERATION_LOCAL_RESPONSE_NORMALIZATION_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class LocalResponseNormalization : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- struct Param
- {
- int radius;
- float bias;
- float alpha;
- float beta;
- };
-
-public:
- LocalResponseNormalization(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::LocalResponseNormalization; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_LOCAL_RESPONSE_NORMALIZATION_H__
diff --git a/runtime/onert/core/include/ir/operation/LogSoftmax.h b/runtime/onert/core/include/ir/operation/LogSoftmax.h
deleted file mode 100644
index 391b4ba4a..000000000
--- a/runtime/onert/core/include/ir/operation/LogSoftmax.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_LOGSOFTMAX_H__
-#define __ONERT_IR_OPERATION_LOGSOFTMAX_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class LogSoftmax : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- struct Param
- {
- float beta;
- int axis;
- };
-
-public:
- LogSoftmax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::LogSoftmax; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_LOGSOFTMAX_H__
diff --git a/runtime/onert/core/include/ir/operation/LowerInfo.h b/runtime/onert/core/include/ir/operation/LowerInfo.h
deleted file mode 100644
index 7ef53b8c7..000000000
--- a/runtime/onert/core/include/ir/operation/LowerInfo.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_LOWER_INFO_H__
-#define __ONERT_IR_OPERATION_LOWER_INFO_H__
-
-#include <string>
-
-#include <ir/operand/PermuteFactor.h>
-
-namespace onert
-{
-namespace backend
-{
-class Backend;
-} // namespace backend
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class LowerInfo
-{
-public:
- LowerInfo(const backend::Backend *backend, Layout layout);
- const backend::Backend *backend() const { return _permute_factor.backend(); }
- Layout layout() const { return _permute_factor.layout(); }
-
-private:
- operand::PermuteFactor _permute_factor;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_LOWER_INFO_H__
diff --git a/runtime/onert/core/include/ir/operation/MatrixBandPart.h b/runtime/onert/core/include/ir/operation/MatrixBandPart.h
deleted file mode 100644
index 291826635..000000000
--- a/runtime/onert/core/include/ir/operation/MatrixBandPart.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_MATRIX_BAND_PART_H__
-#define __ONERT_IR_OPERATION_MATRIX_BAND_PART_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class MatrixBandPart : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- NUM_LOWER_DIAG,
- NUM_UPPER_DIAG,
- };
-
-public:
- MatrixBandPart(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::MatrixBandPart; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_MATRIX_BAND_PART_H__
diff --git a/runtime/onert/core/include/ir/operation/OneHot.h b/runtime/onert/core/include/ir/operation/OneHot.h
deleted file mode 100644
index 6264cd15e..000000000
--- a/runtime/onert/core/include/ir/operation/OneHot.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_ONEHOT_H__
-#define __ONERT_IR_OPERATION_ONEHOT_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class OneHot : public Operation
-{
-public:
- enum Input
- {
- INDICES = 0,
- DEPTH = 1,
- ON_VALUE = 2,
- OFF_VALUE = 3,
- };
-
- struct Param
- {
- int axis;
- };
-
-public:
- OneHot(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::OneHot; }
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_ONEHOT_H__
diff --git a/runtime/onert/core/include/ir/operation/PReLU.h b/runtime/onert/core/include/ir/operation/PReLU.h
deleted file mode 100644
index 2981ffc6a..000000000
--- a/runtime/onert/core/include/ir/operation/PReLU.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_PRELU_H__
-#define __ONERT_IR_OPERATION_PRELU_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class PReLU : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- ALPHA = 1
- };
-
-public:
- PReLU(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::PReLU; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_PRELU_H__
diff --git a/runtime/onert/core/include/ir/operation/Pack.h b/runtime/onert/core/include/ir/operation/Pack.h
deleted file mode 100644
index cf07541e0..000000000
--- a/runtime/onert/core/include/ir/operation/Pack.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __ONERT_IR_OPERATION_PACK_H__
-#define __ONERT_IR_OPERATION_PACK_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-class Pack : public Operation
-{
-public:
- struct Param
- {
- int32_t num;
- int32_t axis;
- };
-
-public:
- Pack(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs, const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Pack; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-} // namespace operation
-} // namespace ir
-} // namespace onert
-#endif // __ONERT_IR_OPERATION_PACK_H__
diff --git a/runtime/onert/core/include/ir/operation/Pad.h b/runtime/onert/core/include/ir/operation/Pad.h
deleted file mode 100644
index 00481cd50..000000000
--- a/runtime/onert/core/include/ir/operation/Pad.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_PAD_H__
-#define __ONERT_IR_OPERATION_PAD_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Pad : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- PAD = 1,
- VALUE = 2
- };
-
-public:
- Pad(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Pad; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_PAD_H__
diff --git a/runtime/onert/core/include/ir/operation/Permute.h b/runtime/onert/core/include/ir/operation/Permute.h
deleted file mode 100644
index 10f09b9a0..000000000
--- a/runtime/onert/core/include/ir/operation/Permute.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_PERMUTE_H__
-#define __ONERT_IR_OPERATION_PERMUTE_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace backend
-{
-class BackendContext;
-} // namespace backend
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Permute : public Operation
-{
-public:
- enum class Type
- {
- NHWC_TO_NCHW,
- NCHW_TO_NHWC,
- COPY
- };
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Permute; }
-
-public:
- Permute(const OperandIndex &input, const OperandIndex &output, Type type);
-
-public:
- Type getPermuteType() const { return _type; }
-
-private:
- Type _type;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_PERMUTE_H__
diff --git a/runtime/onert/core/include/ir/operation/Pool2D.h b/runtime/onert/core/include/ir/operation/Pool2D.h
deleted file mode 100644
index 22425b4c2..000000000
--- a/runtime/onert/core/include/ir/operation/Pool2D.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_POOL2D_H__
-#define __ONERT_IR_OPERATION_POOL2D_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-#include "ir/Padding.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Pool2D : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- enum class PoolType
- {
- AVG,
- L2,
- MAX,
- };
-
- struct Param
- {
- PoolType op_type;
- uint32_t kh;
- uint32_t kw;
- Stride stride;
- Padding padding;
- Activation activation;
- };
-
-public:
- Pool2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- std::string name() const override;
- OpCode opcode() const final { return OpCode::Pool2D; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_POOL2D_H__
diff --git a/runtime/onert/core/include/ir/operation/Pow.h b/runtime/onert/core/include/ir/operation/Pow.h
deleted file mode 100644
index ca28ddfe7..000000000
--- a/runtime/onert/core/include/ir/operation/Pow.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_POW_H__
-#define __ONERT_IR_OPERATION_POW_H__
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Pow : public Operation
-{
-public:
- enum Input
- {
- LHS = 0,
- RHS
- };
-
-public:
- Pow(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Pow; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_POW_H__
diff --git a/runtime/onert/core/include/ir/operation/RNN.h b/runtime/onert/core/include/ir/operation/RNN.h
deleted file mode 100644
index 087075da2..000000000
--- a/runtime/onert/core/include/ir/operation/RNN.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __ONERT_IR_OPERATION_RNN_H__
-#define __ONERT_IR_OPERATION_RNN_H__
-
-#include "ir/InternalType.h"
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class RNN : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- WEIGHTS = 1,
- RECURRENT_WEIGHTS = 2,
- BIAS = 3,
- HIDDEN_STATE_IN = 4
- };
-
- enum Output
- {
- OUTPUT = 0,
- HIDDEN_STATE_OUT = 1
- };
-
- struct Param
- {
- Activation activation;
- };
-
-public:
- RNN(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs, const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::RNN; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_RNN_H__
diff --git a/runtime/onert/core/include/ir/operation/Range.h b/runtime/onert/core/include/ir/operation/Range.h
deleted file mode 100644
index 81e170be9..000000000
--- a/runtime/onert/core/include/ir/operation/Range.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_RANGE_H__
-#define __ONERT_IR_OPERATION_RANGE_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Range : public Operation
-{
-public:
- enum Input
- {
- START = 0,
- LIMIT = 1,
- DELTA = 2
- };
-
-public:
- Range(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Range; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_RANGE_H__
diff --git a/runtime/onert/core/include/ir/operation/Rank.h b/runtime/onert/core/include/ir/operation/Rank.h
deleted file mode 100644
index 2fd24ce23..000000000
--- a/runtime/onert/core/include/ir/operation/Rank.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_RANK_H__
-#define __ONERT_IR_OPERATION_RANK_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Rank : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
-public:
- Rank(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Rank; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_RANK_H__
diff --git a/runtime/onert/core/include/ir/operation/Reduce.h b/runtime/onert/core/include/ir/operation/Reduce.h
deleted file mode 100644
index 26bcf5ec9..000000000
--- a/runtime/onert/core/include/ir/operation/Reduce.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_REDUCE_H__
-#define __ONERT_IR_OPERATION_REDUCE_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Reduce : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- AXES = 1
- };
-
- enum class ReduceType
- {
- ALL,
- ANY,
- MAX,
- MEAN,
- MIN,
- PROD,
- SUM
- };
-
- struct Param
- {
- ReduceType reduce_type;
- bool keep_dims;
- };
-
-public:
- Reduce(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- std::string name() const override;
- OpCode opcode() const final { return OpCode::Reduce; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_REDUCE_ALL_H__
diff --git a/runtime/onert/core/include/ir/operation/Reshape.h b/runtime/onert/core/include/ir/operation/Reshape.h
deleted file mode 100644
index c2c0e8c99..000000000
--- a/runtime/onert/core/include/ir/operation/Reshape.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_RESHAPE_H__
-#define __ONERT_IR_OPERATION_RESHAPE_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Reshape : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- SHAPE = 1
- };
-
- struct Param
- {
- std::vector<int32_t> new_shape;
- };
-
-public:
- Reshape(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Reshape; }
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_RESHAPE_H__
diff --git a/runtime/onert/core/include/ir/operation/ResizeBilinear.h b/runtime/onert/core/include/ir/operation/ResizeBilinear.h
deleted file mode 100644
index ab330c826..000000000
--- a/runtime/onert/core/include/ir/operation/ResizeBilinear.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_RESIZE_BILINEAR_H__
-#define __ONERT_IR_OPERATION_RESIZE_BILINEAR_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ResizeBilinear : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- SIZE = 1,
- };
-
- struct Param
- {
- // If the input SIZE exists in inputs, height_out and width_out are not set. Ignore these params
- int32_t height_out;
- int32_t width_out;
- bool align_corners;
- bool half_pixel_centers;
- };
-
-public:
- ResizeBilinear(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::ResizeBilinear; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_RESIZE_BILINEAR_H__
diff --git a/runtime/onert/core/include/ir/operation/ResizeNearestNeighbor.h b/runtime/onert/core/include/ir/operation/ResizeNearestNeighbor.h
deleted file mode 100644
index 10827803e..000000000
--- a/runtime/onert/core/include/ir/operation/ResizeNearestNeighbor.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_RESIZE_NEAREST_NEIGHBOR_H__
-#define __ONERT_IR_OPERATION_RESIZE_NEAREST_NEIGHBOR_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class ResizeNearestNeighbor : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- SIZE = 1,
- };
-
- struct Param
- {
- // If the input SIZE exists in inputs, Be height_out and width_out not set. Ignore these params
- int32_t height_out;
- int32_t width_out;
- bool align_corners;
- };
-
-public:
- ResizeNearestNeighbor(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::ResizeNearestNeighbor; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_RESIZE_NEAREST_NEIGHBOR_H__
diff --git a/runtime/onert/core/include/ir/operation/Reverse.h b/runtime/onert/core/include/ir/operation/Reverse.h
deleted file mode 100644
index 3d7f3fc89..000000000
--- a/runtime/onert/core/include/ir/operation/Reverse.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_REVERSE_H__
-#define __ONERT_IR_OPERATION_REVERSE_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Reverse : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- AXIS = 1
- };
-
-public:
- Reverse(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Reverse; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_REVERSE_H__
diff --git a/runtime/onert/core/include/ir/operation/Select.h b/runtime/onert/core/include/ir/operation/Select.h
deleted file mode 100644
index 33bf67886..000000000
--- a/runtime/onert/core/include/ir/operation/Select.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SELECT_H__
-#define __ONERT_IR_OPERATION_SELECT_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Select : public Operation
-{
-public:
- enum Input
- {
- CONDITION = 0,
- INPUT_TRUE = 1,
- INPUT_FALSE = 2
- };
-
-public:
- Select(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Select; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SELECT_H__
diff --git a/runtime/onert/core/include/ir/operation/Shape.h b/runtime/onert/core/include/ir/operation/Shape.h
deleted file mode 100644
index 4dea7e424..000000000
--- a/runtime/onert/core/include/ir/operation/Shape.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SHAPE_H__
-#define __ONERT_IR_OPERATION_SHAPE_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Shape : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
-public:
- Shape(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Shape; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SHAPE_H__
diff --git a/runtime/onert/core/include/ir/operation/Slice.h b/runtime/onert/core/include/ir/operation/Slice.h
deleted file mode 100644
index c86a9893a..000000000
--- a/runtime/onert/core/include/ir/operation/Slice.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SLICE_H__
-#define __ONERT_IR_OPERATION_SLICE_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Slice : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- BEGINS = 1,
- SIZES = 2,
- };
-
-public:
- Slice(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Slice; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SLICE_H__
diff --git a/runtime/onert/core/include/ir/operation/Softmax.h b/runtime/onert/core/include/ir/operation/Softmax.h
deleted file mode 100644
index db7ae910e..000000000
--- a/runtime/onert/core/include/ir/operation/Softmax.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SOFTMAX_H__
-#define __ONERT_IR_OPERATION_SOFTMAX_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Softmax : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- struct Param
- {
- float beta;
- };
-
-public:
- Softmax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Softmax; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SOFTMAX_H__
diff --git a/runtime/onert/core/include/ir/operation/SpaceToBatchND.h b/runtime/onert/core/include/ir/operation/SpaceToBatchND.h
deleted file mode 100644
index 99928ff24..000000000
--- a/runtime/onert/core/include/ir/operation/SpaceToBatchND.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SPACE_TO_BATCH_ND_H__
-#define __ONERT_IR_OPERATION_SPACE_TO_BATCH_ND_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class SpaceToBatchND : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- BLOCK_SIZE = 1,
- PADDINGS = 2
- };
-
-public:
- SpaceToBatchND(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::SpaceToBatchND; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SPACE_TO_BATCH_ND_H__
diff --git a/runtime/onert/core/include/ir/operation/SpaceToDepth.h b/runtime/onert/core/include/ir/operation/SpaceToDepth.h
deleted file mode 100644
index 6c8b09130..000000000
--- a/runtime/onert/core/include/ir/operation/SpaceToDepth.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SPACE_TO_DEPTH_H__
-#define __ONERT_IR_OPERATION_SPACE_TO_DEPTH_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class SpaceToDepth : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- struct Param
- {
- std::int32_t block_size;
- };
-
-public:
- SpaceToDepth(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::SpaceToDepth; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SPACE_TO_DEPTH_H__
diff --git a/runtime/onert/core/include/ir/operation/Split.h b/runtime/onert/core/include/ir/operation/Split.h
deleted file mode 100644
index c415941a4..000000000
--- a/runtime/onert/core/include/ir/operation/Split.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __ONERT_IR_OPERATION_SPLIT_H__
-#define __ONERT_IR_OPERATION_SPLIT_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-class Split : public Operation
-{
-public:
- enum Input
- {
- AXIS = 0,
- INPUT = 1,
- };
-
- struct Param
- {
- int num_splits;
- };
-
-public:
- Split(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Split; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-} // namespace operation
-} // namespace ir
-} // namespace onert
-#endif // __ONERT_IR_OPERATION_SPLIT_H__
diff --git a/runtime/onert/core/include/ir/operation/SplitV.h b/runtime/onert/core/include/ir/operation/SplitV.h
deleted file mode 100644
index 99a06ee7f..000000000
--- a/runtime/onert/core/include/ir/operation/SplitV.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __ONERT_IR_OPERATION_SPLIT_V_H__
-#define __ONERT_IR_OPERATION_SPLIT_V_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-class SplitV : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- SIZE_SPLITS = 1,
- SPLIT_DIM = 2
- };
-
- struct Param
- {
- int num_splits;
- };
-
-public:
- SplitV(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::SplitV; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-} // namespace operation
-} // namespace ir
-} // namespace onert
-#endif // __ONERT_IR_OPERATION_SPLIT_V_H__
diff --git a/runtime/onert/core/include/ir/operation/SquaredDifference.h b/runtime/onert/core/include/ir/operation/SquaredDifference.h
deleted file mode 100644
index 392b11448..000000000
--- a/runtime/onert/core/include/ir/operation/SquaredDifference.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SQUARED_DIFFERENCE_H__
-#define __ONERT_IR_OPERATION_SQUARED_DIFFERENCE_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class SquaredDifference : public Operation
-{
-public:
- enum Input
- {
- LHS = 0,
- RHS
- };
-
-public:
- SquaredDifference(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::SquaredDifference; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SQUARED_DIFFERENCE_H__
diff --git a/runtime/onert/core/include/ir/operation/Squeeze.h b/runtime/onert/core/include/ir/operation/Squeeze.h
deleted file mode 100644
index c370472b7..000000000
--- a/runtime/onert/core/include/ir/operation/Squeeze.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_SQUEEZE_H__
-#define __ONERT_IR_OPERATION_SQUEEZE_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Squeeze : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- struct Param
- {
- // Please see tensorflow/lite/c/builtin_op_data.h and squeeze.cc.
- // tensorflow lite supports only for ndim <= 8.
- int dims[8];
- int ndim;
- };
-
-public:
- Squeeze(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Squeeze; }
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_SQUEEZE_H__
diff --git a/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h b/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h
deleted file mode 100644
index 112a748fd..000000000
--- a/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__
-#define __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class StatelessRandomUniform : public Operation
-{
-public:
- enum Input
- {
- SHAPE = 0,
- SEED = 1
- };
-
-public:
- StatelessRandomUniform(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::StatelessRandomUniform; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__
diff --git a/runtime/onert/core/include/ir/operation/StridedSlice.h b/runtime/onert/core/include/ir/operation/StridedSlice.h
deleted file mode 100644
index 4a5e06410..000000000
--- a/runtime/onert/core/include/ir/operation/StridedSlice.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_STRIDED_SLICE_H__
-#define __ONERT_IR_OPERATION_STRIDED_SLICE_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class StridedSlice : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- STARTS = 1,
- ENDS = 2,
- STRIDES = 3
- };
-
- struct Param
- {
- int32_t begin_mask;
- int32_t end_mask;
- int32_t shrink_axis_mask;
- };
-
-public:
- StridedSlice(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::StridedSlice; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_STRIDED_SLICE_H__
diff --git a/runtime/onert/core/include/ir/operation/Tile.h b/runtime/onert/core/include/ir/operation/Tile.h
deleted file mode 100644
index 388c452c8..000000000
--- a/runtime/onert/core/include/ir/operation/Tile.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_TILE_H__
-#define __ONERT_IR_OPERATION_TILE_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Tile : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0,
- MULTIPLES,
- };
-
-public:
- Tile(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Tile; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_TILE_H__
diff --git a/runtime/onert/core/include/ir/operation/TopKV2.h b/runtime/onert/core/include/ir/operation/TopKV2.h
deleted file mode 100644
index 179a599ca..000000000
--- a/runtime/onert/core/include/ir/operation/TopKV2.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_TOPK_V2_H__
-#define __ONERT_IR_OPERATION_TOPK_V2_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class TopKV2 : public Operation
-{
-public:
- enum Input
- {
- INPUT
- };
-
- enum Output
- {
- OUTPUT_VALUES = 0,
- OUTPUT_INDICES,
- };
-
- struct Param
- {
- std::int32_t k;
- };
-
-public:
- TopKV2(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::TopKV2; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_TOPK_V2_H__
diff --git a/runtime/onert/core/include/ir/operation/Transpose.h b/runtime/onert/core/include/ir/operation/Transpose.h
deleted file mode 100644
index 665c9bbce..000000000
--- a/runtime/onert/core/include/ir/operation/Transpose.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_TRANSPOSE_H__
-#define __ONERT_IR_OPERATION_TRANSPOSE_H__
-
-#include "ir/Operation.h"
-
-#include <utility>
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class Transpose : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0, // for an n-D tensor, specifying the tensor to be transposed.
- PERMUTATION = 1,
- };
-
-public:
- Transpose(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Transpose; }
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_TRANSPOSE_H__
diff --git a/runtime/onert/core/include/ir/operation/TransposeConv.h b/runtime/onert/core/include/ir/operation/TransposeConv.h
deleted file mode 100644
index 05137ccf8..000000000
--- a/runtime/onert/core/include/ir/operation/TransposeConv.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_TRANSPOSE_CONV_H__
-#define __ONERT_IR_OPERATION_TRANSPOSE_CONV_H__
-
-#include <memory>
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-#include "ir/Padding.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class TransposeConv : public Operation
-{
-public:
- enum Input
- {
- OUTPUT_SHAPE = 0,
- KERNEL,
- INPUT
- };
-
- struct Param
- {
- Padding padding;
- Stride stride;
- };
-
-public:
- TransposeConv(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::TransposeConv; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_TRANSPOSE_CONV_H__
diff --git a/runtime/onert/core/include/ir/operation/Unpack.h b/runtime/onert/core/include/ir/operation/Unpack.h
deleted file mode 100644
index 092583a97..000000000
--- a/runtime/onert/core/include/ir/operation/Unpack.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __ONERT_IR_OPERATION_UNPACK_H__
-#define __ONERT_IR_OPERATION_UNPACK_H__
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-class Unpack : public Operation
-{
-public:
- enum Input
- {
- INPUT = 0
- };
-
- struct Param
- {
- int32_t num;
- int32_t axis;
- };
-
-public:
- Unpack(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Unpack; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-} // namespace operation
-} // namespace ir
-} // namespace onert
-#endif // __ONERT_IR_OPERATION_UNPACK_H__
diff --git a/runtime/onert/core/include/ir/operation/While.h b/runtime/onert/core/include/ir/operation/While.h
deleted file mode 100644
index cf310d596..000000000
--- a/runtime/onert/core/include/ir/operation/While.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_WHILE_H__
-#define __ONERT_IR_OPERATION_WHILE_H__
-
-#include "ir/Operation.h"
-#include "ir/InternalType.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-class While : public Operation
-{
-public:
- struct Param
- {
- SubgraphIndex cond_subg_index;
- SubgraphIndex body_subg_index;
- };
-
-public:
- While(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param);
-
-public:
- void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::While; }
-
-public:
- const Param &param() const { return _param; }
-
-private:
- Param _param;
-};
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_WHILE_H__
diff --git a/runtime/onert/core/include/util/Config.lst b/runtime/onert/core/include/util/Config.lst
deleted file mode 100644
index 30f211011..000000000
--- a/runtime/onert/core/include/util/Config.lst
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CONFIG
-#error Define CONFIG before including this file
-#endif
-
-// Name | Type | Default
-CONFIG(GRAPH_DOT_DUMP , int , "0")
-CONFIG(BACKENDS , std::string , "cpu;acl_cl;acl_neon;bcq") // FIXME Remove bcq
-CONFIG(OP_BACKEND_ALLOPS , std::string , "")
-CONFIG(OP_BACKEND_MAP , std::string , "")
-CONFIG(DISABLE_COMPILE , bool , "0")
-CONFIG(ONERT_LOG_ENABLE , bool , "0")
-CONFIG(CPU_MEMORY_PLANNER , std::string , "WIC")
-CONFIG(EXECUTOR , std::string , "Linear")
-CONFIG(ACL_LAYOUT , std::string , "none")
-CONFIG(NCNN_LAYOUT , std::string , "NCHW")
-CONFIG(PROFILING_MODE , bool , "0")
-CONFIG(USE_SCHEDULER , bool , "0")
-CONFIG(OP_SEQ_MAX_NODE , int , "0")
-CONFIG(TRACE_FILEPATH , std::string , "")
-CONFIG(FP16_ENABLE , bool , "0")
-CONFIG(RUY_THREADS , int , "-1")
-CONFIG(USE_MMAPED_DATA , bool , "0")
-
-// Auto-generate all operations
-
-#define OP(InternalName) \
- CONFIG(OP_BACKEND_ ## InternalName, std::string, "")
-#include "ir/Operations.lst"
-#undef OP
-
diff --git a/runtime/onert/core/include/util/ConfigSource.h b/runtime/onert/core/include/util/ConfigSource.h
deleted file mode 100644
index b6a8144fd..000000000
--- a/runtime/onert/core/include/util/ConfigSource.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_CONFIG_SOURCE_H__
-#define __ONERT_UTIL_CONFIG_SOURCE_H__
-
-#include <memory>
-
-#include "IConfigSource.h"
-
-namespace onert
-{
-namespace util
-{
-
-void config_source(std::unique_ptr<IConfigSource> &&source);
-
-bool toBool(const std::string &val);
-int toInt(const std::string &val);
-
-bool getConfigBool(const std::string &key);
-int getConfigInt(const std::string &key);
-std::string getConfigString(const std::string &key);
-
-} // namespace util
-} // namespace onert
-
-namespace onert
-{
-namespace util
-{
-namespace config
-{
-
-#define CONFIG(Name, Type, Default) extern const char *Name;
-
-#include "Config.lst"
-
-#undef CONFIG
-
-} // namespace config
-} // namespace util
-} // namespace onert
-
-#endif // __ONERT_UTIL_CONFIG_SOURCE_H__
diff --git a/runtime/onert/core/include/util/EnvConfigSource.h b/runtime/onert/core/include/util/EnvConfigSource.h
deleted file mode 100644
index 8c5d0e8e9..000000000
--- a/runtime/onert/core/include/util/EnvConfigSource.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_ENV_CONFIG_SOURCE_H__
-#define __ONERT_UTIL_ENV_CONFIG_SOURCE_H__
-
-#include <unordered_map>
-
-#include "util/GeneralConfigSource.h"
-
-namespace onert
-{
-namespace util
-{
-
-class EnvConfigSource final : public GeneralConfigSource
-{
-public:
- std::string get(const std::string &key) const override;
-
-private:
- std::unordered_map<std::string, std::string> _default_attributes;
-};
-
-} // namespace util
-} // namespace onert
-
-#endif // __ONERT_UTIL_ENV_CONFIG_SOURCE_H__
diff --git a/runtime/onert/core/include/util/Exceptions.h b/runtime/onert/core/include/util/Exceptions.h
deleted file mode 100644
index fc3fa0f64..000000000
--- a/runtime/onert/core/include/util/Exceptions.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_ONERTEXCEPTION_H__
-#define __ONERT_UTIL_ONERTEXCEPTION_H__
-
-#include <string>
-
-namespace onert
-{
-
-class OnertException : public std::exception
-{
-public:
- OnertException(const std::string &msg) : _msg{msg} {}
- OnertException(const std::string &tag, const std::string &msg) : _msg{tag + " : " + msg} {}
-
- const char *what() const noexcept override { return _msg.c_str(); }
-
-private:
- std::string _msg;
-};
-
-class InsufficientBufferSizeException : public OnertException
-{
-public:
- InsufficientBufferSizeException(const std::string &msg)
- : OnertException{"InsufficientBufferSize", msg}
- {
- }
-};
-
-} // namespace onert
-
-#endif // __ONERT_UTIL_ONERTEXCEPTION_H__
diff --git a/runtime/onert/core/include/util/GeneralConfigSource.h b/runtime/onert/core/include/util/GeneralConfigSource.h
deleted file mode 100644
index dedc820ec..000000000
--- a/runtime/onert/core/include/util/GeneralConfigSource.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_GLOBAL_CONFIG_SOURCE_H__
-#define __ONERT_UTIL_GLOBAL_CONFIG_SOURCE_H__
-
-#include <unordered_map>
-
-#include "util/IConfigSource.h"
-
-namespace onert
-{
-namespace util
-{
-
-class GeneralConfigSource : public IConfigSource
-{
-public:
- GeneralConfigSource() = default;
-
- std::string get(const std::string &key) const override;
- void set(const std::string &key, const std::string &val);
-
-private:
- std::unordered_map<std::string, std::string> _map;
-};
-
-} // namespace util
-} // namespace onert
-
-#endif // __ONERT_UTIL_GLOBAL_CONFIG_SOURCE_H__
diff --git a/runtime/onert/core/include/util/IConfigSource.h b/runtime/onert/core/include/util/IConfigSource.h
deleted file mode 100644
index 07b09848a..000000000
--- a/runtime/onert/core/include/util/IConfigSource.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_I_CONFIG_SOURCE_H__
-#define __ONERT_UTIL_I_CONFIG_SOURCE_H__
-
-#include <string>
-
-namespace onert
-{
-namespace util
-{
-
-struct IConfigSource
-{
- /**
- * @brief Destroy the IConfigSource object
- */
- virtual ~IConfigSource() = default;
-
- /**
- * @brief get the value for the matching key
- *
- * @param key string key to search
- * @return string value associated with the key
- */
- virtual std::string get(const std::string &key) const = 0;
-};
-
-} // namespace util
-} // namespace onert
-
-#endif // __ONERT_UTIL_I_CONFIG_SOURCE_H__
diff --git a/runtime/onert/core/include/util/ITimer.h b/runtime/onert/core/include/util/ITimer.h
deleted file mode 100644
index d5a4e1eb0..000000000
--- a/runtime/onert/core/include/util/ITimer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_ITIMER_H__
-#define __ONERT_UTIL_ITIMER_H__
-
-#include <chrono>
-
-namespace onert
-{
-namespace util
-{
-
-class ITimer
-{
-public:
- virtual void handleBegin() = 0;
- virtual void handleEnd() = 0;
- int getTime() { return _timer_res; };
-
- virtual ~ITimer() = default;
-
-protected:
- int _timer_res{0};
-};
-
-class CPUTimer : public ITimer
-{
-public:
- void handleBegin() override { _start_time = std::chrono::steady_clock::now(); };
-
- void handleEnd() override
- {
- const auto end_time = std::chrono::steady_clock::now();
- _timer_res =
- std::chrono::duration_cast<std::chrono::microseconds>(end_time - _start_time).count();
- };
-
-private:
- std::chrono::steady_clock::time_point _start_time; // in microseconds
-};
-
-} // namespace util
-} // namespace onert
-
-#endif // __ONERT_UTIL_ITIMER_H__
diff --git a/runtime/onert/core/include/util/Index.h b/runtime/onert/core/include/util/Index.h
deleted file mode 100644
index e8f59282d..000000000
--- a/runtime/onert/core/include/util/Index.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_INDEX_H__
-#define __ONERT_UTIL_INDEX_H__
-
-#include <functional>
-#include <limits>
-#include <stdint.h>
-#include <string>
-
-namespace onert
-{
-namespace util
-{
-
-/**
- * @brief A wrapper class for unsigned integral Index
- * NOTE : Max value of the underlying type is used as the invalid value
- *
- * @tparam T Underlying type. Must be unsigned integral type otherwise its behavior is undefined.
- * @tparam DummyTag Dummy type to distinguish types with a same underlying type. Using an opaque
- * type is recommended.
- */
-template <typename T, typename DummyTag> class Index
-{
-private:
- static const T UNDEFINED = std::numeric_limits<T>::max();
-
-public:
- /**
- * @brief Construct a new Index object
- */
- explicit Index(void) : _index{UNDEFINED} {}
- /**
- * @brief Construct a new Index object with a value in the underlying type
- *
- * @param o Value in the underlying type
- */
- explicit Index(const T o) : _index{o} {}
- /**
- * @brief Copy Constructor
- *
- * @param o Object to be copied
- */
- Index(const Index &o) = default;
-
- /**
- * @brief Assign a value in the underlying time
- *
- * @param o Value in the underlying type
- * @return Index& Reference of this pointer
- */
- Index &operator=(const T o)
- {
- _index = o;
- return *this;
- }
-
- /**
- * @brief Copy assignment operator
- *
- * @param o Object to be copied
- * @return Index& Reference of this pointer
- */
- Index &operator=(const Index &o) = default;
-
- /**
- * @brief Equality operator
- *
- * @param o The other value in the underlying type to compare
- * @return true if underlying value is the same, false otherwise
- */
- bool operator==(T o) const { return _index == o; }
- /**
- * @brief Equality operator
- *
- * @param o The other object to compare
- * @return true if underlying value is the same, false otherwise
- */
- bool operator==(const Index &o) const { return _index == o._index; }
- /**
- * @brief Inquality operator
- *
- * @param o The other value in the underlying type to compare
- * @return true if underlying value is different, false otherwise
- */
- bool operator!=(T o) const { return !(*this == o); }
- /**
- * @brief Inquality operator
- *
- * @param o The other object to compare
- * @return true if underlying value is different, false otherwise
- */
- bool operator!=(const Index &o) const { return !(*this == o); }
-
- /**
- * @brief Post increment operator
- *
- * @return Index Index before increment
- */
- Index operator++(int)
- {
- Index temp = *this;
- _index++;
- return temp;
- }
-
- /**
- * @brief Check whether the value is valid or not
- *
- * @return true if valid, false otherwise
- */
- bool valid() const { return _index != UNDEFINED; }
- /**
- * @brief Check whether the value is undefined
- *
- * @return true if undefined, false otherwise
- */
- bool undefined() const { return _index == UNDEFINED; }
- /**
- * @brief Return underlying value
- *
- * @return T Underlying value
- */
- T value() const { return _index; }
-
- friend std::ostream &operator<<(std::ostream &o, const Index &t)
- {
- if (t.undefined())
- return o << std::string("undefined");
- else
- return o << t.value();
- }
-
-private:
- T _index;
-};
-
-} // namespace util
-} // namespace onert
-
-namespace std
-{
-
-template <typename T, typename Tag> struct hash<::onert::util::Index<T, Tag>>
-{
- size_t operator()(const ::onert::util::Index<T, Tag> &index) const noexcept
- {
- return hash<T>()(index.value());
- }
-};
-
-} // namespace std
-
-#endif // __ONERT_UTIL_INDEX_H__
diff --git a/runtime/onert/core/include/util/ObjectManager.h b/runtime/onert/core/include/util/ObjectManager.h
deleted file mode 100644
index d2dd881a8..000000000
--- a/runtime/onert/core/include/util/ObjectManager.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_OBJECT_MANAGER_H__
-#define __ONERT_UTIL_OBJECT_MANAGER_H__
-
-#include <unordered_map>
-#include <memory>
-#include <list>
-#include <functional>
-
-#include <memory>
-
-namespace onert
-{
-namespace util
-{
-
-/**
- * @brief Class that owns objects and maps them with indices as a handle for them
- *
- */
-template <typename Index, typename Object> class ObjectManager
-{
-public:
- ObjectManager() : _index_count{0u} {}
-
-public:
- /**
- * @brief Create an object with args and put it in the container with a new Index for that
- *
- * @param[in] args Arguments for creating Operand object
- * @return Created index that is associated to the object
- */
- template <class... Args> Index emplace(Args &&... args)
- {
- auto index = generateIndex();
- _objects.emplace(index, std::make_unique<Object>(std::forward<Args>(args)...));
- return index;
- }
-
- /**
- * @brief Put object in the container with a new Index for that
- *
- * @param[in] object Object to be pushed
- * @return Created index that is associated to the object
- */
- Index push(std::unique_ptr<Object> &&object)
- {
- auto index = generateIndex();
- _objects.emplace(index, std::move(object));
- return index;
- }
-
- /**
- * @brief Remove the object that is associated with the given index
- *
- * @param[in] index Index of the object to be removed
- * @return N/A
- */
- void remove(const Index &index) { _objects.erase(index); }
-
- /**
- * @brief Get the object that is associated with the given index
- *
- * @param[in] index Index of the object to be returned
- * @return Object
- */
- const Object &at(const Index &index) const { return *(_objects.at(index)); }
- /**
- * @brief Get the object that is associated with the given index
- *
- * @param[in] index Index of the object to be returned
- * @return Object
- */
- Object &at(const Index &index) { return *(_objects.at(index)); }
- /**
- * @brief Get the object that is associated with the given index
- *
- * @param[in] index Index of the object to be returned
- * @return true if such entry exists otherwise false
- */
- bool exist(const Index &index) const
- {
- auto it = _objects.find(index);
- return it != _objects.end();
- }
- /**
- * @brief Iterate over the container with given function
- *
- * @param[in] fn Function to be run for every container entry
- * @return N/A
- */
- void iterate(const std::function<void(const Index &, const Object &)> &fn) const
- {
- for (const auto &e : _objects)
- {
- fn(e.first, *e.second);
- }
- }
- /**
- * @brief Iterate over the container with given function
- *
- * @param[in] fn Function to be run for every container entry
- * @return N/A
- */
- void iterate(const std::function<void(const Index &, Object &)> &fn)
- {
- // TODO Remove this workaround
- // This implementation is a workaround in case of adding operands while iteration
- std::list<Index> l;
-
- for (auto &e : _objects)
- {
- l.push_back(e.first);
- }
-
- for (auto index : l)
- {
- fn(index, *_objects[index]);
- }
- }
-
-private:
- Index generateIndex() { return Index{_index_count++}; }
-
-protected:
- std::unordered_map<Index, std::unique_ptr<Object>> _objects;
- uint32_t _index_count;
-};
-
-} // namespace util
-} // namespace onert
-
-#endif // __ONERT_UTIL_OBJECT_MANAGER_H__
diff --git a/runtime/onert/core/include/util/Set.h b/runtime/onert/core/include/util/Set.h
deleted file mode 100644
index ee4062d25..000000000
--- a/runtime/onert/core/include/util/Set.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Set.h
- * @brief This file contains onert::util::Set class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __ONERT_UTIL_SET_H__
-#define __ONERT_UTIL_SET_H__
-
-#include <cassert>
-#include <unordered_set>
-
-namespace onert
-{
-namespace util
-{
-
-/**
- * @brief Class for set of custom element
- & @tparam Element Key type of Set
- */
-template <typename Element> class Set
-{
-public:
- /**
- * @brief Construct default Set object.
- */
- Set() = default;
- /**
- * @brief Construct Set object by copy semantics.
- */
- Set(const Set<Element> &) = default;
- /**
- * @brief Construct move Set object by move semantics.
- */
- Set(Set<Element> &&) = default;
-
-public:
- /**
- * @brief Add a given element to the set
- *
- * @param e Element added
- */
- void add(const Element &e) { _set.insert(e); }
- /**
- * @brief remove a given element from the set
- *
- * @param e Element removed
- */
- void remove(const Element &e) { _set.erase(e); }
- /**
- * @brief Get size of the set
- *
- * @return The size of the set
- */
- uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
- /**
- * @brief Get whether the set is empty
- *
- * @return Whether the set is empty
- */
- bool empty() const { return _set.empty(); }
- /**
- * @brief Get whether a given element exists in the set
- *
- * @param e A given element
- *
- * @return Whether a given element exists in the set
- */
- bool contains(const Element &e) const { return _set.find(e) != _set.end(); }
- /**
- * @brief Get first element of the set
- *
- * @return first element of the set
- */
- const Element &getOnlyElement() const
- {
- assert(_set.size() == 1u);
- return *_set.begin();
- }
-
-public:
- /**
- * @brief operator overloading function for `|`
- *
- * @return A set with two sets combined
- */
- Set<Element> operator|(const Set<Element> &other) const // Union
- {
- auto ret = *this;
- for (auto e : other)
- {
- ret.add(e);
- }
- return ret;
- }
- /**
- * @brief operator overloading function for `&`
- *
- * @return A set of elements that overlap in two sets
- */
- Set<Element> operator&(const Set<Element> &other) const // Intersect
- {
- Set<Element> ret;
- for (auto e : other)
- {
- if (contains(e))
- {
- ret.add(e);
- }
- }
- return ret;
- }
- /**
- * @brief operator overloading function for `-`
- *
- * @return A set of subtracted from another set
- */
- Set<Element> operator-(const Set<Element> &other) const // Minus
- {
- auto ret = *this;
- for (auto e : other)
- {
- ret.remove(e);
- }
- return ret;
- }
-
-public:
- /**
- * @brief begin() of const_iterator for this class
- *
- * @return The first iterator of the set
- */
- typename std::unordered_set<Element>::const_iterator begin() const { return _set.begin(); }
- /**
- * @brief end() of const_iterator for this class
- *
- * @return The last iterator of the set
- */
- typename std::unordered_set<Element>::const_iterator end() const { return _set.end(); }
-
-private:
- std::unordered_set<Element> _set;
-};
-
-} // namespace util
-} // namespace onert
-
-#endif // __ONERT_UTIL_SET_H__
diff --git a/runtime/onert/core/include/util/ShapeInference.h b/runtime/onert/core/include/util/ShapeInference.h
deleted file mode 100644
index 701b835d2..000000000
--- a/runtime/onert/core/include/util/ShapeInference.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_GRAPH_SHAPE_INFERENCE_H__
-#define __ONERT_GRAPH_SHAPE_INFERENCE_H__
-
-#include "Utils.h"
-
-#include "ir/operation/Concat.h"
-#include "ir/operation/Conv2D.h"
-#include "ir/operation/DepthwiseConv2D.h"
-#include "ir/operation/Pool2D.h"
-#include "ir/operation/Reshape.h"
-#include "ir/operation/StridedSlice.h"
-#include "compiler/LoweredGraph.h"
-#include "ir/Index.h"
-#include "ir/Layout.h"
-#include "ir/OperationVisitor.h"
-#include "backend/IDynamicTensorManager.h"
-#include "backend/ITensor.h"
-#include "backend/ITensorRegistry.h"
-
-namespace onert
-{
-namespace shape_inference
-{
-
-using Shapes = std::vector<ir::Shape>;
-
-// Define shape calculation for operations. List them in alphabetic order.
-
-ir::Shape inferArgMaxShape(const ir::Shape &input_shape, int axis, int rank);
-
-ir::Shape inferBatchMatMulShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape,
- const ir::operation::BatchMatMul::Param &param);
-
-ir::Shape inferBCQFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &cluster_shape,
- const int32_t *cluster_buf);
-
-ir::Shape inferBCQGatherShape(const ir::Shape &indices_shape, const ir::Shape &cluster_shape,
- const int32_t *cluster_buf, int rank,
- const ir::operation::BCQGather::Param &param);
-
-ir::Shape inferBroadcastToShape(const ir::Shape shp_shape, const int32_t *shp_buf);
-
-ir::Shape inferConcatShape(const Shapes &in_shapes, const ir::operation::Concat::Param &param);
-
-ir::Shape inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape,
- const ir::operation::Conv2D::Param &param,
- ir::Layout layout = ir::Layout::NHWC);
-
-ir::Shape inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape,
- const ir::operation::DepthwiseConv2D::Param &param,
- ir::Layout layout = ir::Layout::NHWC);
-
-ir::Shape inferEltwiseShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape);
-
-ir::Shape inferExpandDimsShape(const ir::Shape &in_shape, int32_t axis);
-
-ir::Shape inferFillShape(const ir::Shape &in_shape, const int32_t *in_buf);
-
-ir::Shape inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape);
-
-ir::Shape inferGatherShape(const ir::Shape &input_shape, const ir::Shape &indices_shape, int axis,
- int rank);
-
-ir::Shape inferOnehotShape(const ir::Shape &input_shape, const int depth, int axis);
-
-ir::Shape inferPackShape(const ir::Shape &input_shape, int axis, int rank, int num);
-
-ir::Shape inferPadShape(const ir::Shape &in_shape, const int32_t *pad_buf, const size_t num_pads);
-
-ir::Shape inferPoolShape(const ir::Shape &in_shape, const ir::operation::Pool2D::Param &param,
- ir::Layout layout = ir::Layout::NHWC);
-
-template <typename T> ir::Shape inferRangeShape(T start_val, T limit_val, T delta_val);
-
-ir::Shape inferReshapeShape(const int32_t *shape_buf, const int32_t shape_num_elements,
- const size_t total_num_elements);
-
-ir::Shape inferReduceShape(const ir::Shape &input_shape, const std::vector<int> &axes,
- bool keep_dims);
-
-template <float *> ir::Shape inferRangeShape(float *start_val, float *limit_val, float *delta_val);
-
-template <typename T> ir::Shape inferRangeShape(T start_val, T limit_val, T delta_val);
-
-ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height,
- const int32_t output_width);
-
-ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
- const ir::Shape &input_false_shape);
-
-ir::Shape inferSliceShape(const ir::Shape &input_shape, const int32_t *begins_buf,
- const int32_t *sizes_buf);
-
-ir::Shape inferSpaceToBatchNDShape(const ir::Shape &input_shape, const ir::Shape &block_shape_shape,
- const ir::Shape &padding_shape, const int32_t *block_shape_buf,
- const int32_t *padding_buf);
-
-ir::Shape inferSplitShape(const ir::Shape input_shape, int axis_value, int num_splits);
-
-ir::Shape inferSqueezeShape(const ir::Shape &in_shape, const ir::operation::Squeeze::Param &param);
-
-struct StridedSliceParams
-{
- int8_t start_indices_count;
- int16_t start_indices[4];
- int8_t stop_indices_count;
- int16_t stop_indices[4];
- int8_t strides_count;
- int16_t strides[4];
-
- int16_t begin_mask;
- int16_t ellipsis_mask;
- int16_t end_mask;
- int16_t new_axis_mask;
- int16_t shrink_axis_mask;
-};
-
-template <typename T>
-StridedSliceParams buildStridedSliceParams(const T *begin, const T *end, const T *strides,
- const uint32_t begin_mask, const uint32_t end_mask,
- const uint32_t shrink_axis_mask, const uint8_t rank);
-
-ir::Shape inferStridedSliceShape(const ir::Shape &input_shape, const StridedSliceParams &op_params,
- uint32_t rank);
-
-ir::Shape inferTileShape(const ir::Shape &in_shape, const int32_t *multiplier_buf,
- const int32_t multiplier_size);
-
-ir::Shape inferTransposeShape(const ir::Shape &in_shape, const int32_t *perm_buf,
- const int32_t rank);
-
-ir::Shape inferUnpackShape(const ir::Shape &input_shape, int axis, int rank);
-
-} // namespace shape_inference
-} // namespace onert
-
-#endif // __ONERT_GRAPH_SHAPE_INFERENCE_H__
diff --git a/runtime/onert/core/include/util/Utils.h b/runtime/onert/core/include/util/Utils.h
deleted file mode 100644
index 8a4eea32b..000000000
--- a/runtime/onert/core/include/util/Utils.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Utils.h
- * @brief This file contains utility macro
- */
-
-#ifndef __ONERT_UTIL_UTILS_H__
-#define __ONERT_UTIL_UTILS_H__
-
-#include "ir/Coordinates.h"
-#include "ir/Shape.h"
-
-#define UNUSED_RELEASE(a) (void)(a)
-
-template <size_t from, size_t to, typename Enable = void> struct ForEachDimension
-{
- template <typename L, typename... Args>
- static void unroll(const onert::ir::Shape &shape, onert::ir::Coordinates &coords,
- L &&lambda_function, Args &&... args)
- {
- static_assert(from < to, "from must not be less than to");
- assert(static_cast<int>(to) <= shape.rank());
- const auto &d = shape.dim(from);
-
- for (auto v = 0; v < d; v++)
- {
- coords.set(from, v);
- ForEachDimension<from + 1, to>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- }
- }
-};
-
-template <size_t from, size_t to>
-struct ForEachDimension<from, to, typename std::enable_if<from == to>::type>
-{
- template <typename L, typename... Args>
- static void unroll(const onert::ir::Shape &shape, onert::ir::Coordinates &coords,
- L &&lambda_function, Args &&... args)
- {
- UNUSED_RELEASE(shape);
- assert(static_cast<int>(to) <= shape.rank());
- lambda_function(coords, std::forward<Args>(args)...);
- }
-};
-
-template <typename L, typename... Args>
-inline void ShapeLoop(const onert::ir::Shape &shape, L &&lambda_function, Args &&... args)
-{
- assert(shape.rank() > 0);
- for (auto i = 0; i < shape.rank(); ++i)
- {
- assert(shape.dim(i) > 0);
- }
-
- onert::ir::Coordinates coords;
- switch (shape.rank())
- {
- case 0:
- coords.set(0, 0);
- ForEachDimension<0, 0>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- break;
- case 1:
- ForEachDimension<0, 1>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- break;
- case 2:
- ForEachDimension<0, 2>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- break;
- case 3:
- ForEachDimension<0, 3>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- break;
- case 4:
- ForEachDimension<0, 4>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- break;
- case 5:
- ForEachDimension<0, 5>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- break;
- case 6:
- ForEachDimension<0, 6>::unroll(shape, coords, std::forward<L>(lambda_function),
- std::forward<Args>(args)...);
- break;
- default:
- assert(false && "ShapeLoop, 1 <= Shape'rank <= 6");
- break;
- }
-}
-#endif // __ONERT_UTIL_UTILS_H__
diff --git a/runtime/onert/core/include/util/logging.h b/runtime/onert/core/include/util/logging.h
deleted file mode 100644
index 76cfb8d60..000000000
--- a/runtime/onert/core/include/util/logging.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_LOGGING_H__
-#define __ONERT_UTIL_LOGGING_H__
-
-#include <iostream>
-
-#include "util/ConfigSource.h"
-
-namespace onert
-{
-namespace util
-{
-namespace logging
-{
-
-class Context
-{
-public:
- Context() noexcept : _enabled{false}
- {
- const auto env = util::getConfigBool(util::config::ONERT_LOG_ENABLE);
-
- if (env)
- {
- _enabled = true;
- }
- }
-
- static Context &get() noexcept;
-
-public:
- bool enabled(void) const { return _enabled; }
-
-private:
- bool _enabled;
-};
-
-static Context &ctx = Context::get();
-
-} // namespace logging
-} // namespace util
-} // namespace onert
-
-#define VERBOSE(name) \
- if (::onert::util::logging::ctx.enabled()) \
- std::cout << "[" << #name << "] "
-
-#define VERBOSE_F() \
- if (::onert::util::logging::ctx.enabled()) \
- std::cout << "[" << __func__ << "] "
-
-#endif // __ONERT_UTIL_LOGGING_H__
diff --git a/runtime/onert/core/src/backend/BackendContext.cc b/runtime/onert/core/src/backend/BackendContext.cc
deleted file mode 100644
index bafa36d28..000000000
--- a/runtime/onert/core/src/backend/BackendContext.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/BackendContext.h"
-
-#include "ir/Operation.h"
-#include "backend/IConstantInitializer.h"
-
-namespace onert
-{
-namespace backend
-{
-
-void BackendContext::initialize(const std::vector<OperationInfo> &operation_list,
- const std::vector<ir::OperandIndex> &operand_list)
-{
- _operation_list = operation_list;
- _operand_list = operand_list;
-}
-
-void BackendContext::initConsts()
-{
- for (auto &op : _operation_list)
- {
- constant_initializer->setLayout(op.layout);
- _graph->operations().at(op.index).accept(*constant_initializer);
- }
-
- for (auto ind : _operand_list)
- {
- const auto &obj = _graph->operands().at(ind);
- if (obj.isConstant() && !constant_initializer->exist(ind))
- {
- constant_initializer->registerDefaultInitializer(ind, obj);
- }
- }
-
- constant_initializer->run();
-}
-
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/IConstantInitializer.cc b/runtime/onert/core/src/backend/IConstantInitializer.cc
deleted file mode 100644
index 934a42753..000000000
--- a/runtime/onert/core/src/backend/IConstantInitializer.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/IConstantInitializer.h"
-
-#include <Half.h>
-
-using float16 = Half;
-
-namespace onert
-{
-namespace backend
-{
-
-void IConstantInitializer::registerCopyInitializer(const ir::OperandIndex &index,
- const ir::Operand &obj)
-{
- // For only CONSTANTS
- // TODO Add to check if tensor has been allocated
- if (!obj.isConstant())
- return;
-
- const auto type = obj.typeInfo().type();
- using ir::DataType;
-
- switch (type)
- {
- case DataType::FLOAT32:
- _init_map[index] = copyInit<float>;
- break;
- case DataType::INT32:
- _init_map[index] = copyInit<int32_t>;
- break;
- case DataType::UINT32:
- _init_map[index] = copyInit<uint32_t>;
- break;
- case DataType::BOOL8:
- case DataType::QUANT_UINT8_ASYMM:
- _init_map[index] = copyInit<uint8_t>;
- break;
- case DataType::QUANT_INT8_SYMM:
- _init_map[index] = copyInit<int8_t>;
- break;
- case DataType::FLOAT16:
- _init_map[index] = copyInit<float16>;
- break;
- case DataType::INT64:
- _init_map[index] = copyInit<int64_t>;
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-void IConstantInitializer::registerPermuteInitializer(const ir::OperandIndex &index,
- const ir::Operand &obj)
-{
- // For only CONSTANTS
- // TODO Add to check if tensor has been allocated
- if (!obj.isConstant())
- return;
-
- const auto type = obj.typeInfo().type();
- using ir::DataType;
- using namespace std::placeholders;
-
- switch (type)
- {
- case DataType::FLOAT32:
- _init_map[index] = std::bind(permuteInit<float>, _1, _2, _current_op_seq_layout);
- break;
- case DataType::INT32:
- _init_map[index] = std::bind(permuteInit<int32_t>, _1, _2, _current_op_seq_layout);
- break;
- case DataType::UINT32:
- _init_map[index] = std::bind(permuteInit<uint32_t>, _1, _2, _current_op_seq_layout);
- break;
- case DataType::BOOL8:
- case DataType::QUANT_UINT8_ASYMM:
- _init_map[index] = std::bind(permuteInit<uint8_t>, _1, _2, _current_op_seq_layout);
- break;
- case DataType::QUANT_INT8_SYMM:
- _init_map[index] = std::bind(permuteInit<int8_t>, _1, _2, _current_op_seq_layout);
- break;
- case DataType::FLOAT16:
- _init_map[index] = std::bind(permuteInit<float16>, _1, _2, _current_op_seq_layout);
- break;
- case DataType::INT64:
- _init_map[index] = std::bind(permuteInit<int64_t>, _1, _2, _current_op_seq_layout);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/IPortableTensor.cc b/runtime/onert/core/src/backend/IPortableTensor.cc
deleted file mode 100644
index cec34e780..000000000
--- a/runtime/onert/core/src/backend/IPortableTensor.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/IPortableTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-
-// `dynamic_cast` not working across library boundaries on NDK
-// With this as a key function, `dynamic_cast` works across dl
-IPortableTensor::~IPortableTensor() {}
-
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/ITensor.cc b/runtime/onert/core/src/backend/ITensor.cc
deleted file mode 100644
index 7127ed93d..000000000
--- a/runtime/onert/core/src/backend/ITensor.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/ITensor.h"
-
-namespace onert
-{
-namespace backend
-{
-
-ir::Shape ITensor::getShape() const
-{
- onert::ir::Shape shape(num_dimensions());
- for (uint32_t d = 0; d < num_dimensions(); d++)
- shape.dim(d) = dimension(d);
-
- return shape;
-}
-
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/Backend.h b/runtime/onert/core/src/backend/controlflow/Backend.h
deleted file mode 100644
index 670f7750f..000000000
--- a/runtime/onert/core/src/backend/controlflow/Backend.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_BACKEND_H__
-#define __ONERT_BACKEND_CONTROLFLOW_BACKEND_H__
-
-#include "Config.h"
-#include "ConstantInitializer.h"
-#include "KernelGenerator.h"
-#include "TensorBuilder.h"
-#include "Tensor.h"
-
-#include <backend/Backend.h>
-
-#include <memory>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-class Backend : public ::onert::backend::Backend
-{
-public:
- Backend() : _config{std::make_shared<Config>()} {}
-
- std::shared_ptr<IConfig> config() const override { return _config; }
-
- std::unique_ptr<BackendContext> newContext(const ir::Graph &graph,
- const std::shared_ptr<custom::IKernelBuilder> &,
- bool) const override
- {
- const auto &operands = graph.operands();
- auto context = std::make_unique<BackendContext>(this, &graph);
- // ControlFlow backend may not build tensors for itself because the backend's operation uses
- // tensors of other baceknd instead
- // But the backend builds tensors in case of that the controlflow operation may have constant
- // input or that consecutive controflow operations exist. We have to make them not to be built
- // later
- // 1. Constant input
- // These tensors cannot be dynamic tensor, so let's do it as follows:
- // - always skip copying
- // - if it is operation's input in child subgraph: register "use" as constant input of the
- // operations in child subgraph
- // - if it is child subgraph's output: register "use" as constant input of the operations
- // using it
- // 2. Consecutive controflow operation's intermediate tensor
- // These tensors can be dynamic tensor and this is complicated to support without copying. But
- // there is no such case until now, let's support it later
- // TODO Remove TensorBuilder and ConstantInitializer
- // TODO Support Consecutive controflow operation's intermediate tensor
- auto tr = std::make_shared<TensorRegistry>();
- auto tb = std::make_shared<TensorBuilder>(tr);
- context->tensor_registry = tr;
- context->tensor_builder = tb;
- context->constant_initializer = std::make_shared<ConstantInitializer>(operands, tr);
- context->kernel_gen = std::make_shared<KernelGenerator>(graph, tb->dynamicTensorManager(), tr);
- context->tensor_register = nullptr;
- context->optimizer = nullptr;
- return context;
- }
-
-private:
- std::shared_ptr<IConfig> _config;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_BACKEND_H__
diff --git a/runtime/onert/core/src/backend/controlflow/BackendContext.h b/runtime/onert/core/src/backend/controlflow/BackendContext.h
deleted file mode 100644
index d179bfde4..000000000
--- a/runtime/onert/core/src/backend/controlflow/BackendContext.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__
-#define __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__
-
-#include <backend/BackendContext.h>
-#include "ExternalContext.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-class BackendContext : public onert::backend::BackendContext
-{
-public:
- BackendContext(const Backend *backend, const ir::Graph *graph,
- std::shared_ptr<ITensorRegistry> tensor_registry = nullptr,
- std::shared_ptr<ITensorBuilder> tensor_builder = nullptr,
- std::shared_ptr<IConstantInitializer> constant_initializer = nullptr,
- std::shared_ptr<IKernelGenerator> kernel_gen = nullptr,
- std::shared_ptr<ITensorRegister> tensor_register = nullptr,
- std::shared_ptr<IOptimizer> optimizer = nullptr)
- : onert::backend::BackendContext(backend, graph, tensor_registry, tensor_builder,
- constant_initializer, kernel_gen, tensor_register,
- optimizer),
- _external_context(std::make_shared<ExternalContext>())
- {
- }
-
- std::shared_ptr<ExternalContext> external_context() { return _external_context; }
-
-private:
- // NOTE ruy context has a thread pool, and when multiple ruy contexts are created,
- // the thread pool is also created in duplicate
- // TODO Create one ruy context for session
- std::shared_ptr<ExternalContext> _external_context;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__
diff --git a/runtime/onert/core/src/backend/controlflow/Config.cc b/runtime/onert/core/src/backend/controlflow/Config.cc
deleted file mode 100644
index 5ec01fe11..000000000
--- a/runtime/onert/core/src/backend/controlflow/Config.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Config.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-std::string Config::ID = "controlflow";
-
-bool Config::initialize() { return true; }
-
-ir::Layout Config::supportLayout(const ir::Operation &, ir::Layout frontend_layout)
-{
- return frontend_layout;
-}
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/Config.h b/runtime/onert/core/src/backend/controlflow/Config.h
deleted file mode 100644
index 6645ed59d..000000000
--- a/runtime/onert/core/src/backend/controlflow/Config.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_CONFIG_H__
-#define __ONERT_BACKEND_CONTROLFLOW_CONFIG_H__
-
-#include <backend/IConfig.h>
-#include <memory>
-#include <util/ITimer.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-class Config : public IConfig
-{
-public:
- static std::string ID;
- std::string id() override { return ID; }
- bool initialize() override;
- ir::Layout supportLayout(const ir::Operation &node, ir::Layout frontend_layout) override;
- bool supportPermutation() override { return false; }
- bool supportDynamicTensor() override
- {
- // TODO Make this backend to support dynamic tensor or not to build non-constant tensor
- return true;
- }
- bool supportFP16() override { return false; }
-
- std::unique_ptr<util::ITimer> timer() override { return std::make_unique<util::CPUTimer>(); }
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_CONFIG_H__
diff --git a/runtime/onert/core/src/backend/controlflow/ConstantInitializer.h b/runtime/onert/core/src/backend/controlflow/ConstantInitializer.h
deleted file mode 100644
index e21a8f357..000000000
--- a/runtime/onert/core/src/backend/controlflow/ConstantInitializer.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_CONTROLFLOW_CONSTANT_INITIALIZER_H__
-#define __ONERT_COMPILER_CONTROLFLOW_CONSTANT_INITIALIZER_H__
-
-#include "TensorRegistry.h"
-
-#include <backend/IConstantInitializer.h>
-#include <ir/Operands.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-class ConstantInitializer : public IConstantInitializer
-{
-public:
- ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<ITensorRegistry> &tensor_reg)
- : IConstantInitializer{operands}, _tensor_reg{tensor_reg}
- {
- }
-
-private:
- std::shared_ptr<ITensorRegistry> tensor_registry() const override { return _tensor_reg; }
-
-private:
- std::shared_ptr<ITensorRegistry> _tensor_reg;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_COMPILER_CONTROLFLOW_CONSTANT_INITIALIZER_H__
diff --git a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc b/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc
deleted file mode 100644
index 77f02969d..000000000
--- a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "DynamicTensorManager.h"
-
-#include "util/logging.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-DynamicTensorManager::DynamicTensorManager(const std::shared_ptr<TensorRegistry> &tensors)
- : _dynamic_mem_mgr{new cpu_common::DynamicMemoryManager()}, _tensors{tensors}
-{
- // DO NOTHING
-}
-
-void DynamicTensorManager::buildTensor(const ir::OperandIndex &ind,
- const ir::OperandInfo &tensor_info,
- ir::Layout backend_layout)
-{
- auto tensor =
- std::make_unique<cpu_common::Tensor>(tensor_info, backend_layout, _dynamic_mem_mgr.get());
- _tensors->setNativeOwnTensor(ind, std::move(tensor));
-}
-
-void DynamicTensorManager::planDealloc(ir::OperationIndex op_ind, backend::ITensor *tensor)
-{
- _dealloc_tensor_map[op_ind].emplace(tensor);
-}
-
-void DynamicTensorManager::deallocInput(ir::OperationIndex op_ind)
-{
- auto find = _dealloc_tensor_map.find(op_ind);
- if (find == _dealloc_tensor_map.end())
- return;
-
- auto &input_set = find->second;
- for (auto *tensor : input_set)
- {
- if (!tensor->is_dynamic())
- continue;
-
- _dynamic_mem_mgr->deallocate(tensor);
-
- auto *cpu_tensor = nnfw::misc::polymorphic_downcast<cpu_common::Tensor *>(tensor);
- cpu_tensor->resetBuffer();
-
- VERBOSE(DynamicTensorManager) << "Deallocating a tensor " << (void *)tensor
- << " (input of op_ind: " << op_ind.value() << ")" << std::endl;
- }
-}
-
-const ITensor *DynamicTensorManager::getRawITensor(ir::OperandIndex ind)
-{
- auto ptr = _tensors->getITensor(ind);
- assert(ptr);
- return ptr;
-}
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h b/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h
deleted file mode 100644
index fb822a917..000000000
--- a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_DYNAMICTENSOR_MANAGER_H__
-#define __ONERT_BACKEND_CONTROLFLOW_DYNAMICTENSOR_MANAGER_H__
-
-#include "TensorRegistry.h"
-#include "Tensor.h"
-
-#include <backend/IDynamicTensorManager.h>
-#include <backend/cpu_common/MemoryManager.h>
-#include <ir/OperandInfo.h>
-#include <ir/Operation.h>
-#include <ir/Index.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-/**
- * @brief Class to manage dynamic tensor and its memory
- */
-class DynamicTensorManager : public backend::IDynamicTensorManager
-{
-public:
- DynamicTensorManager(const std::shared_ptr<TensorRegistry> &tensors);
-
- virtual ~DynamicTensorManager() = default;
-
- void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info,
- ir::Layout backend_layout);
-
- void planDealloc(ir::OperationIndex op_ind, backend::ITensor *tensor) override;
- void deallocInput(ir::OperationIndex op_ind) override;
-
- std::shared_ptr<cpu_common::DynamicMemoryManager> dynamic_mem_mgr() { return _dynamic_mem_mgr; }
-
-private:
- const ITensor *getRawITensor(ir::OperandIndex ind);
-
-private:
- /**
- * @brief Memory manager for dynamic tensor.
- * @todo DynamicMemoryManager is not optimized. Optimized one is needed
- */
- std::shared_ptr<cpu_common::DynamicMemoryManager> _dynamic_mem_mgr;
- const std::shared_ptr<TensorRegistry> _tensors;
-
- // contains list of dynamic tensor, which can be deallocated after running operation
- // note: this map could contain static tensor too. Careful use is required.
- std::unordered_map<ir::OperationIndex, std::unordered_set<backend::ITensor *>>
- _dealloc_tensor_map;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_DYNAMICTENSOR_MANAGER_H__
diff --git a/runtime/onert/core/src/backend/controlflow/ExternalContext.h b/runtime/onert/core/src/backend/controlflow/ExternalContext.h
deleted file mode 100644
index 58bccb6c6..000000000
--- a/runtime/onert/core/src/backend/controlflow/ExternalContext.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_EXTERNAL_CONTEXT_H__
-#define __ONERT_BACKEND_CONTROLFLOW_EXTERNAL_CONTEXT_H__
-
-#include <backend/IExternalContext.h>
-#include <util/ConfigSource.h>
-#include <ruy/context.h>
-
-namespace
-{
-const int kDefaultNumThreadpoolThreads = 1;
-}
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-// TODO Unify this with cpu::ExternalContext
-class ExternalContext : public IExternalContext
-{
-public:
- ExternalContext() : _ruy_context(nullptr)
- {
- // setMaxNumThreads(onert::util::getConfigInt(onert::util::config::RUY_THREADS));
- }
-
- void setMaxNumThreads(int max_num_threads)
- {
- const int target_num_threads =
- max_num_threads > -1 ? max_num_threads : kDefaultNumThreadpoolThreads;
- _ruy_context->set_max_num_threads(target_num_threads);
- }
-
- ruy::Context *ruy_context() const { return _ruy_context.get(); }
-
-private:
- const std::unique_ptr<ruy::Context> _ruy_context;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_EXTERNAL_CONTEXT_H__
diff --git a/runtime/onert/core/src/backend/controlflow/KernelGenerator.cc b/runtime/onert/core/src/backend/controlflow/KernelGenerator.cc
deleted file mode 100644
index d76ca53e3..000000000
--- a/runtime/onert/core/src/backend/controlflow/KernelGenerator.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "KernelGenerator.h"
-
-#include <backend/BackendContext.h>
-#include <util/Utils.h>
-#include "kernel/IfLayer.h"
-#include "kernel/WhileLayer.h"
-#include "kernel/PermuteLayer.h"
-#include "exec/ExecutorBase.h"
-#include "exec/FunctionSequence.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-KernelGenerator::KernelGenerator(const ir::Graph &graph, IDynamicTensorManager *dyn_tensor_manager,
- const std::shared_ptr<TensorRegistry> &tensor_reg)
- : _graph{graph}, _dyn_tensor_manager{dyn_tensor_manager}, _tensor_reg{tensor_reg},
- _tensor_registries{}, _executor_map{nullptr}
-{
- UNUSED_RELEASE(_graph);
- UNUSED_RELEASE(_tensor_registries);
- UNUSED_RELEASE(_executor_map);
-}
-
-void KernelGenerator::visit(const ir::OpSequence &op_seq)
-{
- assert(!_return_fn_seq);
- assert(_dyn_tensor_manager);
- assert(_tensor_reg);
-
- auto dyn_shape_inferer =
- std::make_unique<exec::DynamicShapeInferer>(_graph.operands(), _tensor_reg);
-
- _return_fn_seq = std::make_unique<exec::FunctionSequence>();
-
- // Prepare to handle dynamic tensors later
- auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
- {
- dyn_ctx->op_seq = &op_seq;
- dyn_ctx->operations = &_graph.operations();
- dyn_ctx->dynamic_shape_inferer = std::move(dyn_shape_inferer);
- dyn_ctx->dynamic_tensor_manager = _dyn_tensor_manager;
-
- _return_fn_seq->dynamic_tensor_ctx(dyn_ctx);
- }
-
- for (const auto &op_idx : op_seq.operations())
- {
- const auto &node = _graph.operations().at(op_idx);
- node.accept(*this);
- _return_fn_seq->append(releaseFunction());
- }
-}
-
-void KernelGenerator::visit(const ir::operation::If &node)
-{
- const auto then_subg_index = node.param().then_subg_index;
- const auto else_subg_index = node.param().else_subg_index;
-
- std::vector<backend::ITensor *> input_tensors;
- for (const auto input_index : node.getInputs())
- {
- auto input_tensor = getTensor(input_index);
-
- input_tensors.emplace_back(input_tensor);
- }
-
- std::vector<backend::ITensor *> output_tensors;
- for (const auto output_index : node.getOutputs())
- {
- auto output_tensor = getTensor(output_index);
- output_tensors.emplace_back(output_tensor);
- }
-
- // IfLayer just set ExecutorMap instead of then and else executor to avoid complexity of
- // creating executor recusively
- const auto cond_tensor = input_tensors.front();
- input_tensors.erase(input_tensors.begin());
- auto fn = std::make_unique<::onert::backend::controlflow::kernel::IfLayer>(
- cond_tensor, input_tensors, output_tensors, node.getOutputs(), _graph, then_subg_index,
- else_subg_index, _executor_map);
-
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::Permute &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(0)};
-
- // Add PermuteLayer
- std::vector<ITensor *> output_tensors{getTensor(output_index)};
- std::vector<ITensor *> input_tensors{getTensor(input_index)};
-
- auto fn = std::make_unique<kernel::PermuteLayer>(input_tensors, output_tensors);
- _return_fn = std::move(fn);
-}
-
-void KernelGenerator::visit(const ir::operation::While &node)
-{
- const auto cond_subg_index = node.param().cond_subg_index;
- const auto body_subg_index = node.param().body_subg_index;
-
- // This op does not support input as a constant, because controlflow backend does not have
- // TensorBuilder
- std::vector<backend::ITensor *> input_tensors;
- for (const auto input_index : node.getInputs())
- {
- auto input_tensor = getTensor(input_index);
-
- input_tensors.emplace_back(input_tensor);
- }
-
- std::vector<backend::ITensor *> output_tensors;
- for (const auto output_index : node.getOutputs())
- {
- auto output_tensor = getTensor(output_index);
- output_tensors.emplace_back(output_tensor);
- }
-
- // WhileLayer just set ExecutorMap instead of cond and body executor to avoid complexity of
- // creating executor recusively
- auto fn = std::make_unique<::onert::backend::controlflow::kernel::WhileLayer>(
- input_tensors, output_tensors, node.getOutputs(), _graph, cond_subg_index, body_subg_index,
- _executor_map);
-
- _return_fn = std::move(fn);
-}
-
-backend::ITensor *KernelGenerator::getTensor(const ir::OperandIndex &index)
-{
- backend::ITensor *ret = _tensor_registries.getITensor(index);
- assert(ret != nullptr);
- return ret;
-}
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/KernelGenerator.h b/runtime/onert/core/src/backend/controlflow/KernelGenerator.h
deleted file mode 100644
index ce248913f..000000000
--- a/runtime/onert/core/src/backend/controlflow/KernelGenerator.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_KERNEL_GENERATOR_H__
-#define __ONERT_BACKEND_CONTROLFLOW_KERNEL_GENERATOR_H__
-
-#include <backend/IKernelGenerator.h>
-#include <backend/ITensorBuilder.h>
-#include <exec/IExecutor.h>
-#include <ir/Graph.h>
-#include "TensorBuilder.h"
-#include "compiler/TensorRegistries.h"
-#include "TensorRegistry.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-class KernelGenerator : public IKernelGenerator
-{
-public:
- KernelGenerator(const ir::Graph &graph, IDynamicTensorManager *dyn_tensor_manager,
- const std::shared_ptr<TensorRegistry> &tensor_reg);
-
- void setTensorRegistries(const compiler::TensorRegistries &tensor_registries)
- {
- _tensor_registries = tensor_registries;
- }
- void setExecutorMap(const std::shared_ptr<exec::ExecutorMap> &executor_map)
- {
- // FIXME Using shared_ptr's raw pointer!
- _executor_map = executor_map.get();
- }
-
- using IKernelGenerator::visit;
-
- void visit(const ir::OpSequence &) override;
- void visit(const ir::operation::If &) override;
- void visit(const ir::operation::Permute &) override;
- void visit(const ir::operation::While &) override;
-
-private:
- backend::ITensor *getTensor(const ir::OperandIndex &index);
-
-private:
- const ir::Graph &_graph;
- IDynamicTensorManager *_dyn_tensor_manager;
- std::shared_ptr<TensorRegistry> _tensor_reg;
- compiler::TensorRegistries _tensor_registries;
- exec::ExecutorMap *_executor_map;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_KERNEL_GENERATOR_H__
diff --git a/runtime/onert/core/src/backend/controlflow/Tensor.h b/runtime/onert/core/src/backend/controlflow/Tensor.h
deleted file mode 100644
index ba5bafd75..000000000
--- a/runtime/onert/core/src/backend/controlflow/Tensor.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_TENSOR_H__
-#define __ONERT_BACKEND_CONTROLFLOW_TENSOR_H__
-
-#include <backend/cpu_common/Tensor.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-using Tensor = cpu_common::Tensor;
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_TENSOR_H__
diff --git a/runtime/onert/core/src/backend/controlflow/TensorBuilder.cc b/runtime/onert/core/src/backend/controlflow/TensorBuilder.cc
deleted file mode 100644
index 7d0ff201f..000000000
--- a/runtime/onert/core/src/backend/controlflow/TensorBuilder.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TensorBuilder.h"
-
-#include <util/logging.h>
-
-#include <cassert>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-TensorBuilder::TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg)
- : _tensor_reg{tensor_reg}, _dynamic_tensor_mgr{new DynamicTensorManager(_tensor_reg)},
- _static_tensor_mgr{new cpu_common::StaticTensorManager(
- _tensor_reg->base_reg(), _dynamic_tensor_mgr->dynamic_mem_mgr().get())}
-{
- /* empty */
-}
-
-void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
- ir::Layout backend_layout)
-{
- _tensor_info_map.emplace(ind, info);
-
- _tensor_layout_map.insert({ind, backend_layout});
-
- if (info.isDynamic())
- {
- _dynamic_tensor_mgr->buildTensor(ind, info, _tensor_layout_map[ind]);
- }
- else
- {
- _static_tensor_mgr->buildTensor(ind, info, _tensor_layout_map[ind], info.isConstant());
- }
-}
-
-void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
-{
- // TODO Enhance the way of checking user tensors
- if (_tensor_info_map.find(ind) == _tensor_info_map.end()) // Do not proceed for user tensors
- return;
-
- const auto tensor_info = _tensor_info_map.at(ind);
-
- if (!nativeOwnTensorAt(ind)->is_dynamic())
- {
- const auto size = tensor_info.total_size();
- _static_tensor_mgr->claimPlan(ind, size);
- }
-}
-
-void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind)
-{
- // TODO Enhance the way of checking user tensors
- if (_tensor_info_map.find(ind) == _tensor_info_map.end()) // Do not proceed for user tensors
- return;
-
- if (!nativeOwnTensorAt(ind)->is_dynamic())
- {
- _static_tensor_mgr->releasePlan(ind);
- }
-}
-
-bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const
-{
- // User tensors are not registered in _tensor_info_map but objects for them are exist
- // in the tensor registry.
- // TODO Enhance the way of checking user tensors
- if (_tensor_reg->getITensor(ind))
- return true;
- return _tensor_info_map.find(ind) != _tensor_info_map.end();
-}
-
-void TensorBuilder::prepare(void)
-{
- _static_tensor_mgr->allocateConsts();
- _static_tensor_mgr->allocateNonconsts();
-}
-
-void TensorBuilder::allocate()
-{
- // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
- // This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
-}
-
-IDynamicTensorManager *TensorBuilder::dynamicTensorManager(void)
-{
- return _dynamic_tensor_mgr.get();
-}
-
-cpu_common::Tensor *TensorBuilder::nativeOwnTensorAt(const ir::OperandIndex &ind)
-{
- return _tensor_reg->getNativeOwnTensor(ind);
-}
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/TensorBuilder.h b/runtime/onert/core/src/backend/controlflow/TensorBuilder.h
deleted file mode 100644
index 695994761..000000000
--- a/runtime/onert/core/src/backend/controlflow/TensorBuilder.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_TENSOR_BUILDER_H__
-#define __ONERT_BACKEND_CONTROLFLOW_TENSOR_BUILDER_H__
-
-#include <backend/cpu_common/StaticTensorManager.h>
-#include <backend/cpu_common/TensorRegistry.h>
-#include <backend/cpu_common/Tensor.h>
-
-#include <backend/ITensorBuilder.h>
-#include <ir/OperandIndexMap.h>
-
-#include <unordered_map>
-
-#include "DynamicTensorManager.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-class TensorBuilder : public ITensorBuilder
-{
-public:
- TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg);
-
- /**
- * @brief Register tensor information to allocate on CPU backend
- * @param[in] ind Operand index
- * @param[in] info Operand information
- * @param[in] layout Operand data layout
- */
- void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
- ir::Layout backend_layout) override;
-
- void notifyFirstUse(const ir::OperandIndex &) override;
- void notifyLastUse(const ir::OperandIndex &) override;
-
- bool isRegistered(const ir::OperandIndex &) const override;
-
- void prepare(void) override;
- void allocate() override;
- void postFunctionPrepare() override { /* DO NOTHING */}
-
- IDynamicTensorManager *dynamicTensorManager(void) override;
-
- /**
- * @brief Get tensor with a specific OperandIndex.
- * @param ind OperandIndex for the tensor. There must exist a tensor with this ind.
- * If not, program will crash with assert or exception.
- * @return operand::Tensor *
- */
- cpu_common::Tensor *nativeOwnTensorAt(const ir::OperandIndex &ind);
-
-private:
- const std::shared_ptr<TensorRegistry> _tensor_reg;
- std::unique_ptr<DynamicTensorManager> _dynamic_tensor_mgr;
- std::unique_ptr<cpu_common::StaticTensorManager> _static_tensor_mgr;
- ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
- ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_TENSOR_BUILDER_H__
diff --git a/runtime/onert/core/src/backend/controlflow/TensorRegistry.h b/runtime/onert/core/src/backend/controlflow/TensorRegistry.h
deleted file mode 100644
index 94f71bb9c..000000000
--- a/runtime/onert/core/src/backend/controlflow/TensorRegistry.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_TENSOR_REGISTRY_H__
-#define __ONERT_BACKEND_CONTROLFLOW_TENSOR_REGISTRY_H__
-
-#include "backend/cpu_common/TensorRegistry.h"
-#include "backend/ITensorRegistry.h"
-#include "Tensor.h"
-#include "UserTensor.h"
-#include <assert.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-/**
- * @brief Tensor registry class for controlflow backend
- *
- * This class contains three types of tensors. Two native tensors(tensors that are managed by this
- * backend) and the other is migrant tensor.
- *
- * - NativeUserTensor - @c UserTensor managed by this backend, buffer is user-given
- * - NativeOwnTensor - @c cpu_common::Tensor managed by this backend ( in @c _base_reg )
- * - MigrantTensor - @c IPortableTensor managed by other backends ( in @c _base_reg )
- *
- * @note @c _base_reg is used in implementation to reuse @c cpu_common::StaticTensorManager
- *
- */
-class TensorRegistry : public ITensorRegistry
-{
-public:
- TensorRegistry() : _base_reg{new cpu_common::TensorRegistry} {}
-
- ITensor *getITensor(const ir::OperandIndex &ind) override
- {
- auto base_tensor = _base_reg->getITensor(ind);
- if (base_tensor)
- return base_tensor;
- return getNativeUserTensor(ind);
- }
-
- ITensor *getNativeITensor(const ir::OperandIndex &ind) override
- {
- auto base_tensor = _base_reg->getNativeITensor(ind);
- if (base_tensor)
- return base_tensor;
- return getNativeUserTensor(ind);
- }
-
- IPortableTensor *getPortableTensor(const ir::OperandIndex &ind)
- {
- auto base_tensor = _base_reg->getPortableTensor(ind);
- if (base_tensor)
- return base_tensor;
- return getNativeUserTensor(ind);
- }
-
- IPortableTensor *getNativeTensor(const ir::OperandIndex &ind)
- {
- auto base_tensor = _base_reg->getNativeTensor(ind);
- if (base_tensor)
- return base_tensor;
- return getNativeUserTensor(ind);
- }
-
- Tensor *getNativeOwnTensor(const ir::OperandIndex &ind)
- {
- return _base_reg->getNativeTensor(ind);
- }
-
- UserTensor *getNativeUserTensor(const ir::OperandIndex &ind)
- {
- auto tensor = _native_user_tensors.find(ind);
- if (tensor != _native_user_tensors.end())
- return tensor->second.get();
- return nullptr;
- }
-
- bool setMigrantTensor(const ir::OperandIndex &ind, IPortableTensor *tensor) override
- {
- assert(tensor);
- assert(!getITensor(ind)); // For the ind, tensor is not registered yet
- _base_reg->setMigrantTensor(ind, tensor);
- return true;
- }
-
- void setNativeOwnTensor(ir::OperandIndex ind, std::unique_ptr<Tensor> &&tensor)
- {
- assert(tensor);
- assert(!getITensor(ind)); // For the ind, tensor is not registered yet
- _base_reg->setNativeTensor(ind, std::move(tensor));
- }
-
- void setNativeUserTensor(ir::OperandIndex ind, std::unique_ptr<UserTensor> &&tensor)
- {
- assert(tensor);
- assert(!getITensor(ind)); // For the ind, tensor is not registered yet
- _native_user_tensors[ind] = std::move(tensor);
- }
-
- const ir::OperandIndexMap<std::unique_ptr<UserTensor>> &native_user_tensors()
- {
- return _native_user_tensors;
- }
- std::shared_ptr<cpu_common::TensorRegistry> base_reg() { return _base_reg; }
-
-private:
- std::shared_ptr<cpu_common::TensorRegistry> _base_reg;
- ir::OperandIndexMap<std::unique_ptr<UserTensor>> _native_user_tensors;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // ifndef __ONERT_BACKEND_CONTROLFLOW_TENSOR_REGISTRY_H__
diff --git a/runtime/onert/core/src/backend/controlflow/UserTensor.cc b/runtime/onert/core/src/backend/controlflow/UserTensor.cc
deleted file mode 100644
index 5081a90ea..000000000
--- a/runtime/onert/core/src/backend/controlflow/UserTensor.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "UserTensor.h"
-
-#include "util/Exceptions.h"
-#include "ir/DataType.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-size_t UserTensor::calcOffset(const ir::Coordinates &coords) const
-{
- size_t rank = num_dimensions();
- size_t offset = 0;
- for (size_t i = 0; i < rank; ++i)
- {
- offset = offset * dimension(i) + coords[i];
- }
- offset *= sizeOfDataType(data_type());
- return offset;
-}
-
-bool UserTensor::applyShape(const ir::Shape &new_shape)
-{
- // User tensors cannot be reallocated.
- auto new_size = new_shape.num_elements() * ir::sizeOfDataType(data_type());
- if (total_size() < new_size)
- throw InsufficientBufferSizeException{"User given buffer size is too small."};
- setShape(new_shape);
- return true;
-}
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/UserTensor.h b/runtime/onert/core/src/backend/controlflow/UserTensor.h
deleted file mode 100644
index 7aa62a8a9..000000000
--- a/runtime/onert/core/src/backend/controlflow/UserTensor.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_USER_TENSOR_H__
-#define __ONERT_BACKEND_CONTROLFLOW_USER_TENSOR_H__
-
-#include "ir/OperandInfo.h"
-#include "backend/IPortableTensor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-
-/**
- * @brief Tensor object that is for Input and Output tensors from the user.
- *
- * This class is a wrapped buffer that is allocated by the user. So it does not have resposibility
- * on allocation nor deallocation. All the model input/output tensors are wrapped with this class
- * for execution.
- *
- */
-class UserTensor : public IPortableTensor
-{
-public:
- UserTensor(const ir::OperandInfo &info, ir::Layout layout, uint8_t *buffer, size_t size)
- : IPortableTensor{info}, _layout{layout}, _buffer{buffer}, _size{size}, _dynamic{false}
- {
- }
-
- UserTensor(const ir::OperandInfo &info, ir::Layout layout) : UserTensor{info, layout, nullptr, 0}
- {
- }
-
-public:
- void setBuffer(uint8_t *buffer, size_t size)
- {
- _buffer = buffer;
- _size = size;
- }
-
-public:
- uint8_t *buffer() const override { return _buffer; }
- size_t total_size() const override { return _size; }
- size_t dimension(size_t index) const override { return _info.shape().dim(index); }
- size_t num_dimensions() const override { return _info.shape().rank(); }
- size_t calcOffset(const ir::Coordinates &coords) const override;
- ir::Layout layout() const override { return _layout; }
- ir::DataType data_type() const override { return _info.typeInfo().type(); }
- float data_scale() const override { return _info.typeInfo().scale(); }
- int32_t data_offset() const override { return _info.typeInfo().offset(); }
- bool is_dynamic() const override { return _dynamic; }
- void set_dynamic() override { _dynamic = true; }
- ir::Shape getShape() const override { return _info.shape(); }
- void setShape(const ir::Shape &new_shape) override { _info.shape(new_shape); }
- bool is_constant() const override { return false; }
- bool applyShape(const ir::Shape &) override;
-
-private:
- ir::Layout _layout;
- uint8_t *_buffer;
- size_t _size;
- bool _dynamic;
-};
-
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_USER_TENSOR_H__
diff --git a/runtime/onert/core/src/backend/controlflow/kernel/IfLayer.cc b/runtime/onert/core/src/backend/controlflow/kernel/IfLayer.cc
deleted file mode 100644
index c0329acd8..000000000
--- a/runtime/onert/core/src/backend/controlflow/kernel/IfLayer.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "IfLayer.h"
-
-#include <backend/ITensor.h>
-#include "exec/ExecutorBase.h"
-#include <misc/polymorphic_downcast.h>
-#include "PermuteLayer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-namespace kernel
-{
-
-IfLayer::IfLayer(backend::ITensor *cond_tensor, const std::vector<backend::ITensor *> input_tensors,
- const std::vector<backend::ITensor *> output_tensors,
- const ir::OperandIndexSequence &output_indices, const ir::Graph &graph,
- const ir::SubgraphIndex &then_subg_index, const ir::SubgraphIndex &else_subg_index,
- exec::ExecutorMap *executor_map)
- : _cond_tensor{cond_tensor}, _input_tensors{input_tensors}, _output_tensors{output_tensors},
- _output_indices{output_indices}, _graph{graph}, _then_subg_index{then_subg_index},
- _else_subg_index{else_subg_index}, _executor_map{executor_map}
-{
- // At this point, executor_map may not have executors of then subg and else subg
-}
-
-void IfLayer::run()
-{
- // Check condition
- // // If true
- // // // Copy _input_tensors -> then subg's inputs
- // // // Run then subg
- // // // Copy outputs of then subg -> _output_tensors
- // // Else
- // // // Copy _input_tensors -> else subg's inputs if false
- // // // Run else subg
- // // // Copy outputs of else subg -> _output_tensors
- auto getResultCond = [](backend::ITensor *tensor) -> bool {
- bool ret = false;
- tensor->access([&](ITensor &tensor) { ret = *reinterpret_cast<bool *>(tensor.buffer()); });
- return ret;
- };
-
- exec::ExecutorBase *subg_exec = nullptr;
- bool cond_result = getResultCond(_cond_tensor);
- if (cond_result)
- {
- VERBOSE(If) << "Call to $" << _then_subg_index << " (then)" << std::endl;
- subg_exec = nnfw::misc::polymorphic_downcast<exec::ExecutorBase *>(
- _executor_map->at(_then_subg_index).get());
- }
- else
- {
- VERBOSE(If) << "Call to $" << _else_subg_index << " (else)" << std::endl;
- subg_exec = nnfw::misc::polymorphic_downcast<exec::ExecutorBase *>(
- _executor_map->at(_else_subg_index).get());
- }
-
- const auto &subg_graph = subg_exec->graph();
-
- std::vector<backend::ITensor *> src_tensors;
- std::vector<backend::ITensor *> dst_tensors;
- // Add tensors used in subgraph or contained in outputs of subgraph
- assert(subg_graph.getInputs().size() == _input_tensors.size());
- assert(subg_graph.getInputs().size() == subg_exec->getInputTensors().size());
- for (uint32_t i = 0; i < subg_graph.getInputs().size(); ++i)
- {
- const auto &subg_input_index = subg_graph.getInputs().at(i);
- const auto &subg_input = subg_graph.operands().at(subg_input_index);
- if (subg_input.getUses().size() > 0 || subg_graph.getOutputs().contains(subg_input_index))
- {
- src_tensors.emplace_back(_input_tensors.at(i));
- dst_tensors.emplace_back(subg_exec->getInputTensors().at(i));
- }
- }
- const auto permute_op_input_to_subg_input =
- std::make_shared<PermuteLayer>(src_tensors, dst_tensors);
-
- // Add tensors used as output of operation or contained in outputs of operation
- src_tensors.clear();
- dst_tensors.clear();
- assert(_output_indices.size() == subg_exec->getOutputTensors().size());
- assert(_output_indices.size() == _output_tensors.size());
- for (uint32_t i = 0; i < _output_indices.size(); ++i)
- {
- const auto &output_index = _output_indices.at(i);
- const auto &output = _graph.operands().at(output_index);
- if (output.getUses().size() > 0 || _graph.getOutputs().contains(output_index))
- {
- src_tensors.emplace_back(subg_exec->getOutputTensors().at(i));
- dst_tensors.emplace_back(_output_tensors.at(i));
- }
- }
- const auto permute_subg_output_to_op_output =
- std::make_shared<PermuteLayer>(src_tensors, dst_tensors);
-
- // Remove copying of unused tensor
- permute_op_input_to_subg_input->prepare();
- permute_subg_output_to_op_output->prepare();
-
- // Copy & run
- subg_exec->execute(_input_tensors, permute_op_input_to_subg_input);
- permute_subg_output_to_op_output->run();
- VERBOSE(If) << "Return from $" << (cond_result ? _then_subg_index : _else_subg_index)
- << std::endl;
-}
-
-} // namespace kernel
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/kernel/IfLayer.h b/runtime/onert/core/src/backend/controlflow/kernel/IfLayer.h
deleted file mode 100644
index 1461388dc..000000000
--- a/runtime/onert/core/src/backend/controlflow/kernel/IfLayer.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_KERNEL_IF_LAYER_H__
-#define __ONERT_BACKEND_CONTROLFLOW_KERNEL_IF_LAYER_H__
-
-#include <backend/ITensor.h>
-#include <exec/IExecutor.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-namespace kernel
-{
-
-class IfLayer : public ::onert::exec::IFunction
-{
-public:
- IfLayer(backend::ITensor *cond_tensor, const std::vector<backend::ITensor *> input_tensors,
- const std::vector<backend::ITensor *> output_tensors,
- const ir::OperandIndexSequence &output_indices, const ir::Graph &graph,
- const ir::SubgraphIndex &then_subg_index, const ir::SubgraphIndex &else_subg_index,
- exec::ExecutorMap *executor_map);
-
-public:
- void run() override;
-
-private:
- backend::ITensor *_cond_tensor;
- const std::vector<backend::ITensor *> _input_tensors;
- const std::vector<backend::ITensor *> _output_tensors;
- const ir::OperandIndexSequence &_output_indices;
- const ir::Graph &_graph;
- const ir::SubgraphIndex _then_subg_index;
- const ir::SubgraphIndex _else_subg_index;
- exec::ExecutorMap *_executor_map;
-};
-
-} // namespace kernel
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_KERNEL_IF_LAYER_H__
diff --git a/runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.cc b/runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.cc
deleted file mode 100644
index 49fbb33c4..000000000
--- a/runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PermuteLayer.h"
-
-#include "exec/ShapeConverter.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-namespace kernel
-{
-
-void PermuteLayer::run()
-{
- assert(_src_tensors.size() == _dst_tensors.size());
- // PermuteLayer infers dynamic shape inside itself whenever run is called for the following
- // reasons:
- // 1. PermuteLayer has to access dynamic tensor manager for input/output tensors of other backends
- // 2. Other controlflow operation(If/While) uses this layout for copying tensors of other
- // subgraphs(with other backends)
- // 3. This infering code is placed here to avoid duplicated code that can be caused by above 2
- // reasons
-
- // check if output is not dynamic
- for (size_t i = 0; i < _src_tensors.size(); ++i)
- {
- auto dst_tensor = _dst_tensors.at(i);
- auto src_tensor = _src_tensors.at(i);
- if (src_tensor->is_dynamic() || dst_tensor->is_dynamic())
- {
- // getting output shape
- auto src_shape = src_tensor->getShape();
-
- // set output shape and output buffer
- ir::Shape new_shape =
- exec::convertShape(src_shape, src_tensor->layout(), dst_tensor->layout());
-
- try
- {
- if (!dst_tensor->applyShape(new_shape))
- throw std::runtime_error{
- "Error: PermuteLayer: output's TensorManager does not support dynamic tensor"};
- assert(dst_tensor->buffer() != nullptr);
- }
- catch (const std::out_of_range &e)
- {
- std::cerr << "Error: out_of_range in PermuteLayer: output's TensorManager does not support "
- "dynamic tensor"
- << '\n';
- throw;
- }
- }
- assert(exec::convertShape(src_tensor->getShape(), src_tensor->layout(), dst_tensor->layout()) ==
- dst_tensor->getShape());
- }
- IPermuteFunction::run();
-}
-
-} // namespace kernel
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.h b/runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.h
deleted file mode 100644
index 8129403a5..000000000
--- a/runtime/onert/core/src/backend/controlflow/kernel/PermuteLayer.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_KERNEL_PERMUTELAYER_H__
-#define __ONERT_BACKEND_CONTROLFLOW_KERNEL_PERMUTELAYER_H__
-
-#include "backend/ITensorBuilder.h"
-#include "exec/IPermuteFunction.h"
-#include "exec/IExecutor.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-namespace kernel
-{
-
-class PermuteLayer : public onert::exec::IPermuteFunction
-{
-public:
- PermuteLayer(const std::vector<ITensor *> &src_tensors, const std::vector<ITensor *> &dst_tensors)
- {
- assert(src_tensors.size() == dst_tensors.size());
- _src_tensors = src_tensors;
- _dst_tensors = dst_tensors;
- }
-
- void optimize() override
- {
- // Remove copying of tensor as nullptr
- auto src_it = _src_tensors.begin();
- auto dst_it = _dst_tensors.begin();
- while (src_it != _src_tensors.end())
- {
- if ((*src_it == *dst_it) || (*src_it == nullptr || *dst_it == nullptr))
- {
- src_it = _src_tensors.erase(src_it);
- dst_it = _dst_tensors.erase(dst_it);
- }
- else
- {
- ++src_it;
- ++dst_it;
- }
- }
- }
-
- void run() override;
-};
-
-} // namespace kernel
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_KERNEL_PERMUTELAYER_H__
diff --git a/runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.cc b/runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.cc
deleted file mode 100644
index 225f0dd7c..000000000
--- a/runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "WhileLayer.h"
-
-#include <backend/ITensor.h>
-#include "exec/ExecutorBase.h"
-#include <misc/polymorphic_downcast.h>
-#include "PermuteLayer.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-namespace kernel
-{
-
-WhileLayer::WhileLayer(const std::vector<backend::ITensor *> input_tensors,
- const std::vector<backend::ITensor *> output_tensors,
- const ir::OperandIndexSequence &output_indices, const ir::Graph &graph,
- const ir::SubgraphIndex &cond_subg_index,
- const ir::SubgraphIndex &body_subg_index, exec::ExecutorMap *executor_map)
- : _cond_subg_index{cond_subg_index}, _body_subg_index{body_subg_index},
- _output_indices{output_indices}, _graph{graph}, _input_tensors{input_tensors},
- _output_tensors{output_tensors}, _executor_map{executor_map}
-{
- // At this point, executor_map may not have executors of cond subg and body subg
-}
-
-void WhileLayer::run()
-{
- // Copy "_input_tensors" -> "cond subg inputs"
- // Run cond subg
- // Start loop while output of cond subg is ture
- // // Copy "_input_tensors" -> "body subg inputs" in the first iteration, then copy "body subg
- // outputs" -> "body subg inputs" in the second or more iterations
- // // Run body subg
- // // Copy "body subg outputs" -> "cond subg inputs"
- // // Run cond subg
- // If there is no loop copy "_input_tensors" -> "_dst_tensors", else copy "cond subg inputs" ->
- // "_dst_tensors"
- auto cond_exec = nnfw::misc::polymorphic_downcast<exec::ExecutorBase *>(
- _executor_map->at(_cond_subg_index).get());
- auto body_exec = nnfw::misc::polymorphic_downcast<exec::ExecutorBase *>(
- _executor_map->at(_body_subg_index).get());
-
- const auto &cond_graph = cond_exec->graph();
- const auto &body_graph = body_exec->graph();
-
- std::vector<backend::ITensor *> input_tensors;
- std::vector<backend::ITensor *> cond_input_tensors;
- std::vector<backend::ITensor *> body_input_tensors;
- std::vector<backend::ITensor *> body_output_tensors;
- std::vector<backend::ITensor *> output_tensors;
-
- // Add only used tensors in cond subgraph
- assert(cond_graph.getInputs().size() == _input_tensors.size());
- assert(cond_graph.getInputs().size() == cond_exec->getInputTensors().size());
- for (uint32_t i = 0; i < cond_graph.getInputs().size(); ++i)
- {
- const auto &cond_input = cond_graph.operands().at(cond_graph.getInputs().at(i));
- if (cond_input.getUses().size() > 0)
- {
- input_tensors.emplace_back(_input_tensors.at(i));
- cond_input_tensors.emplace_back(cond_exec->getInputTensors().at(i));
- }
- }
- const auto permute_op_input_to_cond_input =
- std::make_shared<PermuteLayer>(input_tensors, cond_input_tensors);
-
- // Add only used tensors among outputs of while operation
- assert(_output_indices.size() == _input_tensors.size());
- assert(_output_indices.size() == _output_tensors.size());
- input_tensors.clear();
- output_tensors.clear();
- for (size_t i = 0; i < _output_indices.size(); ++i)
- {
- const auto &output_index = _output_indices.at(i);
- const auto &output = _graph.operands().at(output_index);
- if (output.getUses().size() > 0 || _graph.getOutputs().contains(output_index))
- {
- input_tensors.emplace_back(_input_tensors.at(i));
- output_tensors.emplace_back(_output_tensors.at(i));
- }
- }
- const auto permute_op_input_to_op_output =
- std::make_shared<PermuteLayer>(input_tensors, output_tensors);
-
- // Add all tensors with unused tensors in body subgraph because unused input tensors will be
- // copied output tensors in body subgraph
- assert(_input_tensors.size() == body_exec->getInputTensors().size());
- input_tensors = _input_tensors;
- body_input_tensors = body_exec->getInputTensors();
- const auto permute_op_input_to_body_input =
- std::make_shared<PermuteLayer>(input_tensors, body_input_tensors);
-
- // Add only used tensors in cond subgraph
- assert(cond_graph.getInputs().size() == body_exec->getOutputTensors().size());
- assert(cond_graph.getInputs().size() == cond_exec->getInputTensors().size());
- body_output_tensors.clear();
- cond_input_tensors.clear();
- for (uint32_t i = 0; i < cond_graph.getInputs().size(); ++i)
- {
- const auto &cond_input = cond_graph.operands().at(cond_graph.getInputs().at(i));
- if (cond_input.getUses().size() > 0)
- {
- body_output_tensors.emplace_back(body_exec->getOutputTensors().at(i));
- cond_input_tensors.emplace_back(cond_exec->getInputTensors().at(i));
- }
- }
- const auto permute_body_output_to_cond_input =
- std::make_shared<PermuteLayer>(body_output_tensors, cond_input_tensors);
-
- // Add only used tensors in body subgraph
- assert(body_graph.getInputs().size() == body_exec->getOutputTensors().size());
- assert(body_graph.getInputs().size() == body_exec->getInputTensors().size());
- body_output_tensors.clear();
- body_input_tensors.clear();
- for (uint32_t i = 0; i < body_graph.getInputs().size(); ++i)
- {
- const auto &body_input_index = body_graph.getInputs().at(i);
- const auto &body_input = body_graph.operands().at(body_input_index);
- if (body_input.getUses().size() > 0 &&
- !body_exec->graph().getOutputs().contains(body_input_index))
- {
- body_output_tensors.emplace_back(body_exec->getOutputTensors().at(i));
- body_input_tensors.emplace_back(body_exec->getInputTensors().at(i));
- }
- }
- const auto permute_body_output_to_body_input =
- std::make_shared<PermuteLayer>(body_output_tensors, body_input_tensors);
-
- // Add only used tensors among outputs of while operation
- assert(_output_indices.size() == body_exec->getOutputTensors().size());
- assert(_output_indices.size() == _output_tensors.size());
- body_output_tensors.clear();
- output_tensors.clear();
- for (size_t i = 0; i < _output_indices.size(); ++i)
- {
- const auto &output_index = _output_indices.at(i);
- const auto &output = _graph.operands().at(output_index);
- if (output.getUses().size() > 0 || _graph.getOutputs().contains(output_index))
- {
- body_output_tensors.emplace_back(body_exec->getOutputTensors().at(i));
- output_tensors.emplace_back(_output_tensors.at(i));
- }
- }
- const auto permute_body_output_to_op_output =
- std::make_shared<PermuteLayer>(body_output_tensors, output_tensors);
-
- // Remove copying of unused tensor
- permute_op_input_to_cond_input->prepare();
- permute_op_input_to_op_output->prepare();
- permute_op_input_to_body_input->prepare();
- permute_body_output_to_cond_input->prepare();
- permute_body_output_to_body_input->prepare();
- permute_body_output_to_op_output->prepare();
-
- VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
- cond_exec->execute(_input_tensors, permute_op_input_to_cond_input);
- VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;
-
- assert(cond_exec->getOutputTensors().size() == 1);
- auto &cond_output_tensor = cond_exec->getOutputTensors().at(0);
- auto getResultCond = [](backend::ITensor *tensor) -> bool {
- bool ret = false;
- tensor->access([&](ITensor &tensor) { ret = *reinterpret_cast<bool *>(tensor.buffer()); });
- return ret;
- };
-
- const auto body_execute_with_op_inputs = [&]() {
- VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
- body_exec->execute(_input_tensors, permute_op_input_to_body_input);
- VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
- };
-
- const auto body_execute_with_body_outputs = [&]() {
- VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
- body_exec->execute(body_exec->getOutputTensors(), permute_body_output_to_body_input);
- VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
- };
-
- std::function<void()> body_execute = body_execute_with_op_inputs;
- const auto cond_execute = [&]() {
- VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
- cond_exec->execute(body_exec->getOutputTensors(), permute_body_output_to_cond_input);
- VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;
- };
- auto permute_to_outputs_fn = permute_op_input_to_op_output;
-
- // Loop while Cond subgraph's output is true
- while (getResultCond(cond_output_tensor))
- {
- body_execute();
- cond_execute();
- body_execute = body_execute_with_body_outputs;
- permute_to_outputs_fn = permute_body_output_to_op_output;
- }
- permute_to_outputs_fn->run();
-}
-
-} // namespace kernel
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.h b/runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.h
deleted file mode 100644
index 9dae49281..000000000
--- a/runtime/onert/core/src/backend/controlflow/kernel/WhileLayer.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CONTROLFLOW_KERNEL_WHILE_LAYER_H__
-#define __ONERT_BACKEND_CONTROLFLOW_KERNEL_WHILE_LAYER_H__
-
-#include <backend/ITensor.h>
-#include <exec/IExecutor.h>
-#include <exec/IFunction.h>
-#include <ir/OperandIndexSequence.h>
-#include <ir/Graph.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace controlflow
-{
-namespace kernel
-{
-
-class WhileLayer : public ::onert::exec::IFunction
-{
-public:
- WhileLayer(const std::vector<backend::ITensor *> input_tensors,
- const std::vector<backend::ITensor *> output_tensors,
- const ir::OperandIndexSequence &output_indices, const ir::Graph &graph,
- const ir::SubgraphIndex &cond_subg_index, const ir::SubgraphIndex &body_subg_index,
- exec::ExecutorMap *executor_map);
-
-public:
- void run() override;
-
-private:
- const ir::SubgraphIndex _cond_subg_index;
- const ir::SubgraphIndex _body_subg_index;
- const ir::OperandIndexSequence &_output_indices;
- const ir::Graph &_graph;
- const std::vector<backend::ITensor *> _input_tensors;
- const std::vector<backend::ITensor *> _output_tensors;
- exec::ExecutorMap *_executor_map;
-};
-
-} // namespace kernel
-} // namespace controlflow
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CONTROLFLOW_KERNEL_WHILE_LAYER_H__
diff --git a/runtime/onert/core/src/backend/cpu_common/Allocator.cc b/runtime/onert/core/src/backend/cpu_common/Allocator.cc
deleted file mode 100644
index 0ba444ee6..000000000
--- a/runtime/onert/core/src/backend/cpu_common/Allocator.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/cpu_common/Allocator.h"
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-Allocator::Allocator(uint32_t capacity)
-{
- _base = std::make_unique<uint8_t[]>(capacity);
-
- VERBOSE(ALLOC) << "allocation capacity: " << capacity << std::endl;
- VERBOSE(ALLOC) << "base pointer: " << static_cast<void *>(_base.get()) << std::endl;
-}
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc b/runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc
deleted file mode 100644
index 740248ccd..000000000
--- a/runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/cpu_common/DynamicTensorManager.h"
-
-#include "util/logging.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-DynamicTensorManager::DynamicTensorManager(const std::shared_ptr<TensorRegistry> &reg)
- : _dynamic_mem_mgr{new DynamicMemoryManager()}, _tensors{reg}
-{
- // DO NOTHING
-}
-
-void DynamicTensorManager::buildTensor(const ir::OperandIndex &ind,
- const ir::OperandInfo &tensor_info,
- ir::Layout backend_layout)
-{
- assert(_tensors->getNativeTensor(ind) == nullptr);
- auto tensor = std::make_unique<Tensor>(tensor_info, backend_layout, _dynamic_mem_mgr.get());
- _tensors->setNativeTensor(ind, std::move(tensor));
-}
-
-void DynamicTensorManager::planDealloc(ir::OperationIndex op_ind, backend::ITensor *tensor)
-{
- _dealloc_tensor_map[op_ind].emplace(tensor);
-}
-
-void DynamicTensorManager::deallocInput(ir::OperationIndex op_ind)
-{
- auto find = _dealloc_tensor_map.find(op_ind);
- if (find == _dealloc_tensor_map.end())
- return;
-
- auto &input_set = find->second;
- for (auto *tensor : input_set)
- {
- if (!tensor->is_dynamic())
- continue;
-
- _dynamic_mem_mgr->deallocate(tensor);
-
- auto *cpu_tensor = nnfw::misc::polymorphic_downcast<cpu_common::Tensor *>(tensor);
- cpu_tensor->resetBuffer();
-
- VERBOSE(DynamicTensorManager) << "Deallocating tensor " << (void *)cpu_tensor
- << " (input of op_ind: " << op_ind.value() << ")" << std::endl;
- }
-}
-
-const ITensor *DynamicTensorManager::getRawITensor(ir::OperandIndex ind)
-{
- auto ptr = _tensors->getITensor(ind);
- assert(ptr);
- return ptr;
-}
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/cpu_common/MemoryManager.cc b/runtime/onert/core/src/backend/cpu_common/MemoryManager.cc
deleted file mode 100644
index 9f179d9ee..000000000
--- a/runtime/onert/core/src/backend/cpu_common/MemoryManager.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <backend/cpu_common/MemoryManager.h>
-
-#include <cassert>
-
-#include "MemoryPlannerFactory.h"
-#include "util/ConfigSource.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-MemoryManager::MemoryManager() : _mem_planner{createMemoryPlanner()}
-{
- // DO NOTHING
-}
-
-MemoryManager::MemoryManager(const std::string planner_id)
- : _mem_planner{createMemoryPlanner(planner_id)}
-{
- // DO NOTHING
-}
-
-cpu_common::IMemoryPlanner *MemoryManager::createMemoryPlanner()
-{
- auto planner_id = util::getConfigString(util::config::CPU_MEMORY_PLANNER);
- return cpu_common::MemoryPlannerFactory::get().create(planner_id);
-}
-
-cpu_common::IMemoryPlanner *MemoryManager::createMemoryPlanner(const std::string planner_id)
-{
- return cpu_common::MemoryPlannerFactory::get().create(planner_id);
-}
-
-void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
-{
- _mem_planner->claim(ind, size);
-}
-
-void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); }
-
-void MemoryManager::allocate(void)
-{
- _mem_alloc = std::make_shared<cpu_common::Allocator>(_mem_planner->capacity());
- assert(_mem_alloc->base());
-}
-
-uint8_t *MemoryManager::getBuffer(const ir::OperandIndex &ind) const
-{
- assert(_mem_planner->memory_plans().find(ind) != _mem_planner->memory_plans().end());
- const auto &mem_blk = _mem_planner->memory_plans().at(ind);
- return _mem_alloc->base() + mem_blk.offset;
-}
-
-std::shared_ptr<cpu_common::Allocator> DynamicMemoryManager::allocate(const ITensor *tensor,
- uint32_t capacity)
-{
- auto find = _mem_alloc_map.find(tensor);
- if (find != _mem_alloc_map.end())
- throw std::runtime_error("Cannot allocate memory for a tensor. It was already allocated.");
-
- _mem_alloc_map[tensor] = std::make_shared<cpu_common::Allocator>(capacity);
- return _mem_alloc_map[tensor];
-}
-
-void DynamicMemoryManager::deallocate(const ITensor *tensor)
-{
- auto find = _mem_alloc_map.find(tensor);
- if (find == _mem_alloc_map.end())
- throw std::runtime_error("Cannot find Allocator for the requested index");
-
- find->second->release(); // explicitly erase memory
- _mem_alloc_map.erase(find); // remove tensor and alloc
-}
-
-void DynamicMemoryManager::deallocate(void)
-{
- for (auto &mem_alloc : _mem_alloc_map)
- {
- // Release memory buffer of mem_alloc
- mem_alloc.second->release();
- }
-
- _mem_alloc_map.clear();
-}
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.cc b/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.cc
deleted file mode 100644
index 75c2da7d2..000000000
--- a/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.cc
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MemoryPlanner.h"
-#include "util/logging.h"
-#include <cassert>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-void BumpPlanner::claim(const ir::OperandIndex &ind, size_t size)
-{
- assert(size != 0);
-
- Block blk{_capacity, size};
- _mem_plans[ind] = blk;
- _capacity += size;
-
- VERBOSE(BP_PLANNER) << "CLAIM(#" << ind.value() << "): " << blk.offset << ", " << blk.size
- << std::endl;
-}
-
-void BumpPlanner::release(const ir::OperandIndex &ind)
-{
- VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): "
- << "NOTHING does" << std::endl;
-}
-
-// There are some assumptions for claiming memory(== making a reservation for memory).
-// 1. About _claim_table(std::map).
-// - The table's data structure is std::map so that it always sorts
-// value(OperandIndex) by key(base_offset).
-// - This claim() inserts key/value into _claim_table and the release() removes the key/value from
-// _claim_table.
-// - _claim_table shows the memory status at a certain point in time. Therefore,
-// - If _claim_table has an offset and a certain size at a certain point in time,
-// it means the place at the offset has been already claimed(== can't claim now. need to find
-// someplace new).
-// - If _claim_table doesn't have any element for an offset and a certain size at a certain
-// point in time, it means the place at the offset can be claimed.
-// 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than
-// the previous claim_base_offset.
-void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size)
-{
- assert(size != 0);
-
- // Find the right position for claiming
- uint32_t next_offset = 0;
- for (auto &mem_claim : _claim_table)
- {
- auto claimed_base_offset = mem_claim.first;
- auto claimed_size = _mem_plans[mem_claim.second].size;
- if (next_offset + size <= claimed_base_offset)
- {
- break;
- }
- else
- {
- next_offset = claimed_base_offset + claimed_size;
- }
- }
-
- // Now next_offset is set to the proper offset
- _claim_table[next_offset] = ind;
- _mem_plans[ind] = {next_offset, size};
-
- VERBOSE(FF_PLANNER) << "claim(#" << ind.value() << "): [+" << next_offset << ", " << size << "sz]"
- << std::endl;
-
- if (_capacity < next_offset + size)
- {
- _capacity = next_offset + size;
- }
-}
-
-void FirstFitPlanner::release(const ir::OperandIndex &ind)
-{
- for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it)
- {
- if (it->second == ind)
- {
- uint32_t offset = it->first;
- uint32_t index = ind.value();
- uint32_t size = _mem_plans[ind].size;
-
- _claim_table.erase(it);
-
- VERBOSE(FF_PLANNER) << "release(#" << index << "): [+" << offset << ", " << size << "sz]"
- << std::endl;
- return;
- }
- }
- assert(!"Cannot release for given index. It has been not claimed or released already.");
-}
-
-WICPlanner::WICPlanner()
- : _initialized(false), _capacity(0), _mem_plans(), _live_operands(), _interference_graph(),
- _operands()
-{
- // DO NOTHING
-}
-
-void WICPlanner::claim(const ir::OperandIndex &ind, size_t size)
-{
- assert(size != 0);
-
- _operands.emplace(size, ind);
- _interference_graph[ind].insert(_interference_graph[ind].end(), _live_operands.cbegin(),
- _live_operands.cend());
- for (const auto &live_operand : _live_operands)
- {
- _interference_graph[live_operand].emplace_back(ind);
- }
- _live_operands.emplace(ind);
-
- VERBOSE(WIC_PLANNER) << "claim(#" << ind.value() << "): [" << size << "sz]" << std::endl;
-}
-
-void WICPlanner::release(const ir::OperandIndex &ind)
-{
- _live_operands.erase(ind);
- VERBOSE(WIC_PLANNER) << "release(#" << ind.value() << ")" << std::endl;
-}
-
-/*
- * Build memory plans using liveness and size of operands
- * 1. Build inference graph at claim
- * - Two operands interfere if they have overlapped live range
- * 2. Sort operands in descending order of size
- * - Use std::multimap to sort operands
- * 3. Allocate memory block for sorted operands
- * - Find free memory block which does not overlap with interfered operands
- */
-void WICPlanner::buildMemoryPlans()
-{
- for (const auto &operand : _operands)
- {
- uint32_t size = operand.first;
- const ir::OperandIndex &ind = operand.second;
- VERBOSE(WIC_PLANNER) << "build_plan(#" << ind.value() << "): [" << size << "sz]" << std::endl;
-
- uint32_t next_offset = 0;
- if (_interference_graph.count(ind))
- {
- // Find interfered memory plans and sort them by offset
- std::multimap<uint32_t, uint32_t> interfered_plans;
- for (const auto &interference : _interference_graph[ind])
- {
- if (_mem_plans.count(interference))
- interfered_plans.emplace(_mem_plans[interference].offset, _mem_plans[interference].size);
- }
-
- // Find free memory block in first-fit manner
- for (const auto &interfered_plan : interfered_plans)
- {
- auto claimed_base_offset = interfered_plan.first;
- auto claimed_size = interfered_plan.second;
- VERBOSE(WIC_PLANNER) << "interfere : [+" << claimed_base_offset << ", " << claimed_size
- << "sz]" << std::endl;
- if (next_offset + size <= claimed_base_offset)
- {
- break;
- }
- else if (next_offset < claimed_base_offset + claimed_size)
- {
- next_offset = claimed_base_offset + claimed_size;
- }
- }
- }
- else
- {
- VERBOSE(WIC_PLANNER) << "No interference" << std::endl;
- }
-
- _mem_plans[ind] = {next_offset, size};
- VERBOSE(WIC_PLANNER) << "alloc(#" << ind.value() << "): [+" << next_offset << ", " << size
- << "sz]" << std::endl;
-
- if (_capacity < next_offset + size)
- {
- _capacity = next_offset + size;
- }
- }
- _initialized = true;
- _interference_graph.clear();
- _operands.clear();
-}
-
-WICPlanner::MemoryPlans &WICPlanner::memory_plans()
-{
- if (!_initialized)
- buildMemoryPlans();
- return _mem_plans;
-}
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.h b/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.h
deleted file mode 100644
index 7c387e542..000000000
--- a/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file        MemoryPlanner.h
- * @brief       This file contains Memory Planning related classes
- */
-
-#ifndef __ONERT_BACKEND_CPU_COMMON_MEMORY_PLANNER_H__
-#define __ONERT_BACKEND_CPU_COMMON_MEMORY_PLANNER_H__
-
-#include <map>
-#include <vector>
-#include <unordered_set>
-#include <memory>
-
-#include "backend/cpu_common/Allocator.h"
-#include "backend/cpu_common/IMemoryPlanner.h"
-#include "ir/OperandIndexMap.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-/**
- * @brief Class to plan memory by bump way
- */
-class BumpPlanner : public IMemoryPlanner
-{
-public:
- /**
- * @brief Claim memory for operand by bump way
- * @param[in] index The operand index
- * @param[in] size The size of the memory
- */
- void claim(const ir::OperandIndex &, size_t) override;
- /**
- * @brief Release memory for operand by bump way
- * @param[in] index The operand index
- */
- void release(const ir::OperandIndex &) override;
- /**
- * @brief Get capacity for memory planning
- * @return The value of capacity
- */
- uint32_t capacity() override { return _capacity; }
- /**
- * @brief Get MemoryPlans
- * @return MemoryPlans
- */
- MemoryPlans &memory_plans() override { return _mem_plans; }
-
-private:
- uint32_t _capacity = 0;
- MemoryPlans _mem_plans;
-};
-
-/**
- * @brief Class to plan memory by firstfit way
- */
-class FirstFitPlanner : public IMemoryPlanner
-{
-public:
- /**
- * @brief Claim memory for operand by firstfit way
- * @param[in] index The operand index
- * @param[in] size The size of the memory
- */
- void claim(const ir::OperandIndex &, size_t) override;
- /**
- * @brief Release memory for operand by firstfit way
- * @param[in] index The operand index
- */
- void release(const ir::OperandIndex &) override;
- /**
- * @brief Get capacity for memory planning
- * @return The value of capacity
- */
- uint32_t capacity() override { return _capacity; }
- /**
- * @brief Get MemoryPlans
- * @return MemoryPlans
- */
- MemoryPlans &memory_plans() override { return _mem_plans; }
-
-private:
- uint32_t _capacity = 0;
- MemoryPlans _mem_plans;
- // Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset)
- std::map<uint32_t, ir::OperandIndex> _claim_table;
-};
-
-/**
- * @brief Class to plan memory by Weighted Interval Color algorithm
- */
-class WICPlanner : public IMemoryPlanner
-{
-public:
- WICPlanner();
-
- /**
- * @brief Claim memory for operand by WIC algorithm
- * @param[in] index The operand index
- * @param[in] size The size of the memory
- */
- void claim(const ir::OperandIndex &, size_t) override;
- /**
- * @brief Release memory for operand by WIC algorithm
- * @param[in] index The operand index
- */
- void release(const ir::OperandIndex &) override;
- /**
- * @brief Get capacity for memory planning
- * @return The value of capacity
- */
- uint32_t capacity() override
- {
- if (!_initialized)
- buildMemoryPlans();
- return _capacity;
- }
- /**
- * @brief Get MemoryPlans
- * @return MemoryPlans
- */
- MemoryPlans &memory_plans() override;
-
-private:
- void buildMemoryPlans();
-
- bool _initialized;
- uint32_t _capacity;
- MemoryPlans _mem_plans;
- std::unordered_set<ir::OperandIndex> _live_operands;
- ir::OperandIndexMap<std::vector<ir::OperandIndex>> _interference_graph;
- // Sort operands by descending order of size
- std::multimap<uint32_t, ir::OperandIndex, std::greater<uint32_t>> _operands;
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_COMMON_MEMORY_PLANNER_H__
diff --git a/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.test.cc b/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.test.cc
deleted file mode 100644
index 5208a94d4..000000000
--- a/runtime/onert/core/src/backend/cpu_common/MemoryPlanner.test.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "MemoryPlanner.h"
-#include "ir/Index.h"
-
-TEST(Allocator, allocate_test)
-{
- ::onert::backend::cpu_common::Allocator allocator(1024);
- ASSERT_NE(allocator.base(), nullptr);
-}
-
-TEST(BumpPlanner, claim_test)
-{
- ::onert::backend::cpu_common::BumpPlanner planner;
-
- auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) {
- onert::ir::OperandIndex mem_idx(index);
- planner.claim(mem_idx, size);
- auto mem_blk = planner.memory_plans()[mem_idx];
- ASSERT_EQ(mem_blk.offset, expected_offset);
- ASSERT_EQ(mem_blk.size, size);
- };
-
- claim(0, 10, 0);
- claim(1, 20, 10);
- claim(2, 30, 30);
-}
-
-TEST(FirstFitPlanner, claim_release_test)
-{
- ::onert::backend::cpu_common::FirstFitPlanner planner;
-
- auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) {
- onert::ir::OperandIndex mem_idx(index);
- planner.claim(mem_idx, size);
- auto mem_blk = planner.memory_plans()[mem_idx];
- ASSERT_EQ(mem_blk.offset, expected_offset);
- ASSERT_EQ(mem_blk.size, size);
- };
-
- auto release = [&planner](uint32_t index) {
- onert::ir::OperandIndex mem_idx(index);
- planner.release(mem_idx);
- };
-
- // 0 CLAIM - 10
- claim(0, 10, 0);
-
- // 1 CLAIM - 20
- claim(1, 20, 10);
-
- // 2 CLAIM - 30
- claim(2, 30, 30);
-
- // 0 RELEASE - 10
- release(0);
-
- // 3 CLAIM - 20
- claim(3, 20, 60);
-
- // 4 CLAIM - 5
- claim(4, 5, 0);
-
- // 5 CLAIM - 10
- claim(5, 10, 80);
-
- // 6 CLAIM - 5
- claim(6, 5, 5);
-
- // 2 RELEASE - 30
- release(2);
-
- // 7 CLAIM - 35
- claim(7, 35, 90);
-
- // 8 CLAIM - 10
- claim(8, 10, 30);
-
- // 4 RELEASE - 5
- release(4);
-
- // 9 CLAIM - 10
- claim(9, 10, 40);
-
- // 10 CLAIM - 10
- claim(10, 10, 50);
-
- // 6 RELEASE
- release(6);
-
- // 1 RELEASE
- release(1);
-
- // 8 RELEASE
- release(8);
-
- // 9 RELEASE
- release(9);
-
- // 10 RELEASE
- release(10);
-
- // 3 RELEASE
- release(3);
-
- // 5 RELEASE
- release(5);
-
- // 7 RELEASE
- release(7);
-}
-
-TEST(WICPlanner, claim_release_test)
-{
- ::onert::backend::cpu_common::WICPlanner planner;
-
- auto claim = [&planner](uint32_t index, size_t size) {
- onert::ir::OperandIndex mem_idx(index);
- planner.claim(mem_idx, size);
- };
-
- auto release = [&planner](uint32_t index) {
- onert::ir::OperandIndex mem_idx(index);
- planner.release(mem_idx);
- };
-
- auto verify = [&planner](uint32_t index, uint32_t size, uint32_t expected_offset) {
- onert::ir::OperandIndex mem_idx(index);
- auto mem_blk = planner.memory_plans()[mem_idx];
- ASSERT_EQ(mem_blk.offset, expected_offset);
- ASSERT_EQ(mem_blk.size, size);
- };
-
- auto capacity = [&planner](uint32_t expected_capacity) {
- auto actual_capacity = planner.capacity();
- ASSERT_EQ(actual_capacity, expected_capacity);
- };
-
- claim(0, 20);
- claim(1, 5);
- release(0);
- claim(2, 10);
- release(1);
- claim(3, 10);
- release(2);
- claim(4, 10);
- release(3);
- claim(5, 20);
- release(4);
- claim(6, 20);
- release(5);
- release(7);
-
- // VERIFY 0 - 0
- verify(0, 20, 0);
-
- // VERIFY 1 - 20
- verify(1, 5, 20);
-
- // VERIFY 2 - 0
- verify(2, 10, 0);
-
- // VERIFY 3 - 10
- verify(3, 10, 10);
-
- // VERIFY 4 - 20
- verify(4, 10, 20);
-
- // VERIFY 5 - 0
- verify(5, 20, 0);
-
- // VERIFY 6 - 20
- verify(6, 20, 20);
-
- // CAPACITY - 40
- capacity(40);
-}
diff --git a/runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.cc b/runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.cc
deleted file mode 100644
index ead4f3294..000000000
--- a/runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MemoryPlannerFactory.h"
-
-#include "MemoryPlanner.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-MemoryPlannerFactory &MemoryPlannerFactory::get()
-{
- static MemoryPlannerFactory instance;
- return instance;
-}
-
-IMemoryPlanner *MemoryPlannerFactory::create(const std::string &key)
-{
- if (key == "FirstFit")
- {
- return new FirstFitPlanner;
- }
- else if (key == "Bump")
- {
- return new BumpPlanner;
- }
- else if (key == "WIC")
- {
- return new WICPlanner;
- }
- return new FirstFitPlanner; // Default Planner
-}
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.h b/runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.h
deleted file mode 100644
index d14ec13ca..000000000
--- a/runtime/onert/core/src/backend/cpu_common/MemoryPlannerFactory.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_BACKEND_CPU_COMMON_MEMORY_PLANNER_FACTORY_H__
-#define __ONERT_BACKEND_CPU_COMMON_MEMORY_PLANNER_FACTORY_H__
-
-#include "backend/cpu_common/IMemoryPlanner.h"
-
-#include <string>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-class MemoryPlannerFactory
-{
-public:
- static MemoryPlannerFactory &get();
-
-private:
- MemoryPlannerFactory() = default;
-
-public:
- IMemoryPlanner *create(const std::string &key);
-};
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
-
-#endif // __ONERT_BACKEND_CPU_COMMON_MEMORY_PLANNER_FACTORY_H__
diff --git a/runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc b/runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc
deleted file mode 100644
index cac43babe..000000000
--- a/runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/cpu_common/StaticTensorManager.h"
-
-#include "backend/cpu_common/DynamicTensorManager.h"
-#include <util/logging.h>
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-StaticTensorManager::StaticTensorManager(const std::shared_ptr<TensorRegistry> &reg,
- DynamicMemoryManager *dynamic_mem_mgr)
- : _const_mgr{new DynamicMemoryManager()}, _nonconst_mgr{new MemoryManager()}, _tensors{reg},
- _dynamic_mem_mgr{dynamic_mem_mgr}
-{
- // DO NOTHING
-}
-
-void StaticTensorManager::allocateConsts(void)
-{
- for (auto &pair : _tensors->native_tensors())
- {
- const auto &ind = pair.first;
- auto tensor = pair.second.get();
- if (_as_constants[ind])
- {
- auto mem_alloc = _const_mgr->allocate(_tensors->getITensor(ind), tensor->total_size());
- tensor->setBuffer(mem_alloc);
- auto buffer = mem_alloc->base();
- VERBOSE(CPU_COMMON_StaticTensorManager) << "CONSTANT TENSOR(#" << ind.value()
- << "): " << static_cast<void *>(buffer)
- << "size : " << tensor->total_size() << std::endl;
- }
- }
-}
-
-void StaticTensorManager::allocateNonconsts(void)
-{
- _nonconst_mgr->allocate();
-
- for (auto &pair : _tensors->native_tensors())
- {
- const auto &ind = pair.first;
- auto tensor = pair.second.get();
- if (!_as_constants[ind] && !tensor->is_dynamic())
- {
- auto *buffer = _nonconst_mgr->getBuffer(ind);
- tensor->setBuffer(buffer);
-
- VERBOSE(CPU_COMMON_StaticTensorManager) << "TENSOR(#" << ind.value()
- << "): " << static_cast<void *>(buffer) << std::endl;
- }
- }
-}
-
-void StaticTensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }
-
-void StaticTensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
-
-void StaticTensorManager::buildTensor(const ir::OperandIndex &ind,
- const ir::OperandInfo &tensor_info, ir::Layout backend_layout,
- bool as_const)
-{
- assert(!_tensors->getNativeTensor(ind));
- auto tensor = std::make_unique<Tensor>(tensor_info, backend_layout, _dynamic_mem_mgr);
- _tensors->setNativeTensor(ind, std::move(tensor));
- _as_constants[ind] = as_const;
-}
-
-void StaticTensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
-{
- assert(_tensors->getNativeTensor(ind));
-
- // This method is called only when a tensor has proper shape
- assert(!_tensors->getNativeTensor(ind)->is_dynamic());
-
- if (!_as_constants[ind])
- _nonconst_mgr->claimPlan(ind, size);
-}
-
-void StaticTensorManager::releasePlan(const ir::OperandIndex &ind)
-{
- assert(_tensors->getNativeTensor(ind));
-
- // This method is called only when a tensor has proper shape
- assert(!_tensors->getNativeTensor(ind)->is_dynamic());
-
- if (!_as_constants[ind])
- _nonconst_mgr->releasePlan(ind);
-}
-
-void StaticTensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
-{
- for (const auto &it : _tensors->native_tensors())
- fn(it.first);
-}
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/backend/cpu_common/Tensor.cc b/runtime/onert/core/src/backend/cpu_common/Tensor.cc
deleted file mode 100644
index d3dcf9a6d..000000000
--- a/runtime/onert/core/src/backend/cpu_common/Tensor.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/cpu_common/Tensor.h"
-
-#include "ir/DataType.h"
-#include "backend/cpu_common/MemoryManager.h"
-
-namespace onert
-{
-namespace backend
-{
-namespace cpu_common
-{
-
-Tensor::~Tensor() {}
-
-size_t Tensor::calcOffset(const ir::Coordinates &coords) const
-{
- size_t rank = num_dimensions();
- rank = rank == 0 ? 1 : rank;
- size_t offset = 0;
- for (size_t i = 0; i < rank; ++i)
- {
- offset = offset * dimension(i) + coords[i];
- }
- offset *= sizeOfDataType(data_type());
- return offset;
-}
-
-void Tensor::setShape(const ir::Shape &new_shape) { _info.shape(new_shape); }
-
-bool Tensor::applyShape(const ir::Shape &new_shape)
-{
- bool previously_dynamic = is_dynamic();
-
- auto allocTensorMem = [&](bool overwrite = false) {
- auto capacity = total_size();
- auto alloc = _dynamic_mem_mgr->allocate(this, capacity);
-
- if (overwrite)
- overwriteBuffer(alloc);
- else
- setBuffer(alloc);
- };
-
- if (!previously_dynamic)
- {
- // TODO deallocate tensor->buffer()
- // issue is that staticTensorManager might have allocate this memory
- setShape(new_shape);
- set_dynamic();
- allocTensorMem(true);
- }
- else if (buffer() == nullptr)
- {
- setShape(new_shape);
- set_dynamic();
- allocTensorMem();
- }
- // when buffer was already allocated and new_shape requires different size
- else
- {
- auto previous_size = total_size();
- auto new_size = new_shape.num_elements() * ir::sizeOfDataType(data_type());
- if (previous_size != new_size)
- {
- _dynamic_mem_mgr->deallocate(this);
-
- setShape(new_shape);
- set_dynamic();
- allocTensorMem(true);
- }
- else
- { // when buffer with same size was already allocated, shape could differ
- setShape(new_shape);
- }
- }
- return true;
-}
-
-} // namespace cpu_common
-} // namespace backend
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/BackendManager.cc b/runtime/onert/core/src/compiler/BackendManager.cc
deleted file mode 100644
index 0093f50fd..000000000
--- a/runtime/onert/core/src/compiler/BackendManager.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler/BackendManager.h"
-
-#include <memory>
-#include <dlfcn.h>
-
-#include "backend/Backend.h"
-#include "backend/controlflow/Backend.h"
-#include "backend/controlflow/Config.h"
-#include "backend/IConfig.h"
-#include "util/logging.h"
-#include "util/ConfigSource.h"
-#include "misc/string_helpers.h"
-
-static const char *SHARED_LIB_EXT =
-#if defined(__APPLE__) && defined(__MACH__)
- ".dylib";
-#else
- ".so";
-#endif
-
-namespace onert
-{
-namespace compiler
-{
-
-BackendManager &BackendManager::get()
-{
- static BackendManager object;
- return object;
-}
-
-BackendManager::BackendManager() { loadControlflowBackend(); }
-
-void BackendManager::loadControlflowBackend()
-{
- auto backend_object = std::unique_ptr<backend::controlflow::Backend, backend_destroy_t>(
- new backend::controlflow::Backend, [](backend::Backend *backend) { delete backend; });
-
- bool initialized = backend_object->config()->initialize(); // Call initialize here?
- if (!initialized)
- {
- throw std::runtime_error(backend::controlflow::Config::ID + " backend initialization failed");
- }
- _controlflow = backend_object.get(); // Save the controlflow backend implementation pointer
- assert(_controlflow);
- _gen_map.emplace(backend_object->config()->id(), std::move(backend_object));
-}
-
-void BackendManager::loadBackend(const std::string &backend)
-{
- if (get(backend) != nullptr)
- {
- return;
- }
-
- // TODO Remove indentation
- {
- const std::string backend_so = "libbackend_" + backend + SHARED_LIB_EXT;
- void *handle = dlopen(backend_so.c_str(), RTLD_LAZY | RTLD_LOCAL);
-
- if (handle == nullptr)
- {
- VERBOSE_F() << "Failed to load backend '" << backend << "' - " << dlerror() << std::endl;
- return;
- }
-
- VERBOSE_F() << "Successfully loaded '" << backend << "' - " << backend_so << "\n";
-
- {
- // load object creator function
- auto backend_create = (backend_create_t)dlsym(handle, "onert_backend_create");
- if (backend_create == nullptr)
- {
- fprintf(stderr, "BackendManager: unable to open function onert_backend_create : %s\n",
- dlerror());
- abort();
- }
-
- // load object creator function
- auto backend_destroy = (backend_destroy_t)dlsym(handle, "onert_backend_destroy");
- if (backend_destroy == nullptr)
- {
- fprintf(stderr, "BackendManager: unable to open function onert_backend_destroy : %s\n",
- dlerror());
- abort();
- }
-
- auto backend_object =
- std::unique_ptr<backend::Backend, backend_destroy_t>(backend_create(), backend_destroy);
- bool initialized = backend_object->config()->initialize(); // Call initialize here?
- if (!initialized)
- {
- VERBOSE_F() << backend.c_str() << " backend initialization failed. Don't use this backend"
- << std::endl;
- dlclose(handle);
- return;
- }
- _gen_map.emplace(backend_object->config()->id(), std::move(backend_object));
- }
-
- // Save backend handle (avoid warning by handle lost without dlclose())
- auto u_handle = std::unique_ptr<void, dlhandle_destroy_t>{handle, [](void *h) { dlclose(h); }};
- _handle_map.emplace(backend, std::move(u_handle));
- }
-}
-
-backend::Backend *BackendManager::get(const std::string &key)
-{
- if (_gen_map.find(key) != _gen_map.end())
- {
- return _gen_map.at(key).get();
- }
-
- return nullptr;
-}
-
-const backend::Backend *BackendManager::get(const std::string &key) const
-{
- if (_gen_map.find(key) != _gen_map.end())
- {
- return _gen_map.at(key).get();
- }
-
- return nullptr;
-}
-
-const backend::controlflow::Backend *BackendManager::getControlflow() const { return _controlflow; }
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/BackendResolver.cc b/runtime/onert/core/src/compiler/BackendResolver.cc
deleted file mode 100644
index a47d8d2d5..000000000
--- a/runtime/onert/core/src/compiler/BackendResolver.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler/BackendResolver.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/Compiler.cc b/runtime/onert/core/src/compiler/Compiler.cc
deleted file mode 100644
index 12b582b35..000000000
--- a/runtime/onert/core/src/compiler/Compiler.cc
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler/Compiler.h"
-
-#include "ParamChecker.h"
-#include "ExecutorFactory.h"
-#include "OperationValidator.h"
-#include "ShapeValidator.h"
-#include "Fp32ToFp16Converter.h"
-
-#include <backend/controlflow/Config.h>
-#include "compiler/BackendManager.h"
-#include "compiler/IScheduler.h"
-#include "compiler/ManualScheduler.h"
-#include "compiler/HEScheduler.h"
-#include "compiler/StaticShapeInference.h"
-#include "compiler/pass/ConstantOutputPass.h"
-#include "compiler/pass/OddOutputPass.h"
-#include "compiler/pass/PassRunner.h"
-#include "exec/ExecTime.h"
-#include "ir/operation/LowerInfo.h"
-#include "ir/verifier/Verifier.h"
-#include "dumper/dot/DotDumper.h"
-#include "compiler/Linear.h"
-#include "interp/InterpExecutor.h"
-#include "util/ConfigSource.h"
-#include "util/logging.h"
-#include "ir/OperationDumper.h"
-#include "misc/string_helpers.h"
-
-namespace onert
-{
-
-namespace compiler
-{
-
-CompilerOptions fetchCompilerOptionsFromGlobalConfig(const ir::Subgraphs &subgs)
-{
- CompilerOptions options;
- options.backend_list = nnfw::misc::split(util::getConfigString(util::config::BACKENDS), ';');
- options.is_primary_subgraph = false;
- options.trace_filepath = util::getConfigString(util::config::TRACE_FILEPATH);
- options.graph_dump_level = util::getConfigInt(util::config::GRAPH_DOT_DUMP);
- options.op_seq_max_node = util::getConfigInt(util::config::OP_SEQ_MAX_NODE);
- options.executor = util::getConfigString(util::config::EXECUTOR);
- options.he_scheduler = util::getConfigBool(util::config::USE_SCHEDULER);
- options.he_profiling_mode = util::getConfigBool(util::config::PROFILING_MODE);
- options.disable_compile = util::getConfigBool(util::config::DISABLE_COMPILE);
- options.fp16_enable = util::getConfigBool(util::config::FP16_ENABLE);
-#ifdef RUY_PROFILER
- options.op_seq_max_node = 1;
-#endif
-
- {
- // Backend for all
- auto &ms_options = options.manual_scheduler_options;
-
- // Default value for op_backend_all is first element in the backend list
- ms_options.backend_for_all = util::getConfigString(util::config::OP_BACKEND_ALLOPS);
-
-// Opcode to Backend
-#define OP(OpName) \
- { \
- const auto &backend_str = util::getConfigString(util::config::OP_BACKEND_##OpName); \
- if (!backend_str.empty()) \
- { \
- ms_options.opcode_to_backend[ir::OpCode::OpName] = backend_str; \
- } \
- }
-#include "ir/Operations.lst"
-#undef OP
-
- // Index to Backend
- // TODO Support multiple subgraphs for manual scheduling
- auto map_str = util::getConfigString(util::config::OP_BACKEND_MAP);
- auto key_val_list = nnfw::misc::split(map_str, ';');
- for (const auto &key_val_str : key_val_list)
- {
- if (key_val_str.empty())
- {
- continue;
- }
-
- auto key_val = nnfw::misc::split(key_val_str, '=');
- const auto &key_str = key_val.at(0);
- const auto &val = key_val.at(1);
- auto key = static_cast<uint32_t>(std::stoi(key_str));
-
- subgs.at(ir::SubgraphIndex{0})
- ->operations()
- .at(ir::OperationIndex{key}); // Check if exist, or this wil throw
- ms_options.index_to_backend.emplace(ir::OperationIndex{key}, val);
- }
- }
- return options;
-}
-
-Compiler::Compiler(const std::shared_ptr<ir::Subgraphs> &subgs)
- : _subgraphs{subgs}, _state{State::CREATED}
-{
- // Set default values for CompilerOptions
- // All these default values should not be fetched from Env, when we stop supporting Android NN
- // API.
- _options = fetchCompilerOptionsFromGlobalConfig(*subgs);
-}
-
-void Compiler::enableToFp16() { _options.fp16_enable = true; }
-
-void Compiler::checkProfilerConditions()
-{
- if (!_options.he_scheduler)
- throw std::runtime_error("Heterogeneous scheduler must be enabled during profiling.");
-
- if (_options.executor != "Dataflow")
- throw std::runtime_error("Profiling mode works only with 'Dataflow' executor");
-}
-
-std::shared_ptr<exec::ExecutorMap> Compiler::compile(void)
-{
- // Set control flow backend for control flow operators
- {
- _options.manual_scheduler_options.opcode_to_backend[ir::OpCode::If] =
- backend::controlflow::Config::ID;
- _options.manual_scheduler_options.opcode_to_backend[ir::OpCode::While] =
- backend::controlflow::Config::ID;
- _options.manual_scheduler_options.opcode_to_backend[ir::OpCode::Permute] =
- backend::controlflow::Config::ID;
- }
-
- // FIXME This is a workaround for bcq operations, should remove it
- {
- _options.manual_scheduler_options.opcode_to_backend[ir::OpCode::BCQFullyConnected] = "bcq";
- _options.manual_scheduler_options.opcode_to_backend[ir::OpCode::BCQGather] = "bcq";
- }
-
- {
- VERBOSE(Compiler) << std::boolalpha;
- VERBOSE(Compiler) << "==== Compiler Options ====" << std::endl;
- VERBOSE(Compiler) << "backend_list : "
- << nnfw::misc::join(_options.backend_list.begin(),
- _options.backend_list.end(), "/")
- << std::endl;
- VERBOSE(Compiler) << "trace_filepath : " << _options.trace_filepath << std::endl;
- VERBOSE(Compiler) << "graph_dump_level : " << _options.graph_dump_level << std::endl;
- VERBOSE(Compiler) << "op_seq_max_node : " << _options.op_seq_max_node << std::endl;
- VERBOSE(Compiler) << "executor : " << _options.executor << std::endl;
- VERBOSE(Compiler) << "manual_scheduler_options : (Too many things to print)" << std::endl;
- VERBOSE(Compiler) << "he_scheduler : " << _options.he_scheduler << std::endl;
- VERBOSE(Compiler) << "he_profiling_mode : " << _options.he_profiling_mode << std::endl;
- VERBOSE(Compiler) << "disable_compile : " << _options.disable_compile << std::endl;
- VERBOSE(Compiler) << "fp16_enable : " << _options.fp16_enable << std::endl;
- VERBOSE(Compiler) << std::noboolalpha;
- }
-
- _subgraphs->iterate([&](const ir::SubgraphIndex &, ir::Graph &subg) {
- // Mandatory passes
- pass::PassRunner{}
- .append(std::make_unique<pass::ConstantOutputPass>(subg))
- .append(std::make_unique<pass::OddOutputPass>(subg))
- .run();
- });
-
- /***************************************************
- * Prepare compilation phase
- ***************************************************/
-
- // Check shape independent operation feature
- // - Operand type
- // - Shape independent parameter
- _subgraphs->iterate(
- [](const onert::ir::SubgraphIndex &, const ir::Graph &subg) { OperationValidator{subg}(); });
-
- auto executors = std::make_shared<exec::ExecutorMap>();
-
- // Compilable check
- // TODO: Support hybrid execution -
- // execution between interpreter and compiled executor (including control flow)
- if (!checkCompilable())
- {
- _subgraphs->iterate([&](const ir::SubgraphIndex &index, ir::Graph &subg) {
- executors->emplace(index, std::make_unique<interp::InterpExecutor>(subg));
- });
- _state = State::COMPILED;
- return executors;
- }
-
- // Mode check
- if (_options.he_profiling_mode)
- checkProfilerConditions();
-
- /***************************************************
- * Backend independent analysis & optimization phase
- ***************************************************/
- auto dump_level = static_cast<dumper::dot::DotDumper::Level>(_options.graph_dump_level);
-
- // Lower: Assign backend
- std::unordered_map<ir::SubgraphIndex, std::unique_ptr<compiler::LoweredGraph>> lowered_subgs;
- _subgraphs->iterate([&](const ir::SubgraphIndex &index, ir::Graph &subg) {
- _options.is_primary_subgraph = (index == ir::SubgraphIndex{0});
- onert::dumper::dot::DotDumper dot_dumper(subg, dump_level);
- dot_dumper.dump(nnfw::misc::str("before_lower_subg-", index.value()));
-
- // Lower: Assign backend
- lowered_subgs[index] = std::make_unique<compiler::LoweredGraph>(subg, _options);
-
- // Check backend(s) for subgraph support FP16
- bool backends_support_fp16 = true;
- auto &contexts = (*lowered_subgs[index]).backend_contexts();
- for (auto it = contexts.begin(); it != contexts.end(); it++)
- {
- // Controlflow backend is not for actual computaion of operations so it is an exception
- if (it->first->config()->id() != backend::controlflow::Config::ID)
- backends_support_fp16 &= it->first->config()->supportFP16();
- }
-
- if (_options.fp16_enable && backends_support_fp16)
- {
- // NOTE: the only acl_cl backend enables fp16 mode
- Fp32ToFp16Converter(*lowered_subgs[index]).run();
- }
-
- subg.setSubgraphs(nullptr);
- });
-
- _subgraphs.reset();
-
- // Shape inference.
- {
- const auto primary_subg_idx = ir::SubgraphIndex{0};
- StaticShapeInferer inferer(primary_subg_idx, lowered_subgs);
- lowered_subgs.at(primary_subg_idx)
- ->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- auto has_dynamic_tensor = inferer.infer(op_seq);
- op_seq.has_dynamic_tensor(has_dynamic_tensor);
- });
- inferer.dump();
- }
-
- // Shape validation
- // TODO Move shape independent feature check from ShapeValidator to OperationValidator
- // TODO Move ShapeValidator into shape inference
- // - Check input tensor shape validation
- // - Check parameter value validation which valid value is depend on input tensor shape
- // - Output tensor shape validation check is needless because
- // static/dynamic shape inferer will make valid output shape
- for (auto &pair : lowered_subgs)
- {
- auto &lowered_subg = pair.second;
- compiler::ShapeValidator{lowered_subg->graph()}();
- }
-
- /*************************************************************
- * Backend independent analysis & optimization phase finished
- *************************************************************/
-
- executors = std::make_shared<exec::ExecutorMap>();
- for (auto &pair : lowered_subgs)
- {
- const auto &subg_index = pair.first;
- auto &lowered_subg = pair.second;
- auto indexed_ranks = lowered_subg->indexed_ranks();
-
- _options.is_primary_subgraph = (subg_index == ir::SubgraphIndex{0});
-
- onert::dumper::dot::DotDumper dot_dumper_lowered(lowered_subg.get(), dump_level);
- dot_dumper_lowered.dump("after_lower_subg-" + std::to_string(subg_index.value()));
-
- ir::OperationDumper dumper("START SUBGRAPH " + std::to_string(subg_index.value()));
- lowered_subg->graph().operations().iterate(
- [&](const ir::OperationIndex &, const ir::Operation &op) { op.accept(dumper); });
- auto executor = std::unique_ptr<exec::IExecutor>{
- ExecutorFactory::get().create(std::move(lowered_subg), _options, executors)};
- executor->setIndexedRanks(indexed_ranks);
- executors->insert(std::make_pair(subg_index, std::move(executor)));
- }
-
- /********************************
- * Code generation phase finished
- ********************************/
- _state = State::COMPILED;
- return executors;
-}
-
-bool Compiler::checkCompilable()
-{
- // Disable compile phase
- // When ready to use interpreter backend, remove this config and use backend setting
- if (_options.disable_compile)
- {
- return false;
- }
-
- // TODO check unspecified operand shape
-
- // Check compilable parameter
- for (uint32_t i = 0; i < _subgraphs->count(); ++i)
- {
- auto graph = _subgraphs->at(ir::SubgraphIndex{i});
- ParamChecker paramChecker{graph};
- paramChecker();
- if (paramChecker.haveNoneConstParam())
- {
- return false;
- }
- }
-
- return true;
-}
-
-} // namespace compiler
-
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/ExecutorFactory.cc b/runtime/onert/core/src/compiler/ExecutorFactory.cc
deleted file mode 100644
index bb325ffbc..000000000
--- a/runtime/onert/core/src/compiler/ExecutorFactory.cc
+++ /dev/null
@@ -1,501 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ExecutorFactory.h"
-
-#include <functional>
-#include "exec/ExecutionObservers.h"
-#include "exec/LinearExecutor.h"
-#include "exec/DataflowExecutor.h"
-#include "exec/ParallelExecutor.h"
-#include "compiler/BackendManager.h"
-#include "compiler/ExecutionBuilder.h"
-#include "exec/ExecTime.h"
-#include "compiler/Linear.h"
-#include "compiler/TensorBuilders.h"
-#include "backend/IConstantInitializer.h"
-#include "backend/IKernelGenerator.h"
-#include "backend/IOptimizer.h"
-#include "backend/IPortableTensor.h"
-#include "backend/ITensorRegister.h"
-#include "backend/controlflow/Config.h"
-#include "backend/controlflow/KernelGenerator.h"
-#include "backend/controlflow/UserTensor.h"
-#include "backend/controlflow/TensorBuilder.h"
-#include <memory>
-
-namespace onert
-{
-namespace
-{
-
-class SyncFunction final : public exec::IFunction
-{
-public:
- virtual ~SyncFunction() = default;
- SyncFunction(std::unique_ptr<exec::IFunction> fn, const std::shared_ptr<backend::IConfig> config)
- : _fn{std::move(fn)}, _config{config}
- {
- assert(_fn);
- assert(_config);
- }
-
- void run() override
- {
- _fn->run();
- _config->sync();
- }
-
- void prepare() override { _fn->prepare(); }
-
-private:
- std::unique_ptr<exec::IFunction> _fn;
- std::shared_ptr<backend::IConfig> _config;
-};
-
-} // namespace
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-
-ExecutorFactory &ExecutorFactory::get()
-{
- static ExecutorFactory singleton;
- return singleton;
-}
-
-ExecutorFactory::ExecutorFactory()
-{
- _map["Linear"] = createLinearExecutor;
- _map["Dataflow"] = std::bind(createDataflowExecutor, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, false);
- _map["Parallel"] = std::bind(createDataflowExecutor, std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3, true);
-}
-
-exec::IExecutor *ExecutorFactory::create(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const compiler::CompilerOptions &options,
- const std::shared_ptr<exec::ExecutorMap> &executor_map)
-{
- return _map.at(options.executor)(std::move(lowered_graph), options, executor_map);
-}
-
-void ExecutorFactory::initializeBackendContext(compiler::LoweredGraph *lowered_graph)
-{
- struct Entry
- {
- std::vector<backend::BackendContext::OperationInfo> operation_list;
- std::vector<ir::OperandIndex> operand_list;
- };
- std::unordered_map<const backend::Backend *, Entry> backend_assets;
-
- // Build lists for operations
- lowered_graph->op_seqs().iterate(
- [&](const ir::OpSequenceIndex &op_seq_index, const ir::OpSequence &op_seq) {
- auto &op_seq_li = lowered_graph->getLowerInfo()->op_seq;
- auto backend = op_seq_li.at(op_seq_index)->backend();
- for (auto &operation_idx : op_seq.operations())
- {
- backend_assets[backend].operation_list.emplace_back(operation_idx, op_seq.getLayout());
- }
- });
-
- // Build lists for operands
- lowered_graph->graph().operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &) {
- const auto lower_info = lowered_graph->getLowerInfo(ind);
- for (auto factor : lower_info->def_factors())
- {
- auto backend = factor.backend();
- backend_assets[backend].operand_list.emplace_back(ind);
- }
- });
-
- for (auto &pair : backend_assets)
- {
- auto backend = pair.first;
- auto &arg = pair.second;
- lowered_graph->backend_contexts().at(backend)->initialize(arg.operation_list, arg.operand_list);
- }
-}
-
-void ExecutorFactory::runTensorRegistration(compiler::LoweredGraph *lowered_graph,
- const std::vector<ir::OpSequenceIndex> &order)
-{
- for (const auto index : order)
- {
- const auto &op_seq = lowered_graph->op_seqs().at(index);
- const auto backend = lowered_graph->getLowerInfo(index)->backend();
- const auto tensor_register = lowered_graph->backend_contexts().at(backend)->tensor_register;
- auto tensor_builder = lowered_graph->backend_contexts().at(backend)->tensor_builder;
- auto model_io = lowered_graph->graph().getInputs() + lowered_graph->graph().getOutputs();
-
- if (tensor_register)
- {
- // Custom registration
- tensor_register->registerTensors(op_seq, lowered_graph->getLowerInfo());
- }
- else
- {
- // Default registration
- for (const auto op_idx : op_seq)
- {
- const auto &op = lowered_graph->graph().operations().at(op_idx);
- for (const auto &index :
- (op.getInputs() | ir::Remove::UNDEFINED) + (op.getOutputs() | ir::Remove::UNDEFINED))
- {
- if (!tensor_builder->isRegistered(index) && !model_io.contains(index))
- {
- const auto &operand_lower_info =
- lowered_graph->getLowerInfo(index)->def_factors().getOnlyElement();
-
- // E.g., permute (CPU) -> tensor A -> MaxPool2D(acl_cl)
- // op.getOutputs() of permute (CPU) returns tensor A
- // but tensor A belongs to the backend of acl_cl.
- // So, we have to make this tensor NOT registered for CPU.
- if (operand_lower_info.backend() != backend)
- continue;
-
- const auto &obj = lowered_graph->graph().operands().at(index);
- const auto frontend_layout = op_seq.getLayout();
- const auto backend_layout = operand_lower_info.layout();
- ir::OperandInfo backend_info{permuteShape(obj.shape(), frontend_layout, backend_layout),
- obj.typeInfo(), obj.info().memAllocType(),
- obj.isConstant()};
- tensor_builder->registerTensorInfo(index, backend_info, backend_layout);
- }
- }
- }
- }
- }
-}
-
-std::vector<backend::ITensor *>
-ExecutorFactory::initializeModelIOTensors(compiler::LoweredGraph &lowered_graph,
- const ir::OperandIndexSequence &indices)
-{
- std::vector<backend::ITensor *> ret;
-
- // TODO Store controlflow backend in BackendContext
- std::shared_ptr<backend::controlflow::TensorBuilder> cf_tensor_builder;
- std::shared_ptr<backend::controlflow::TensorRegistry> cf_tensor_reg;
- for (const auto &e : lowered_graph.backend_contexts())
- {
- auto backend = e.first;
- auto &context = e.second;
- if (backend->config()->id() == backend::controlflow::Config::ID)
- {
- cf_tensor_builder =
- std::dynamic_pointer_cast<backend::controlflow::TensorBuilder>(context->tensor_builder);
- cf_tensor_reg =
- std::dynamic_pointer_cast<backend::controlflow::TensorRegistry>(context->tensor_registry);
- }
- }
- assert(cf_tensor_builder);
- assert(cf_tensor_reg);
-
- for (auto ind : indices)
- {
- const auto &operand = lowered_graph.graph().operands().at(ind);
- auto tensor = std::make_unique<backend::controlflow::UserTensor>(
- operand.info(),
- ir::Layout::NHWC /* FIXME find op_seq for this operand and use frontend_layout */
- );
-
- // Add tensor to controlflow TensorRegistry.
- cf_tensor_reg->setNativeUserTensor(ind, std::move(tensor));
- auto *itensor = cf_tensor_reg->getITensor(ind);
- ret.push_back(itensor);
- }
- return ret;
-}
-
-void ExecutorFactory::prepareMigrantTensors(compiler::LoweredGraph &lowered_graph)
-{
- TensorRegistries tensor_regs{lowered_graph.backend_contexts(), true};
-
- lowered_graph.op_seqs().iterate(
- [&](const ir::OpSequenceIndex &op_seq_index, const ir::OpSequence &op_seq) {
- auto lower_info = lowered_graph.getLowerInfo(op_seq_index);
- auto &backend_ctx = lowered_graph.backend_contexts().at(lower_info->backend());
- for (auto ind : (op_seq.getInputs() + op_seq.getOutputs()) | ir::Remove::DUPLICATED |
- ir::Remove::UNDEFINED)
- {
- // If an OpSequence input/output tensor does not have a own tensor object,
- // it must be using migrant tensors, so find the tensor from other tensor builders and
- // set the tensor to this tensor builder if portable
- if (!backend_ctx->tensor_registry->getITensor(ind))
- {
- auto tensor = tensor_regs.getITensor(ind);
- assert(tensor); // The tensor must have been registered
- auto ptensor = dynamic_cast<backend::IPortableTensor *>(tensor);
- if (ptensor)
- backend_ctx->tensor_registry->setMigrantTensor(ind, ptensor);
- }
- }
- });
-}
-
-exec::IExecutor *
-ExecutorFactory::createLinearExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const compiler::CompilerOptions &options,
- const std::shared_ptr<exec::ExecutorMap> &executor_map)
-{
- const auto &backend_contexts = lowered_graph->backend_contexts();
-
- initializeBackendContext(lowered_graph.get());
-
- // linearize
- assert(!lowered_graph->graph().isBuildingPhase());
-
- /*************************************************
- * Backend dependent analysis & optimization phase
- *************************************************/
-
- for (auto &pair : backend_contexts)
- {
- auto &optimizer = pair.second->optimizer;
- if (optimizer)
- optimizer->optimize();
- }
-
- /**********************************************************
- * Backend dependent analysis & optimization phase finished
- **********************************************************/
-
- /***********************
- * Code generation phase
- ***********************/
-
- auto order = Linear::linearize(*lowered_graph);
- runTensorRegistration(lowered_graph.get(), order);
-
- std::vector<backend::ITensor *> input_tensors;
- std::vector<backend::ITensor *> output_tensors;
- if (options.is_primary_subgraph)
- {
- input_tensors = initializeModelIOTensors(*lowered_graph, lowered_graph->graph().getInputs());
- output_tensors = initializeModelIOTensors(*lowered_graph, lowered_graph->graph().getOutputs());
- }
-
- Linear::dump(*lowered_graph, order);
- Linear::planTensors(*lowered_graph, order);
-
- TensorBuilders tensor_builders{lowered_graph->backend_contexts(), true};
- TensorRegistries tensor_regs{lowered_graph->backend_contexts(), true};
-
- for (auto &tensor_builder : tensor_builders)
- {
- tensor_builder->prepare();
- }
-
- prepareMigrantTensors(*lowered_graph);
-
- ExecutionBuilder builder;
-
- // Generate kernels
- lowered_graph->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &op_seq_index,
- const ir::OpSequence &op_seq) {
- auto lower_info = lowered_graph->getLowerInfo(op_seq_index);
- auto kernel_gen = lowered_graph->backend_contexts().at(lower_info->backend())->kernel_gen;
- // Set TensorBuilderSet and ExecutorMap to kernel_gen of control flow
- auto cf_kernel_gen = dynamic_cast<backend::controlflow::KernelGenerator *>(kernel_gen.get());
- if (cf_kernel_gen != nullptr)
- {
- cf_kernel_gen->setTensorRegistries(tensor_regs);
- cf_kernel_gen->setExecutorMap(executor_map);
- }
- auto fn_seq = kernel_gen->generate(op_seq);
- if (options.he_profiling_mode)
- {
- fn_seq->wrap<SyncFunction>(lower_info->backend()->config());
- }
- builder.append(op_seq_index, {&op_seq, lower_info, std::move(fn_seq)});
- });
-
- for (auto &tensor_builder : tensor_builders)
- {
- tensor_builder->allocate();
- }
-
- for (auto &pair : backend_contexts)
- {
- pair.second->initConsts();
- }
-
- lowered_graph->graph().operands().iterate(
- [](const ir::OperandIndex &, ir::Operand &obj) { obj.releaseData(); });
-
- auto code_map = builder.releaseCodeMap();
-
- for (auto &it : code_map)
- {
- auto op_seq_index = it.first;
- auto &fn_seq = it.second.fn_seq;
-
- fn_seq->iterate([&](exec::IFunction &ifunc) {
- ifunc.prepare();
- auto backend = lowered_graph->getLowerInfo(op_seq_index)->backend();
- auto tensor_builder = lowered_graph->backend_contexts().at(backend)->tensor_builder;
- tensor_builder->postFunctionPrepare();
- });
- }
-
- auto exec =
- new exec::LinearExecutor{std::move(lowered_graph), input_tensors, output_tensors, tensor_regs,
- std::move(code_map), order};
-
- if (!options.trace_filepath.empty())
- {
- std::unique_ptr<exec::IExecutionObserver> ctp =
- std::make_unique<exec::ChromeTracingObserver>(options.trace_filepath, exec->graph());
- exec->addObserver(std::move(ctp));
- }
-
- return exec;
-}
-
-exec::IExecutor *ExecutorFactory::createDataflowExecutor(
- std::unique_ptr<compiler::LoweredGraph> lowered_graph, const compiler::CompilerOptions &options,
- const std::shared_ptr<exec::ExecutorMap> &executor_map, bool parallel)
-{
- const auto &backend_contexts = lowered_graph->backend_contexts();
-
- initializeBackendContext(lowered_graph.get());
-
- auto order = Linear::linearize(*lowered_graph);
- runTensorRegistration(lowered_graph.get(), order);
-
- std::vector<backend::ITensor *> input_tensors;
- std::vector<backend::ITensor *> output_tensors;
- if (options.is_primary_subgraph)
- {
- input_tensors = initializeModelIOTensors(*lowered_graph, lowered_graph->graph().getInputs());
- output_tensors = initializeModelIOTensors(*lowered_graph, lowered_graph->graph().getOutputs());
- }
-
- TensorBuilders tensor_builders{lowered_graph->backend_contexts(), true};
- TensorRegistries tensor_regs{lowered_graph->backend_contexts(), true};
-
- // To make tensors never be deallocated, this is a workaround to use static memory planner
- for (auto &tensor_builder : tensor_builders)
- {
- lowered_graph->graph().operands().iterate(
- [&](const ir::OperandIndex &ind, const ir::Operand &) {
- if (tensor_builder->isRegistered(ind))
- {
- tensor_builder->notifyFirstUse(ind);
- }
- });
- }
-
- for (auto &tensor_builder : tensor_builders)
- {
- tensor_builder->prepare();
- }
-
- prepareMigrantTensors(*lowered_graph);
-
- ExecutionBuilder builder;
-
- // Generate kernels
- lowered_graph->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &op_seq_index,
- const ir::OpSequence &op_seq) {
- auto lower_info = lowered_graph->getLowerInfo(op_seq_index);
- auto kernel_gen = lowered_graph->backend_contexts().at(lower_info->backend())->kernel_gen;
- // Set TensorBuilderSet and ExecutorMap to kernel_gen of control flow
- auto cf_kernel_gen = dynamic_cast<backend::controlflow::KernelGenerator *>(kernel_gen.get());
- if (cf_kernel_gen != nullptr)
- {
- assert(cf_kernel_gen != nullptr);
- cf_kernel_gen->setTensorRegistries(tensor_regs);
- cf_kernel_gen->setExecutorMap(executor_map);
- }
- auto fn_seq = kernel_gen->generate(op_seq);
- if (options.he_profiling_mode)
- {
- fn_seq->wrap<SyncFunction>(lower_info->backend()->config());
- }
- builder.append(op_seq_index, {&op_seq, lower_info, std::move(fn_seq)});
- });
-
- for (const auto &tensor_builder : tensor_builders)
- {
- tensor_builder->allocate();
- }
-
- for (auto &pair : backend_contexts)
- {
- pair.second->initConsts();
- }
-
- lowered_graph->graph().operands().iterate(
- [](const ir::OperandIndex &, ir::Operand &obj) { obj.releaseData(); });
-
- auto code_map = builder.releaseCodeMap();
-
- for (auto &it : code_map)
- {
- auto op_seq_index = it.first;
- auto &fn_seq = it.second.fn_seq;
-
- fn_seq->iterate([&](exec::IFunction &ifunc) {
- ifunc.prepare();
- auto backend = lowered_graph->getLowerInfo(op_seq_index)->backend();
- auto tensor_builder = lowered_graph->backend_contexts().at(backend)->tensor_builder;
- tensor_builder->postFunctionPrepare();
- });
- }
-
- exec::ExecutorBase *exec = nullptr;
- if (parallel)
- {
- exec = new exec::ParallelExecutor{std::move(lowered_graph), input_tensors, output_tensors,
- tensor_regs, std::move(code_map)};
- }
- else
- {
- auto dataflow_exec = new exec::DataflowExecutor{
- std::move(lowered_graph), input_tensors, output_tensors, tensor_regs, std::move(code_map)};
- if (options.he_profiling_mode)
- {
- std::vector<const backend::Backend *> backends;
- for (const auto &pair : backend_contexts)
- {
- backends.push_back(pair.first);
- }
- auto et = std::make_shared<exec::ExecTime>(backends);
- std::unique_ptr<exec::IExecutionObserver> obs =
- std::make_unique<exec::ProfileObserver>(et, dataflow_exec->graph());
- dataflow_exec->addObserver(std::move(obs));
- }
- exec = dataflow_exec;
- }
-
- if (!options.trace_filepath.empty())
- {
- std::unique_ptr<exec::IExecutionObserver> ctp =
- std::make_unique<exec::ChromeTracingObserver>(options.trace_filepath, exec->graph());
- exec->addObserver(std::move(ctp));
- }
-
- return exec;
-}
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/ExecutorFactory.h b/runtime/onert/core/src/compiler/ExecutorFactory.h
deleted file mode 100644
index e76b721ea..000000000
--- a/runtime/onert/core/src/compiler/ExecutorFactory.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_EXECUTOR_FACTORY_H__
-#define __ONERT_COMPILER_EXECUTOR_FACTORY_H__
-
-#include <unordered_map>
-
-#include "backend/ITensor.h"
-#include "exec/IExecutor.h"
-#include "compiler/LoweredGraph.h"
-#include "TensorRegistries.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-class ExecutorFactory
-{
-public:
- static ExecutorFactory &get();
-
-public:
- exec::IExecutor *create(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const compiler::CompilerOptions &options,
- const std::shared_ptr<exec::ExecutorMap> &executor_map);
-
-private:
- ExecutorFactory();
-
-private:
- static void initializeBackendContext(compiler::LoweredGraph *lowered_graph);
- static void runTensorRegistration(compiler::LoweredGraph *lowered_graph,
- const std::vector<ir::OpSequenceIndex> &order);
- static std::vector<backend::ITensor *>
- initializeModelIOTensors(compiler::LoweredGraph &lowered_graph,
- const ir::OperandIndexSequence &indices);
- static void prepareMigrantTensors(compiler::LoweredGraph &lowered_graph);
- static exec::IExecutor *
- createLinearExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const compiler::CompilerOptions &options,
- const std::shared_ptr<exec::ExecutorMap> &executor_map);
- static exec::IExecutor *
- createDataflowExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const compiler::CompilerOptions &options,
- const std::shared_ptr<exec::ExecutorMap> &executor_map, bool parallel);
-
-private:
- std::unordered_map<std::string, std::function<exec::IExecutor *(
- std::unique_ptr<compiler::LoweredGraph>,
- const compiler::CompilerOptions &options,
- const std::shared_ptr<exec::ExecutorMap> &executor_map)>>
- _map;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_EXECUTOR_FACTORY_H__
diff --git a/runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc b/runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc
deleted file mode 100644
index 23a6a253d..000000000
--- a/runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc
+++ /dev/null
@@ -1,954 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Fp32ToFp16Converter.h"
-#include "ir/operation/ConvertFp32ToFp16.h"
-#include "ir/operation/ConvertFp16ToFp32.h"
-#include "util/logging.h"
-
-#include <Half.h>
-
-using float16 = Half;
-
-namespace
-{
-
-const std::string kAclClBackendConfigId = "acl_cl";
-
-void copyDataFromFp32ToFp16(const float *from, float16 *into, size_t num_elements)
-{
- for (size_t i = 0; i < num_elements; ++i)
- {
- into[i] = static_cast<float16>(from[i]);
- }
-}
-
-} // namespace
-
-namespace onert
-{
-
-namespace compiler
-{
-
-Fp32ToFp16Converter::Fp32ToFp16Converter(compiler::LoweredGraph &lowered_graph)
- : _lowered_graph{lowered_graph}
-{
- VERBOSE(Fp32ToFp16Converter) << "Fp16 Enable on" << std::endl;
-}
-
-// For example, two OpSequences are there and each OpSequence has an Operation
-//
-// OP#0 // model input
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#1
-// |
-// [OPERATION] // OpSeq#1
-// |
-// OP#2 // model output
-//
-//
-// AFTER `appendOpSequences()`,
-// note that model_input and model_output are not changed.
-//
-// OP#0
-// |
-// [FP32TO16] // OpSeq#2
-// |
-// OP#3
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#4
-// |
-// [FP16TO32] // OpSeq#3
-// |
-// OP#1
-// |
-// [FP32TO16] // OpSeq#4
-// |
-// OP#5
-// |
-// [OPERATION] // OpSeq#1
-// |
-// OP#6
-// |
-// [FP16TO32] // OpSeq#5
-// |
-// OP#2
-//
-//
-// AFTER `optimize()`,
-//
-// OP#0
-// |
-// [FP32TO16] // OpSeq#2
-// |
-// OP#3
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#4
-// |
-// [OPERATION] // OpSeq#1
-// |
-// OP#6
-// |
-// [FP16TO32] // OpSeq#5
-// |
-// OP#2
-//
-//
-// AFTER `convertOperands()`,
-//
-// OP#0 // model_input, not fp16
-// |
-// [FP32TO16] // OpSeq#2
-// |
-// OP#3 // fp16
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#4 // fp16
-// |
-// [OPERATION] // OpSeq#1
-// |
-// OP#6 // fp16
-// |
-// [FP16TO32] // OpSeq#5
-// |
-// OP#2 // model_output, notfp16
-//
-//
-// AFTER `convertDatas()`,
-//
-// OP#0 // model_input, not fp16
-// |
-// [FP32TO16] // OpSeq#2
-// |
-// OP#3 // fp16
-// |
-// [OPERATION] // OpSeq#0, constants are fp16
-// |
-// OP#4 // fp16
-// |
-// [OPERATION] // OpSeq#1, constants are fp16
-// |
-// OP#6 // fp16
-// |
-// [FP16TO32] // OpSeq#5
-// |
-// OP#2 // model_output, notfp16
-//
-void Fp32ToFp16Converter::run()
-{
- // Append new OpSequence which includes ConvertFp32ToFp16
- // and append new OpSequence which includes ConvertFp16ToFp32
- appendOpSequences();
-
- // Remove unnecessary converting operations
- optimize();
-
- // Convert operands' data types from fp32 to fp16
- convertOperands();
-
- // Convert Datas
- convertDatas();
-
- // Print the result
- printOpSequences("FINAL OpSequences");
-}
-
-void Fp32ToFp16Converter::appendOpSequences()
-{
- _lowered_graph.op_seqs().iterate(
- [&](const ir::OpSequenceIndex &op_seq_ind, ir::OpSequence &op_seq) {
- const auto lower_info = _lowered_graph.getLowerInfo(op_seq_ind);
- assert(lower_info != nullptr);
-
- // For now, the only acl_cl supports fully fp16 type
- // TODO Support fp16 on acl_neon. Current acl_neon supports the only reshape and concat
- // operations.
- // To do this, we could check the support by `operation by operation`. After that, we
- // would partition an op_seq if it contains unsupported operations.
- if (lower_info->backend()->config()->id() != kAclClBackendConfigId)
- return;
-
- // OpSeq's input set should be included in the first operation's input set or
- // OpSeq's output set should be included in the last operation's output set
- assert(checkOperandsOfOpSequence(op_seq));
-
- // Append converting OpSequence for fp16 but all operands' types are not fp16 still.
- appendNewOpSeqForConvertFp32ToFp16(op_seq_ind, op_seq);
- appendNewOpSeqForConvertFp16ToFp32(op_seq_ind, op_seq);
- });
-}
-
-//
-// BEFORE
-//
-// OP#0 // model input
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#1 // model output
-//
-//
-// AFTER
-//
-// OP#0 // model input
-// |
-// [FP32TO16] // OpSeq#1
-// |
-// OP#2
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#1 // model output
-//
-void Fp32ToFp16Converter::appendNewOpSeqForConvertFp32ToFp16(const ir::OpSequenceIndex &op_seq_ind,
- ir::OpSequence &op_seq)
-{
- // OpSeq's input set is included in the first operation's input set
- const ir::OperandIndexSequence op_seq_inputs = op_seq.getInputs(); // copied
-
- // NOTE Please do not change sequence of op_seq_inputs. It can change the sequence of inputs of
- // Subgraph
- for (const auto &op_seq_input_ind :
- op_seq_inputs | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- if (checkOperandType(op_seq_input_ind) == false)
- continue;
-
- // new operand w/ datatype fp32
- const auto new_op_ind = newCopiedOperand(op_seq_input_ind);
-
- // set new lower_info for operand
- setNewOperandLowerInfo(op_seq_ind, new_op_ind);
-
- // manipulate input of operation and op_seq
- // - replace the first operation's input to new operand
- // with old operand's removeUse and new operand's appendUse()
- manipulateInput(op_seq_ind, op_seq_input_ind, new_op_ind);
-
- // new op
- const auto new_node_ind = newOperationConvertFp32ToFp16(op_seq_input_ind, new_op_ind);
-
- // new op_seq
- const auto new_op_seq_ind = newOpSequence(op_seq_ind, new_node_ind);
-
- // set new lower_info for op_seq
- setNewOpSequenceLowerInfo(op_seq_ind, new_op_seq_ind);
-
- _list_fp32_to_fp16.insert(new_op_seq_ind);
-
- VERBOSE(Fp32ToFp16Converter) << "NEW |Fp32To16]"
- << ir::getStrFromOpSeq(_lowered_graph.op_seqs().at(new_op_seq_ind),
- _lowered_graph.graph().operations())
- << std::endl;
- }
-}
-
-//
-// BEFORE
-//
-// OP#0 // model input
-// |
-// [FP32TO16] // OpSeq#1
-// |
-// OP#2
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#1 // model output
-//
-//
-// AFTER
-//
-// OP#0 // model input
-// |
-// [FP32TO16] // OpSeq#1
-// |
-// OP#2
-// |
-// [OPERATION] // OpSeq#0
-// |
-// OP#3
-// |
-// [FP16TO32] // OpSeq#2
-// |
-// OP#1 // model output
-//
-void Fp32ToFp16Converter::appendNewOpSeqForConvertFp16ToFp32(const ir::OpSequenceIndex &op_seq_ind,
- ir::OpSequence &op_seq)
-{
- // OpSeq's output set is included in the last operation's output set
- const ir::OperandIndexSequence op_seq_outputs = op_seq.getOutputs(); // copied
-
- // NOTE Please do not change sequence of op_seq_outputs. It can change the sequence of outputs of
- // Subgraph
- for (const auto &op_seq_output_ind :
- op_seq_outputs | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- if (checkOperandType(op_seq_output_ind) == false)
- continue;
-
- // new operand w/ datatype fp32
- const auto new_op_ind = newCopiedOperand(op_seq_output_ind);
-
- // set new lower_info for operand
- setNewOperandLowerInfo(op_seq_ind, new_op_ind);
-
- // manipulate output of operation and op_seq
- // - replace output of the last operation's output to new operand
- // with old operand's unsetDef and new operand's appendDef()
- manipulateOutput(op_seq_ind, op_seq_output_ind, new_op_ind);
-
- // new op
- auto new_node_ind = newOperationConvertFp16ToFp32(op_seq_output_ind, new_op_ind);
-
- // new op_seq
- auto new_op_seq_ind = newOpSequence(op_seq_ind, new_node_ind);
-
- // set new lower_info for op_seq
- setNewOpSequenceLowerInfo(op_seq_ind, new_op_seq_ind);
-
- _list_fp16_to_fp32.insert(new_op_seq_ind);
-
- VERBOSE(Fp32ToFp16Converter) << "NEW |Fp16To32]"
- << ir::getStrFromOpSeq(_lowered_graph.op_seqs().at(new_op_seq_ind),
- _lowered_graph.graph().operations())
- << std::endl;
- }
-}
-
-void Fp32ToFp16Converter::optimize()
-{
- printOpSequences("BEFORE opt");
-
- removeContiguousConvertOpSequences();
-
- printOpSequences("AFTER removeContiguousConverts");
-
- // TODO Handle Split from the beginning of the model. ex) MODELS/inception_module
- //
- // BEFORE)
- //
- // OP#0---------------------. // model_input
- // | |
- // [FP32TO16] // OpSeq#0 [FP32TO16] // OpSeq#1
- // | |
- // OP#1 OP#2
- // | |
- // [OPERATION] // OpSeq#2 [OPERATION] // OpSeq#3
- //
- //
- // AFTER)
- //
- // OP#0 // model_input
- // |
- // [FP32TO16] // OpSeq#4
- // |
- // OP#3---------------------------.
- // | |
- // [OPERATION] // OpSeq#2 [OPERATION] // OpSeq#3
-}
-
-void Fp32ToFp16Converter::convertOperands()
-{
- _lowered_graph.op_seqs().iterate(
- [&](const ir::OpSequenceIndex &op_seq_ind, ir::OpSequence &op_seq) {
- const auto lower_info = _lowered_graph.getLowerInfo(op_seq_ind);
- assert(lower_info != nullptr);
- // For now, the only acl_cl supports fully fp16
- if (lower_info->backend()->config()->id() != kAclClBackendConfigId)
- return;
-
- // Convert input,output operands' type to fp16
- convertOperandsOfOpSequence(op_seq);
- });
-}
-
-void Fp32ToFp16Converter::convertOperandsOfOpSequence(ir::OpSequence &op_seq)
-{
- auto &operands = _lowered_graph.graph().operands();
- const auto &operations = _lowered_graph.graph().operations();
- const auto &op_seq_inputs = _lowered_graph.graph().getInputs();
- const auto &op_seq_outputs = _lowered_graph.graph().getOutputs();
-
- for (auto &op_idx : op_seq)
- {
- const auto &node = operations.at(op_idx);
- for (auto &ind : node.getInputs() | ir::Remove::UNDEFINED)
- {
- if (node.opcode() == ir::OpCode::ConvertFp32ToFp16 || op_seq_inputs.contains(ind))
- continue;
-
- auto &obj = operands.at(ind);
- if (obj.isConstant() || obj.typeInfo().type() != ir::DataType::FLOAT32)
- continue;
-
- obj.type(ir::DataType::FLOAT16);
-
- VERBOSE(Fp32ToFp16Converter) << "Input Operand #" << ind.value() << ": fp16" << std::endl;
- }
-
- for (auto &ind : node.getOutputs())
- {
- if (node.opcode() == ir::OpCode::ConvertFp16ToFp32 || op_seq_outputs.contains(ind))
- continue;
-
- auto &obj = operands.at(ind);
- if (obj.isConstant() || obj.typeInfo().type() != ir::DataType::FLOAT32)
- continue;
-
- obj.type(ir::DataType::FLOAT16);
-
- VERBOSE(Fp32ToFp16Converter) << "Output Operand #" << ind.value() << ": fp16" << std::endl;
- }
- }
-}
-
-void Fp32ToFp16Converter::convertDatas()
-{
- _lowered_graph.graph().operands().iterate([&](const ir::OperandIndex &ind, ir::Operand &obj) {
- const auto type = obj.typeInfo().type();
- if (type == ir::DataType::FLOAT32 && obj.isConstant())
- {
- auto data = obj.data();
- assert(data != nullptr);
-
- size_t num_elements = obj.operandSize() / ir::sizeOfDataType(type);
- size_t new_ptr_size = num_elements * sizeof(float16);
- auto new_ptr = std::make_unique<uint8_t[]>(new_ptr_size);
- copyDataFromFp32ToFp16(reinterpret_cast<const float *>(data->base()),
- reinterpret_cast<float16 *>(new_ptr.get()), num_elements);
- obj.releaseData();
-
- auto new_data = std::make_unique<ir::CachedData>(new_ptr.get(), new_ptr_size);
-
- obj.data(std::move(new_data));
- obj.type(ir::DataType::FLOAT16);
- VERBOSE(Fp32ToFp16Converter) << "Constant Operand #" << ind.value() << ": fp16" << std::endl;
- }
- });
-}
-
-void Fp32ToFp16Converter::printOpSequences(const std::string &pre_msg, const std::string &post_msg)
-{
- if (pre_msg.empty() == false)
- {
- VERBOSE(Fp32ToFp16Converter) << pre_msg << std::endl;
- }
-
- _lowered_graph.op_seqs().iterate([&](const ir::OpSequenceIndex &, const ir::OpSequence &op_seq) {
- VERBOSE(Fp32ToFp16Converter) << ir::getStrFromOpSeq(op_seq, _lowered_graph.graph().operations())
- << std::endl;
- });
-
- if (post_msg.empty() == false)
- {
- VERBOSE(Fp32ToFp16Converter) << post_msg << std::endl;
- }
-}
-
-bool Fp32ToFp16Converter::checkOperandType(const ir::OperandIndex &op_ind) const
-{
- const auto &operands = _lowered_graph.graph().operands();
- const auto &obj = operands.at(op_ind);
- return (obj.isConstant() == false && obj.typeInfo().type() == ir::DataType::FLOAT32);
-}
-
-bool Fp32ToFp16Converter::checkOperandsOfOpSequence(const ir::OpSequence &op_seq) const
-{
- const auto &operations = _lowered_graph.graph().operations();
-
- // the first node's input
- const auto &first_node_ind = op_seq.operations().at(0);
- const auto &first_node = operations.at(first_node_ind);
- const auto &first_node_inputs = first_node.getInputs();
- for (const auto &op_seq_input_ind : op_seq.getInputs() | ir::Remove::UNDEFINED)
- {
- if (first_node_inputs.contains(op_seq_input_ind) == false)
- return false;
- }
-
- // the last node's output
- size_t last_ind = op_seq.size() - 1;
- const auto &last_node_ind = op_seq.operations().at(last_ind);
- const auto &last_node = operations.at(last_node_ind);
- const auto &last_node_outputs = last_node.getOutputs();
- for (const auto &op_seq_output_ind : op_seq.getOutputs())
- {
- if (last_node_outputs.contains(op_seq_output_ind) == false)
- return false;
- }
-
- return true;
-}
-
-ir::OperandIndex Fp32ToFp16Converter::newCopiedOperand(const ir::OperandIndex &op_ind)
-{
- auto &operands = _lowered_graph.graph().operands();
- const auto &obj = operands.at(op_ind);
- auto new_op_ind = operands.emplace(obj.shape(), obj.typeInfo());
- return new_op_ind;
-}
-
-void Fp32ToFp16Converter::setNewOperandLowerInfo(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperandIndex &new_op_ind)
-{
- const auto lower_info = _lowered_graph.getLowerInfo(op_seq_ind);
- assert(lower_info != nullptr);
- auto new_lower_info = std::make_unique<ir::operand::LowerInfo>();
- auto permute_factor = ir::operand::PermuteFactor(lower_info->backend(), lower_info->layout());
- new_lower_info->addDefPermuteFactor(permute_factor);
- new_lower_info->addUsePermuteFactor(permute_factor);
- _lowered_graph.setLowerInfo(new_op_ind, std::move(new_lower_info));
-}
-
-void Fp32ToFp16Converter::setNewOpSequenceLowerInfo(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OpSequenceIndex &new_op_seq_ind)
-{
- const auto lower_info = _lowered_graph.getLowerInfo(op_seq_ind);
- assert(lower_info != nullptr);
-
- auto new_lower_info =
- std::make_unique<ir::operation::LowerInfo>(lower_info->backend(), lower_info->layout());
- _lowered_graph.setLowerInfo(new_op_seq_ind, std::move(new_lower_info));
-}
-
-void Fp32ToFp16Converter::manipulateInput(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperandIndex &op_seq_input_ind,
- const ir::OperandIndex &new_op_ind)
-{
- auto &operands = _lowered_graph.graph().operands();
- auto &operations = _lowered_graph.graph().operations();
-
- auto &op_seq = _lowered_graph.op_seqs().at(op_seq_ind);
-
- auto &first_node_ind = op_seq.operations().at(0);
- auto &first_node = operations.at(first_node_ind);
- assert(first_node.getInputs().contains(op_seq_input_ind));
-
- auto &input_obj = operands.at(op_seq_input_ind);
- assert(input_obj.isConstant() == false);
-
- auto &new_op_obj = operands.at(new_op_ind);
-
- // The same inputs having the index as op_seq_input_ind are replaced all at once
- op_seq.replaceInputs(op_seq_input_ind, new_op_ind);
- first_node.replaceInputs(op_seq_input_ind, new_op_ind);
-
- // op_seq_obj doesn't have uses/def
- input_obj.removeUse(first_node_ind);
- new_op_obj.insertUse(first_node_ind);
-}
-
-void Fp32ToFp16Converter::manipulateOutput(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperandIndex &op_seq_output_ind,
- const ir::OperandIndex &new_op_ind)
-{
- auto &operands = _lowered_graph.graph().operands();
- auto &operations = _lowered_graph.graph().operations();
-
- auto &op_seq = _lowered_graph.op_seqs().at(op_seq_ind);
-
- size_t last_ind = op_seq.size() - 1;
- auto &last_node_ind = op_seq.operations().at(last_ind);
- auto &last_node = operations.at(last_node_ind);
- assert(last_node.getOutputs().contains(op_seq_output_ind));
-
- auto &output_obj = operands.at(op_seq_output_ind);
- assert(output_obj.isConstant() == false);
-
- auto &new_op_obj = operands.at(new_op_ind);
-
- // The same outputs having the index as op_seq_output_ind are replaced all at once
- op_seq.replaceOutputs(op_seq_output_ind, new_op_ind);
- last_node.replaceOutputs(op_seq_output_ind, new_op_ind);
-
- // op_seq_obj doesn't have uses/def
- assert(output_obj.getDef() == last_node_ind);
- output_obj.unsetDef();
- new_op_obj.setDef(last_node_ind);
-}
-
-ir::OperationIndex
-Fp32ToFp16Converter::newOperationConvertFp32ToFp16(const ir::OperandIndex &op_seq_input_ind,
- const ir::OperandIndex &new_op_ind)
-{
- auto &operands = _lowered_graph.graph().operands();
- auto &operations = _lowered_graph.graph().operations();
-
- auto &input_obj = operands.at(op_seq_input_ind);
- auto &new_op_obj = operands.at(new_op_ind);
-
- std::unique_ptr<ir::Operation> new_node(
- new ir::operation::ConvertFp32ToFp16({op_seq_input_ind}, {new_op_ind}));
- const auto new_node_ind = operations.push(std::move(new_node));
-
- input_obj.insertUse(new_node_ind);
- new_op_obj.setDef(new_node_ind);
-
- return new_node_ind;
-}
-
-ir::OperationIndex
-Fp32ToFp16Converter::newOperationConvertFp16ToFp32(const ir::OperandIndex &op_seq_output_ind,
- const ir::OperandIndex &new_op_ind)
-{
- auto &operands = _lowered_graph.graph().operands();
- auto &operations = _lowered_graph.graph().operations();
-
- auto &output_obj = operands.at(op_seq_output_ind);
- auto &new_op_obj = operands.at(new_op_ind);
-
- std::unique_ptr<ir::Operation> new_node(
- new ir::operation::ConvertFp16ToFp32({new_op_ind}, {op_seq_output_ind}));
- const auto new_node_ind = operations.push(std::move(new_node));
-
- new_op_obj.insertUse(new_node_ind);
- output_obj.setDef(new_node_ind);
-
- return new_node_ind;
-}
-
-ir::OpSequenceIndex Fp32ToFp16Converter::newOpSequence(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperationIndex &node_index)
-{
- auto &node = _lowered_graph.graph().operations().at(node_index);
- const auto lower_info = _lowered_graph.getLowerInfo(op_seq_ind);
- assert(lower_info != nullptr);
- auto layout = lower_info->layout();
-
- auto op_seq = std::make_unique<ir::OpSequence>(layout);
- op_seq->appendOperation(node_index);
- op_seq->setOutputs(node.getOutputs());
- op_seq->setInputs(node.getInputs());
-
- return _lowered_graph.op_seqs().emplace(std::move(op_seq));
-}
-
-// The op_seq(Fp16To32)'s output operand is the next to op_seq (Fp32To16)?
-// If so, connect Fp16To32's previous OpSeq to Fp32To16's next OpSeq
-//
-// Assume that an OpSequence has an operation for easy explaination
-//
-// BEFORE)
-//
-// [OPERATION] // OpSeq#0
-// |
-// OP#0
-// |
-// [FP16TO32] // OpSeq#1
-// |
-// OP#1
-// |
-// [FP32TO16] // OpSeq#2
-// |
-// OP#2
-// |
-// [OPERATION] // OpSeq#3
-//
-//
-// AFTER)
-//
-// [OPERATION] // OpSeq#0
-// |
-// OP#0
-// |
-// [OPERATION] // OpSeq#3
-//
-void Fp32ToFp16Converter::removeContiguousConvertOpSequences()
-{
- // Prepare InputToOpSeqs map
- const auto input_to_op_seqs = prepareInputToOpSeqs();
-
- // Find OpSequences to delete while manipulating input of OpSeq.
- auto opseq_map_to_delete = findOpSequencesContiguous(input_to_op_seqs);
-
- // Find Operations to delete
- auto list_to_delete_op_seqs = getListOpSequences(opseq_map_to_delete);
- auto list_to_delete_ops = findOperationsToDelete(list_to_delete_op_seqs);
-
- // Before deleting, manipulateInputs of OpSeq & Operation
- manipulateContiguousOpSequences(input_to_op_seqs, opseq_map_to_delete);
-
- // Delete OpSequences & Operations & obj's use/def & operands
- deleteContiguousOpSequences(list_to_delete_op_seqs, list_to_delete_ops);
-}
-
-Fp32ToFp16Converter::OpSeqIndexToOpSeqIndexList
-Fp32ToFp16Converter::findOpSequencesContiguous(const InputToOpSeqs &input_to_op_seqs) const
-{
- const auto &op_seqs = _lowered_graph.op_seqs();
- OpSeqIndexToOpSeqIndexList opseq_map_to_delete;
-
- //
- // Assume that an Operation an OpSequence for easy explaination
- //
- // [OPERATION]
- // |
- // OP#0
- // |
- // [FP16TO32] // op_seq_ind_fp16_to_fp32 & op_seq_fp16_to_fp32
- // |
- // OP#1 // output_ind_fp16_fp32
- // |
- // [FP32TO16] // op_seq_ind
- // |
- // OP#2
- // |
- // [OPERATION]
- //
- for (auto it = _list_fp16_to_fp32.cbegin(); it != _list_fp16_to_fp32.cend(); ++it)
- {
- // fp16_to_fp32's input/output num is always 1
- auto &op_seq_ind_fp16_to_fp32 = *it;
- auto &op_seq_fp16_to_fp32 = op_seqs.at(op_seq_ind_fp16_to_fp32);
- assert(op_seq_fp16_to_fp32.size() == 1);
- assert(op_seq_fp16_to_fp32.getInputs().size() == 1);
-
- auto &output_ind_fp16_to_fp32 = op_seq_fp16_to_fp32.getOutputs().at(0);
- auto found_input_in_op_seqs = input_to_op_seqs.find(output_ind_fp16_to_fp32);
- if (found_input_in_op_seqs == input_to_op_seqs.end())
- {
- continue;
- }
-
- // DO NOT FORGET THE CASE
- //
- // |
- // [FP16TO32]
- // |
- // OP#0---------------------.
- // | |
- // [FP32TO16] [FP32TO16]
- // | |
- // OP#1 OP#2
- // | |
- // [OPERATION] [OPERATION]
- //
- for (auto &op_seq_ind : found_input_in_op_seqs->second)
- {
- auto found_in_fp32_to_fp16 = _list_fp32_to_fp16.find(op_seq_ind);
- if (found_in_fp32_to_fp16 != _list_fp32_to_fp16.end())
- {
- if (opseq_map_to_delete.find(op_seq_ind_fp16_to_fp32) == opseq_map_to_delete.end())
- {
- opseq_map_to_delete[op_seq_ind_fp16_to_fp32].emplace(op_seq_ind);
- }
- else
- {
- opseq_map_to_delete[op_seq_ind_fp16_to_fp32].insert(op_seq_ind);
- }
-
- VERBOSE(Fp32ToFp16Converter)
- << "Contiguous from OpSeq#" << op_seq_ind_fp16_to_fp32.value() << "(ToFp32)"
- << " to OpSeq#" << op_seq_ind.value() << "(ToFp16)" << std::endl;
- }
- }
- }
-
- return opseq_map_to_delete;
-}
-
-Fp32ToFp16Converter::InputToOpSeqs Fp32ToFp16Converter::prepareInputToOpSeqs() const
-{
- const auto &op_seqs = _lowered_graph.op_seqs();
-
- InputToOpSeqs input_to_op_seqs;
- op_seqs.iterate([&](const ir::OpSequenceIndex &op_seq_idx, const ir::OpSequence &op_seq) {
- for (auto input : op_seq.getInputs() | ir::Remove::UNDEFINED)
- {
- auto it = input_to_op_seqs.find(input);
- if (it == input_to_op_seqs.end())
- {
- input_to_op_seqs[input].emplace(op_seq_idx);
- }
- else
- {
- input_to_op_seqs[input].insert(op_seq_idx);
- }
- }
- });
-
- return input_to_op_seqs;
-}
-
-Fp32ToFp16Converter::OpSeqIndexList
-Fp32ToFp16Converter::getListOpSequences(const OpSeqIndexToOpSeqIndexList &opseq_map_to_delete) const
-{
- OpSeqIndexList list;
- for (const auto &it : opseq_map_to_delete)
- {
- auto &opseq_ind_fp16_to_fp32 = it.first;
- if (list.find(opseq_ind_fp16_to_fp32) == list.end())
- {
- list.emplace(opseq_ind_fp16_to_fp32);
- }
-
- for (auto &opseq_ind_fp32_to_fp16 : it.second)
- {
- if (list.find(opseq_ind_fp32_to_fp16) == list.end())
- {
- list.emplace(opseq_ind_fp32_to_fp16);
- }
- }
- }
- return list;
-}
-
-ir::OperandIndexSequence
-Fp32ToFp16Converter::findOperationsToDelete(const OpSeqIndexList &list_to_delete_op_seqs) const
-{
- const auto &operations = _lowered_graph.graph().operations();
- const auto &op_seqs = _lowered_graph.op_seqs();
-
- ir::OperandIndexSequence list_to_delete_ops;
- for (const auto &op_seq_ind : list_to_delete_op_seqs)
- {
- const auto &op_seq = op_seqs.at(op_seq_ind);
- assert(op_seq.size() == 1);
-
- const auto &first_node_ind = op_seq.operations().at(0);
- const auto &first_node = operations.at(first_node_ind);
- assert(first_node.opcode() == ir::OpCode::ConvertFp32ToFp16 ||
- first_node.opcode() == ir::OpCode::ConvertFp16ToFp32);
-
- for (const auto &ind : first_node.getOutputs())
- {
- list_to_delete_ops.append(ind);
- }
- }
-
- return list_to_delete_ops;
-}
-
-void Fp32ToFp16Converter::manipulateContiguousOpSequences(
- const InputToOpSeqs &input_to_op_seqs, const OpSeqIndexToOpSeqIndexList &opseq_map_to_delete)
-{
- auto &op_seqs = _lowered_graph.op_seqs();
-
- //
- // [OPERATION]
- // |
- // OP#0 // input_ind_fp16_to_fp32
- // |
- // [FP16TO32] // op_seq_ind_fp16_to_fp32 & op_seq_fp16_to_fp32
- // |
- // OP#1
- // |
- // [FP32TO16] // op_seq_ind_fp32_to_fp16, op_seq_fp32_to_fp16
- // |
- // OP#2 // output_ind_fp32_to_fp16
- // |
- // [OPERATION] // op_seq_ind_next_to_fp16
- //
- for (auto it : opseq_map_to_delete)
- {
- // fp16_to_fp32's input/output num is always 1
- auto &op_seq_ind_fp16_to_fp32 = it.first;
- auto &op_seq_fp16_to_fp32 = op_seqs.at(op_seq_ind_fp16_to_fp32);
- auto &input_ind_fp16_to_fp32 = op_seq_fp16_to_fp32.getInputs().at(0);
-
- for (auto &op_seq_ind_fp32_to_fp16 : it.second)
- {
- auto &op_seq_fp32_to_fp16 = op_seqs.at(op_seq_ind_fp32_to_fp16);
- assert(op_seq_fp32_to_fp16.size() == 1);
- assert(op_seq_fp32_to_fp16.getInputs().size() == 1);
-
- auto &output_ind_fp32_to_fp16 = op_seq_fp32_to_fp16.getOutputs().at(0);
- auto found_next_to_fp16 = input_to_op_seqs.find(output_ind_fp32_to_fp16);
- assert(found_next_to_fp16 != input_to_op_seqs.end());
-
- for (auto &op_seq_ind_next_to_fp16 : found_next_to_fp16->second)
- {
- manipulateInput(op_seq_ind_next_to_fp16, output_ind_fp32_to_fp16, input_ind_fp16_to_fp32);
- }
- //
- // [OPERATION]
- // |
- // OP#0 // input_ind_fp16_to_fp32
- // |
- // [OPERATION] // op_seq_ind_next_to_fp16
- //
- }
- }
-}
-
-void Fp32ToFp16Converter::deleteContiguousOpSequences(
- const OpSeqIndexList &list_to_delete_op_seqs,
- const ir::OperandIndexSequence &list_to_delete_ops)
-{
- auto &operands = _lowered_graph.graph().operands();
- auto &operations = _lowered_graph.graph().operations();
- auto &op_seqs = _lowered_graph.op_seqs();
-
- for (auto &op_seq_ind : list_to_delete_op_seqs)
- {
- auto &op_seq = op_seqs.at(op_seq_ind);
- assert(op_seq.size() == 1);
- VERBOSE(Fp32ToFp16Converter) << "Delete OpSeq #" << op_seq_ind.value() << std::endl;
-
- auto &first_node_ind = op_seq.operations().at(0);
- auto &first_node = operations.at(first_node_ind);
- assert(first_node.opcode() == ir::OpCode::ConvertFp32ToFp16 ||
- first_node.opcode() == ir::OpCode::ConvertFp16ToFp32);
- VERBOSE(Fp32ToFp16Converter) << "Delete Node #" << first_node_ind.value() << std::endl;
-
- // Uses
- for (auto &ind : first_node.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- auto &obj = operands.at(ind);
- obj.removeUse(first_node_ind);
- VERBOSE(Fp32ToFp16Converter) << "Operand #" << ind.value() << "'s Use(Node#"
- << first_node_ind.value() << ") is removed" << std::endl;
- }
-
- // Def
- for (auto &ind : first_node.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- auto &obj = operands.at(ind);
- assert(obj.getDef() == first_node_ind);
- obj.unsetDef();
- VERBOSE(Fp32ToFp16Converter) << "Operand #" << ind.value() << "'s Def(Node#"
- << first_node_ind.value() << ") is removed" << std::endl;
- }
-
- // Operation
- operations.remove(first_node_ind);
- VERBOSE(Fp32ToFp16Converter) << "Node#" << first_node_ind.value() << " is removed" << std::endl;
-
- // OpSequence
- op_seqs.remove(op_seq_ind);
- VERBOSE(Fp32ToFp16Converter) << "OpSeq#" << op_seq_ind.value() << " is removed" << std::endl;
- }
-
- // Operand
- for (auto &ind : list_to_delete_ops)
- {
- operands.remove(ind);
- VERBOSE(Fp32ToFp16Converter) << "Operand #" << ind.value() << " is removed" << std::endl;
- }
-}
-
-} // namespace compiler
-
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/Fp32ToFp16Converter.h b/runtime/onert/core/src/compiler/Fp32ToFp16Converter.h
deleted file mode 100644
index eeecb9846..000000000
--- a/runtime/onert/core/src/compiler/Fp32ToFp16Converter.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_FP32_TO_FP16_CONVERTER_H__
-#define __ONERT_COMPILER_FP32_TO_FP16_CONVERTER_H__
-
-#include "compiler/LoweredGraph.h"
-
-namespace onert
-{
-
-namespace compiler
-{
-
-class Fp32ToFp16Converter
-{
-public:
- Fp32ToFp16Converter(compiler::LoweredGraph &lowered_graph);
-
-public:
- void run();
-
-private:
- using OpSeqIndexList = std::unordered_set<ir::OpSequenceIndex>;
- using InputToOpSeqs = std::unordered_map<ir::OperandIndex, OpSeqIndexList>;
- using OpSeqIndexToOpSeqIndexList = std::unordered_map<ir::OpSequenceIndex, OpSeqIndexList>;
-
-private:
- void appendOpSequences();
- void optimize();
- void convertOperands();
- void convertDatas();
- void printOpSequences(const std::string &pre_msg = std::string(),
- const std::string &post_msg = std::string());
-
- bool checkOperandType(const ir::OperandIndex &op_ind) const;
- bool checkOperandsOfOpSequence(const ir::OpSequence &op_seq) const;
-
- void appendNewOpSeqForConvertFp32ToFp16(const ir::OpSequenceIndex &op_seq_ind,
- ir::OpSequence &op_seq);
- void appendNewOpSeqForConvertFp16ToFp32(const ir::OpSequenceIndex &op_seq_ind,
- ir::OpSequence &op_seq);
-
- ir::OperandIndex newCopiedOperand(const ir::OperandIndex &op_ind);
- ir::OperationIndex newOperationConvertFp32ToFp16(const ir::OperandIndex &op_seq_input_ind,
- const ir::OperandIndex &new_op_ind);
- ir::OperationIndex newOperationConvertFp16ToFp32(const ir::OperandIndex &op_seq_output_ind,
- const ir::OperandIndex &new_op_ind);
- ir::OpSequenceIndex newOpSequence(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperationIndex &node_index);
-
- void setNewOperandLowerInfo(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperandIndex &new_op_ind);
- void setNewOpSequenceLowerInfo(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OpSequenceIndex &new_op_seq_ind);
-
- void manipulateInput(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperandIndex &op_seq_input_ind,
- const ir::OperandIndex &new_op_ind);
- void manipulateOutput(const ir::OpSequenceIndex &op_seq_ind,
- const ir::OperandIndex &op_seq_output_ind,
- const ir::OperandIndex &new_op_ind);
-
- void removeContiguousConvertOpSequences();
- InputToOpSeqs prepareInputToOpSeqs() const;
- OpSeqIndexToOpSeqIndexList
- findOpSequencesContiguous(const InputToOpSeqs &intput_to_op_seqs) const;
- OpSeqIndexList getListOpSequences(const OpSeqIndexToOpSeqIndexList &opseq_map_to_delete) const;
- ir::OperandIndexSequence
- findOperationsToDelete(const OpSeqIndexList &list_to_delete_op_seqs) const;
- void manipulateContiguousOpSequences(const InputToOpSeqs &input_to_op_seqs,
- const OpSeqIndexToOpSeqIndexList &opseq_map_to_delete);
- void deleteContiguousOpSequences(const OpSeqIndexList &list_to_delete_op_seqs,
- const ir::OperandIndexSequence &list_to_delete_ops);
-
- void convertOperandsOfOpSequence(ir::OpSequence &op_seq);
-
-private:
- compiler::LoweredGraph &_lowered_graph;
- OpSeqIndexList _list_fp32_to_fp16;
- OpSeqIndexList _list_fp16_to_fp32;
-};
-
-} // namespace compiler
-
-} // namespace onert
-
-#endif // __ONERT_COMPILER_FP32_TO_FP16_CONVERTER_H__
diff --git a/runtime/onert/core/src/compiler/HEScheduler.cc b/runtime/onert/core/src/compiler/HEScheduler.cc
deleted file mode 100644
index fe54b0fdd..000000000
--- a/runtime/onert/core/src/compiler/HEScheduler.cc
+++ /dev/null
@@ -1,593 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Operand.h"
-#include "compiler/HEScheduler.h"
-#include "ir/Graph.h"
-#include "util/ConfigSource.h"
-#include "compiler/BackendResolver.h"
-#include "util/logging.h"
-#include "util/Utils.h"
-#include "exec/FunctionSequence.h"
-#include <cassert>
-#include <cmath>
-#include <chrono>
-
-namespace onert
-{
-
-namespace compiler
-{
-static uint32_t getOperationsFlattenedIOSize(const ir::Graph &graph, const ir::Operation &node)
-{
- uint32_t size = 0;
- for (const auto &ind :
- (node.getInputs() | ir::Remove::UNDEFINED) + (node.getOutputs() | ir::Remove::UNDEFINED))
- {
- size += graph.operands().at(ind).info().total_size();
- }
- return size;
-}
-
-static bool isQuant(const ir::Graph &graph, const ir::Operation &node)
-{
- for (const auto &input : node.getInputs() | ir::Remove::UNDEFINED)
- {
- const auto &obj = graph.operands().at(input);
- if (obj.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM)
- {
- return true;
- }
- }
- return false;
-}
-
-static bool isWorkaroundSkip(const ir::Graph &, const backend::Backend *, const ir::Operation &,
- bool)
-{
- // Now, there is no workaround
- return false;
-}
-
-// if a node can be merged into op_seq
-static bool isMergeable(const ir::Graph &graph, const ir::Operation &node)
-{
- size_t prev_op_cnt = 0;
- for (const auto &input : node.getInputs())
- {
- // only valid_inputs
- const auto &operand = graph.operands().at(input);
- if (operand.isConstant())
- continue;
-
- // This operand is output of operation, not weight or bias
- if (operand.getDef().valid())
- ++prev_op_cnt;
-
- // Current node has multiple inputs as concat or at the beginning of the separated branch
- if (prev_op_cnt > 1 || operand.getUses().size() > 1)
- {
- return false;
- }
- }
- return true;
-}
-
-void HEScheduler::scheduleShufflingBackends()
-{
- VERBOSE(HEScheduler::schedule)
- << "Started task scheduling: uses all backends to get more metrics for data transfer"
- << std::endl;
- size_t backend_ind = 0;
- for (const auto &rank : _rank_to_op)
- {
- VERBOSE(HEScheduler::schedule) << "scheduling (" << rank.second.value() << ")" << std::endl;
- const auto &node = _graph->operations().at(rank.second);
- const bool quant = isQuant(*_graph, node);
- const auto size = getOperationsFlattenedIOSize(*_graph, node);
- for (size_t i = 0;; ++i)
- {
- if (i == _all_backends.size())
- {
- // wasn't able to find backend
- assert(false);
- break;
- }
- if (backend_ind == _all_backends.size())
- {
- backend_ind = 0;
- }
- if (isWorkaroundSkip(*_graph, _all_backends[backend_ind], node, quant))
- {
- ++backend_ind;
- continue;
- }
- const auto exec_time =
- _exec_time->getOperationExecTime(_all_backends[backend_ind], node.name(), quant, size);
- // Scheduling to measure data transfer must be done after measuring all backends separately
- assert(exec_time != _exec_time->NOT_FOUND);
- if (exec_time == _exec_time->getMax())
- {
- ++backend_ind;
- continue;
- }
- _backend_resolver->setBackend(rank.second, _all_backends[backend_ind]);
- VERBOSE(HEScheduler::schedule) << "backend for " << node.name() << " is "
- << _all_backends[backend_ind]->config()->id() << std::endl;
- ++backend_ind;
- break;
- }
- }
-}
-
-bool HEScheduler::isNodeProfiled(const ir::Operation &node)
-{
- const bool quant = isQuant(*_graph, node);
- const auto size = getOperationsFlattenedIOSize(*_graph, node);
- for (const auto *backend : _all_backends)
- {
- const auto exec_time = _exec_time->getOperationExecTime(backend, node.name(), quant, size);
- if (exec_time == _exec_time->NOT_FOUND)
- return false;
- }
- return true;
-}
-
-void HEScheduler::scheduleBranch(const ir::OperationIndex &index,
- ir::OperationIndexMap<bool> &scheduled)
-{
- auto loc_index = index;
- const backend::Backend *parent_backend = nullptr;
- while (true)
- {
- if (scheduled[loc_index])
- {
- return;
- }
- if (!schedule(loc_index, parent_backend))
- {
- return;
- }
- scheduled[loc_index] = true;
- parent_backend = _backend_resolver->getBackend(loc_index);
-
- const auto &node = _graph->operations().at(loc_index);
- /* get the only output operand, that is input of the next single operation
- * and just this nodes output.*/
- if (node.getOutputs().size() != 1)
- {
- return;
- }
- const auto &only_out_operand = _graph->operands().at(*node.getOutputs().begin());
- // One of the last nodes
- if (only_out_operand.getUses().size() == 0)
- {
- return;
- }
- loc_index = *only_out_operand.getUses().begin();
- /* verify, that next node is neither beginning nor ending node of a branch*/
- const auto &next_node = _graph->operations().at(loc_index);
- if (!isMergeable(*_graph, next_node))
- {
- return;
- }
- }
-}
-
-std::unique_ptr<compiler::BackendResolver> HEScheduler::schedule(const ir::Graph &graph)
-{
- _graph = &graph;
- VERBOSE(HEScheduler::schedule) << "task scheduling started" << std::endl;
- // Make ranks and save in descending order
- makeRank();
-
- for (const auto *backend : _all_backends)
- {
- _backends_avail_time.emplace(backend, std::map<int64_t, int64_t>{{0, 0}});
- }
-
- if (_is_profiling_mode)
- {
- // Check if profiling info about all backend/node pairs already exists
- bool all_nodes_are_profiled = true;
- _graph->operations().iterate([&](const ir::OperationIndex &, const ir::Operation &op) {
- if (all_nodes_are_profiled)
- all_nodes_are_profiled = isNodeProfiled(op);
- });
-
- // If all nodes are already profiled - schedule backends in such order, so more profiling
- // information about between-backends data transfer could be collected
- if (all_nodes_are_profiled)
- {
- scheduleShufflingBackends();
- VERBOSE(HEScheduler::schedule) << "task scheduling finished" << std::endl;
- return std::move(_backend_resolver);
- }
- }
-
- ir::OperationIndexMap<bool> visited;
- graph.operations().iterate(
- [&](const ir::OperationIndex &index, const ir::Operation &) { visited[index] = false; });
- // for each task select the backend with the smallest earliest finishing time(eft)
- for (const auto &rank : _rank_to_op)
- {
- scheduleBranch(rank.second, visited);
- }
- VERBOSE(HEScheduler::schedule) << "task scheduling finished" << std::endl;
- return std::move(_backend_resolver);
-}
-
-int64_t HEScheduler::getOpTime(const backend::Backend *backend, const std::string &operation,
- bool quant, uint32_t size)
-{
- const auto time = _exec_time->getOperationExecTime(backend, operation, quant, size);
- if (time != _exec_time->NOT_FOUND)
- return time;
-
- return _is_supported.at(backend).at(operation) ? 1 : _exec_time->getMax();
-}
-
-int64_t HEScheduler::getPermuteTime(const backend::Backend *src_backend,
- const backend::Backend *dst_backend, bool quant, uint32_t size)
-{
- // TODO Change it to getOperationExecTime()
- const auto time = _exec_time->getPermuteTime(src_backend, dst_backend, quant, size);
-
- if (time != _exec_time->NOT_FOUND)
- return time;
-
- // FIXME permute time is not recorded so the control reaches here always
- // Makes the scheduler prefer keeping computations on one backend
- return size / 400;
-}
-
-int64_t HEScheduler::tryBackend(const ir::Operation &node, const backend::Backend *backend)
-{
- // if there is no profiling info don't use this backend during scheduling
- if (!_is_profiling_mode)
- {
- VERBOSE(HEScheduler::tryBackend)
- << "Trying to HE schedule while there is no profiling info for " << node.name()
- << " on backend " << backend->config()->id() << ". So this backend won't be used. "
- << std::endl;
- _is_supported[backend][node.name()] = false;
- return _exec_time->getMax();
- }
- auto iter = _is_supported.find(backend);
- if (iter != _is_supported.end())
- {
- auto it2 = iter->second.find(node.name());
- if (it2 != iter->second.end())
- {
- return _is_supported[backend][node.name()] ? 1 : _exec_time->getMax();
- }
- }
- try
- {
- // DO NOTHING
-
- _is_supported[backend][node.name()] = true;
- }
- catch (std::runtime_error &e)
- {
- _is_supported[backend][node.name()] = false;
- }
- return _is_supported[backend][node.name()] ? 1 : _exec_time->getMax();
-}
-
-void HEScheduler::makeRank()
-{
- VERBOSE(HEScheduler::makeRank) << "task prioritizing" << std::endl;
-
- _graph->operations().iterate(
- [&](const ir::OperationIndex &index, const ir::Operation &) { DFSMaxRank(index); });
-
- // Check that ranks are calculated for all operations(nodes)
- _graph->operations().iterate([&](const ir::OperationIndex &index, const ir::Operation &) {
- UNUSED_RELEASE(index);
- assert(_op_to_rank->find(index) != _op_to_rank->end());
- });
- VERBOSE(HEScheduler::makeRank) << "task prioritizing finished" << std::endl;
-}
-
-int64_t HEScheduler::DFSMaxRank(const ir::OperationIndex &index)
-{
- auto op_to_rank_it = _op_to_rank->find(index);
- if (op_to_rank_it != _op_to_rank->end())
- return op_to_rank_it->second;
-
- const auto &node = _graph->operations().at(index);
- int64_t rank = 0;
- const bool quant = isQuant(*_graph, node);
- const auto size = getOperationsFlattenedIOSize(*_graph, node);
- auto supported_backends_quantity = static_cast<int64_t>(_all_backends.size());
-
- const auto max_child_rank = DFSChildrenMaxRank(index);
-
- // get average exec time of this op
- for (const auto &backend : _all_backends)
- {
- auto exec_time = _exec_time->getOperationExecTime(backend, node.name(), quant, size);
- if (exec_time == _exec_time->NOT_FOUND)
- {
- exec_time = tryBackend(node, backend);
- }
- if (exec_time < _exec_time->getMax())
- {
- rank += exec_time;
- }
- else
- {
- // this operation isn't supported in this backend
- --supported_backends_quantity;
- }
- }
- if (supported_backends_quantity == 0)
- {
- throw std::runtime_error{"Encountered unsupported op: " + node.name()};
- }
- rank /= supported_backends_quantity;
-
- // get standard deviation
- int64_t std = 0;
- for (const auto backend : _all_backends)
- {
- const auto exec_time = getOpTime(backend, node.name(), quant, size);
- if (exec_time < _exec_time->getMax())
- {
- std += (exec_time - rank) * (exec_time - rank);
- }
- }
- std /= supported_backends_quantity;
- if (std > 0)
- {
- std = static_cast<int>(std::sqrt(std));
- rank *= std;
- }
- rank += max_child_rank;
-
- assert(rank >= 0);
- _rank_to_op.emplace(rank, index);
- _op_to_rank->emplace(index, rank);
- VERBOSE(HEScheduler::DFSMaxRank) << "rank of operation (" << index.value() << ")" << node.name()
- << " is " << rank << std::endl;
-
- return rank;
-}
-
-int64_t HEScheduler::DFSChildrenMaxRank(const ir::OperationIndex &index)
-{
- const auto &node = _graph->operations().at(index);
- int64_t max_child_rank = 0;
- for (const auto &output : node.getOutputs() | ir::Remove::UNDEFINED)
- {
- const auto &operand = _graph->operands().at(output);
- const bool quant = operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM;
- // average data transfer cost of this operand's data
- int64_t avg_transfer_cost = 1;
- for (const auto *backend : _all_backends)
- {
- for (const auto *other_backend : _all_backends)
- {
- if (backend == other_backend)
- {
- continue;
- }
- // TODO Change it to controlflow backend
- auto transfer_cost =
- getPermuteTime(backend, other_backend, quant, operand.info().total_size());
- avg_transfer_cost += transfer_cost;
- }
- }
- avg_transfer_cost /= _all_backends.size();
- for (const auto &use : operand.getUses())
- {
- const auto cur_child_rank = DFSMaxRank(use);
- max_child_rank = std::max(max_child_rank, cur_child_rank + avg_transfer_cost);
- }
- }
- return max_child_rank;
-}
-
-int64_t HEScheduler::backendAvailableTime(const backend::Backend *backend,
- const int64_t &starting_time, const int64_t &time_amount)
-{
- const auto backend_times = _backends_avail_time.at(backend);
- // finishing and starting times of an op, that will come after current op
- auto next_op_fst = backend_times.upper_bound(starting_time);
- // finishing time of an op, that will come before current op
- auto prev_op_ft = starting_time;
- // until reach the "hole/gap", that is enough to run this op
- while (next_op_fst != backend_times.end() && next_op_fst->second - prev_op_ft <= time_amount)
- {
- prev_op_ft = next_op_fst->first + 1;
- ++next_op_fst;
- }
- return prev_op_ft;
-}
-
-bool HEScheduler::schedule(const ir::OperationIndex &index, const backend::Backend *parent_backend)
-{
- VERBOSE(HEScheduler::schedule) << "scheduling (" << index.value() << ")" << std::endl;
- int64_t eft = std::numeric_limits<int64_t>::max(), selected_exec_time = 0;
- const auto &node = _graph->operations().at(index);
-
- std::multimap<int64_t, int64_t> selected_transfer_st_exec_time;
- // select the backend with the smallest eft of this task
- const backend::Backend *chosen_backend = nullptr;
- for (const auto *backend : _all_backends)
- {
- std::multimap<int64_t, int64_t> transfer_st_exec_time;
- const auto est_and_et = ESTAndExecTime(backend, index, transfer_st_exec_time);
-
- if (eft > est_and_et.first + est_and_et.second)
- {
- eft = est_and_et.first + est_and_et.second;
- selected_exec_time = est_and_et.second;
- chosen_backend = backend;
- selected_transfer_st_exec_time = transfer_st_exec_time;
- }
- }
-
- if (chosen_backend == nullptr)
- {
- throw std::runtime_error{"Fail to choose backend on scheduler"};
- }
-
- // this is part of a branch and it is assigned another backend
- if (parent_backend && parent_backend != chosen_backend)
- {
- return false;
- }
- for (const auto &it : selected_transfer_st_exec_time)
- {
- auto prev_op_ft = backendAvailableTime(_cpu_backend, it.first, it.second);
- _backends_avail_time[_cpu_backend].insert({prev_op_ft + it.second, prev_op_ft});
- }
-
- _ops_eft[index] = eft;
- _backends_avail_time[chosen_backend].emplace(eft, eft - selected_exec_time);
- _backend_resolver->setBackend(index, chosen_backend);
-
- VERBOSE(HEScheduler::schedule) << "backend for " << node.name() << " is "
- << chosen_backend->config()->id() << ". Its eft: " << eft
- << std::endl;
- return true;
-}
-
-std::pair<int64_t, int64_t>
-HEScheduler::ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index,
- std::multimap<int64_t, int64_t> &transfer_st_exec_time)
-{
- // Permutation will cause creating a separate op_seq that contains just this permutation node.
- // This isn't needed for Linear executor since it doesn't use op_seqs
- // Number 1 ms is picked experimentally
- int64_t permute_fine = 1000;
- // Multiply cpu operations' exec time by 2 because in parallel executor it might be busy with
- // permutation on other branches or non-nnfw specific tasks and have to wait for it.
- // Number 2 is picked experimentally
- const int64_t CPU_DELAY = 2;
- const auto &node = _graph->operations().at(index);
- const bool quant = isQuant(*_graph, node);
- const auto size = getOperationsFlattenedIOSize(*_graph, node);
- // if this node can be part of a op_seq, then assigning different backend will cause creating
- // another op_seq
- if (isMergeable(*_graph, node))
- {
- permute_fine *= 2;
- }
- if (isWorkaroundSkip(*_graph, backend, node, quant))
- {
- return {_exec_time->getMax(), _exec_time->getMax()};
- }
- // get average exec time of the op on this backend
- auto exec_time = getOpTime(backend, node.name(), quant, size);
- if (backend->config()->id() == "cpu" && _is_parallel_exec)
- {
- exec_time *= CPU_DELAY;
- }
-
- // get max eft of direct (one level above) predecessors
- auto max_pred_eft = predMaxEFT(backend, node, transfer_st_exec_time);
-
- int64_t total_transfer_cost = 0;
- std::vector<std::multimap<int64_t, int64_t>::iterator> inserted_permutations;
- // Find free time for data transferring and insert it into backend taskset. This is needed:
- // 1. Time for multiple permutations for this node's input is found correctly
- // 2. If backend==cpu, then free time for this node must come after permutations
- for (auto &it : transfer_st_exec_time)
- {
- if (_is_parallel_exec)
- {
- it.second *= CPU_DELAY;
- }
- if (!_is_linear_exec)
- {
- it.second += permute_fine;
- }
- total_transfer_cost += it.second;
-
- const auto prev_op_ft = backendAvailableTime(_cpu_backend, it.first, it.second);
-
- max_pred_eft = std::max(max_pred_eft, prev_op_ft + it.second);
-
- const auto tmp = _backends_avail_time[_cpu_backend].emplace(prev_op_ft + it.second, prev_op_ft);
- inserted_permutations.push_back(tmp.first);
- }
- // find the hole/gap, where this op can be put or the finishing time of the last assigned op
- auto prev_op_ft = backendAvailableTime(backend, max_pred_eft, exec_time);
-
- // Remove inserted permutation from cpu's task set
- for (const auto &it : inserted_permutations)
- {
- _backends_avail_time[_cpu_backend].erase(it);
- }
-
- /* In case non-parallel executor measure just exec time and data transfer time
- * because EFT(prev_op_ft) is the same for all backends. Since two operations
- * can't be run simultaneously, finish of running operation must be waited for.
- * When an operation starts, all backends are free. So, they need time just for
- * data transfer.*/
- if (!_is_parallel_exec)
- {
- VERBOSE(HEScheduler::ESTAndExecTime)
- << "exec_time of (" << index.value() << ") " << node.name() << " quant==" << quant << " on "
- << backend->config()->id() << " is " << exec_time
- << " microseconds. Data transfer cost: " << total_transfer_cost << std::endl;
-
- return {total_transfer_cost, exec_time};
- }
- VERBOSE(HEScheduler::ESTAndExecTime)
- << "exec_time of (" << index.value() << ") " << node.name() << " quant==" << quant << " on "
- << backend->config()->id() << ": " << exec_time
- << " microseconds. Backend available time: " << prev_op_ft
- << " Parent's max eft: " << max_pred_eft - total_transfer_cost
- << " data transfer cost: " << total_transfer_cost << std::endl;
-
- return {prev_op_ft, exec_time};
-}
-
-int64_t HEScheduler::predMaxEFT(const backend::Backend *backend, const ir::Operation &node,
- std::multimap<int64_t, int64_t> &transfer_st_exec_time)
-{
- int64_t max_pred_eft = 0;
- for (const auto &input_operand_idx : node.getInputs())
- {
- const auto &input_operand = _graph->operands().at(input_operand_idx);
- const bool quant = input_operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM;
-
- auto input_node_idx = input_operand.getDef();
- if (input_node_idx.valid())
- {
- // Data transfer cost from parent's node backend to current node's backend:
- auto parent_backend = _backend_resolver->getBackend(input_node_idx);
-
- max_pred_eft = std::max(max_pred_eft, _ops_eft.at(input_node_idx));
- if (parent_backend != backend)
- {
- // Multiply operand size by 2 because size must describe input+output size
- int64_t transfer_cost =
- getPermuteTime(parent_backend, backend, quant, input_operand.info().total_size() * 2);
- transfer_st_exec_time.emplace(_ops_eft.at(input_node_idx), transfer_cost);
- }
- }
- }
- return max_pred_eft;
-}
-
-} // namespace compiler
-
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/HEScheduler.h b/runtime/onert/core/src/compiler/HEScheduler.h
deleted file mode 100644
index b9cee5881..000000000
--- a/runtime/onert/core/src/compiler/HEScheduler.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file HEScheduler.h
- * @brief This file contains HEScheduler class to define and run task Heterogeneous Execution
- * Scheduler
- */
-
-#ifndef __ONERT_COMPILER_H_E_SCHEDULER_H_
-#define __ONERT_COMPILER_H_E_SCHEDULER_H_
-
-#include "compiler/IScheduler.h"
-#include "compiler/BackendManager.h"
-#include "compiler/Compiler.h"
-#include "ir/Graph.h"
-#include "exec/ExecTime.h"
-#include "backend/Backend.h"
-#include <memory>
-#include "ir/OperationIndexMap.h"
-#include <map>
-#include <memory>
-
-namespace onert
-{
-
-namespace compiler
-{
-/**
- * @brief Class to schedule tasks
- */
-class HEScheduler : IScheduler
-{
-public:
- /**
- * @brief Construct a new Heterogeneous Execution Scheduler object
- * @param[in] model Graph model
- * @param[in] backend_resolver backend resolver
- */
- HEScheduler(const backend::BackendContexts &backend_contexts, const CompilerOptions &options)
- : _is_supported{}, _backends_avail_time{}, _ops_eft{},
- _op_to_rank{std::make_shared<ir::OperationIndexMap<int64_t>>()},
- _is_profiling_mode{options.he_profiling_mode},
- _is_linear_exec{options.executor == "Linear"},
- _is_parallel_exec{options.executor == "Parallel"}
- {
- for (auto &entry : backend_contexts)
- {
- if (entry.first->config()->id() == backend::controlflow::Config::ID)
- continue;
- _all_backends.push_back(entry.first);
- }
- _backend_resolver = std::make_unique<compiler::BackendResolver>();
- _exec_time = std::make_unique<exec::ExecTime>(_all_backends);
-
- // Find cpu backend
- auto cpu_backend_it = std::find_if(
- _all_backends.begin(), _all_backends.end(),
- [](const backend::Backend *backend) { return backend->config()->id() == "cpu"; });
- if (cpu_backend_it == _all_backends.end())
- throw std::runtime_error("HEScheduler could be used only if 'cpu' backend is available");
- _cpu_backend = *cpu_backend_it;
- }
-
-public:
- /**
- * @brief Task scheduling
- *
- * @note The main idea is taken from HSIP algo:
- * https://www.hindawi.com/journals/sp/2016/3676149/
- */
- std::unique_ptr<compiler::BackendResolver> schedule(const ir::Graph &graph) final;
- std::shared_ptr<ir::OperationIndexMap<int64_t>> getIndexedRanks() { return _op_to_rank; }
-
-private:
- bool isNodeProfiled(const ir::Operation &);
-
- bool schedule(const ir::OperationIndex &, const backend::Backend *parent_backend);
- /**
- * @brief Get earliest starting time and execution time of an operation on a backend.
- *
- * @note Returns a time when operation's inputs are ready and backend is available
- * It also returns exec time. If this is "cpu" backend, then exec_time*CPU_DELAY
- *
- * @param[in] backend: backend, for which to return the time
- * @param[in] index: index of an operation
- * @param[out] transfer_st_exec_time: est and exec time of data transfer operation
- *
- * @return earliest starting time and execution time
- */
- std::pair<int64_t, int64_t>
- ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index,
- std::multimap<int64_t, int64_t> &transfer_st_exec_time);
- /**
- * @brief Returns the latest finishing time of parents of a node.
- *
- * @param[in] backend: backend, for which to return the time
- * @param[in] node: node to get eft of parents
- * @param[out] transfer_st_exec_time: est and exec time of data transfer operation
- *
- * @return earliest finishing time of parent nodes
- */
- int64_t predMaxEFT(const backend::Backend *backend, const ir::Operation &node,
- std::multimap<int64_t, int64_t> &transfer_st_exec_time);
-
- void makeRank();
-
- int64_t DFSMaxRank(const ir::OperationIndex &index);
-
- int64_t DFSChildrenMaxRank(const ir::OperationIndex &index);
- /**
- * @brief Returns the time, when backend is available for at least given amount of time.
- *
- * @note Returns either hole/gap between two performing two already scheduled operations,
- * or the finishing time of the last scheduled operation
- *
- * @param[in] backend backend, for which to return the time
- * @param[in] starting_time time, starting which to look for gap
- * @param[in] time_amount amount of the time, for which to look gap
- *
- * @return time, when backend has at least time_amount free time
- */
- int64_t backendAvailableTime(const backend::Backend *backend, const int64_t &starting_time,
- const int64_t &time_amount);
-
- int64_t getOpTime(const backend::Backend *backend, const std::string &operation, bool quant,
- uint32_t size);
-
- int64_t getPermuteTime(const backend::Backend *src_backend, const backend::Backend *dst_backend,
- bool quant, uint32_t size);
-
- void scheduleShufflingBackends();
-
- int64_t tryBackend(const ir::Operation &node, const backend::Backend *backend);
-
- /**
- * @brief Schedule a node and its successor until:
- * 1. there is no branching or connection of multiple branches
- * 2. for subsequent nodes: other than predecessor's backend is prefered
- *
- * @param[in] index: index of an operation
- * @param[in] scheduled: a map to check if this node has already been scheduled
- *
- * @return N/A
- */
- void scheduleBranch(const ir::OperationIndex &index, ir::OperationIndexMap<bool> &scheduled);
-
-private:
- // This variable stores backend/node pairs with unknown execution time, and hints scheduler
- // whether it should assign these backends to these nodes:
- // * It stores false for unsupported nodes
- // * During rank calculation with enabled profiling mode it stores true for supported nodes
- std::unordered_map<const backend::Backend *, std::unordered_map<std::string, bool>> _is_supported;
- // Finishing and starting time of each backend
- std::unordered_map<const backend::Backend *, std::map<int64_t, int64_t>> _backends_avail_time;
- ir::OperationIndexMap<int64_t> _ops_eft;
- std::multimap<int64_t, ir::OperationIndex, std::greater<int64_t>> _rank_to_op;
- std::shared_ptr<ir::OperationIndexMap<int64_t>> _op_to_rank;
- std::unique_ptr<compiler::BackendResolver> _backend_resolver;
- std::unique_ptr<exec::ExecTime> _exec_time;
- const ir::Graph *_graph{nullptr};
- std::vector<const backend::Backend *> _all_backends;
- const backend::Backend *_cpu_backend{nullptr}; // TODO Change this to controlflow_backend
- bool _is_profiling_mode;
- bool _is_linear_exec;
- bool _is_parallel_exec;
-};
-
-} // namespace compiler
-
-} // namespace onert
-
-#endif // __ONERT_COMPILER_H_E_SCHEDULER_H_
diff --git a/runtime/onert/core/src/compiler/IScheduler.h b/runtime/onert/core/src/compiler/IScheduler.h
deleted file mode 100644
index 5e9b9bd3c..000000000
--- a/runtime/onert/core/src/compiler/IScheduler.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_CORE_COMPILER_I_SCHEDULER_H__
-#define __ONERT_CORE_COMPILER_I_SCHEDULER_H__
-
-#include "compiler/BackendResolver.h"
-#include "ir/Graph.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-struct IScheduler
-{
- virtual ~IScheduler() = default;
-
- virtual std::unique_ptr<BackendResolver> schedule(const ir::Graph &graph) = 0;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_CORE_COMPILER_I_SCHEDULER_H__
diff --git a/runtime/onert/core/src/compiler/Linear.cc b/runtime/onert/core/src/compiler/Linear.cc
deleted file mode 100644
index 39e58fe11..000000000
--- a/runtime/onert/core/src/compiler/Linear.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-
-#include "Linear.h"
-
-#include "backend/IConfig.h"
-#include "backend/IConstantInitializer.h"
-#include "backend/ITensorRegister.h"
-#include "backend/Backend.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-std::vector<ir::OpSequenceIndex> Linear::linearize(const compiler::LoweredGraph &lowered_graph)
-{
- std::vector<ir::OpSequenceIndex> order;
- lowered_graph.iterateTopolOpSeqs(
- [&](const ir::OpSequenceIndex &index, const ir::OpSequence &) -> void {
- order.emplace_back(index);
- });
- return order;
-}
-
-void Linear::dump(const compiler::LoweredGraph &lowered_graph,
- const std::vector<ir::OpSequenceIndex> &order)
-{
- {
- const auto &toString = [](const onert::backend::Backend *backend) {
- assert(backend);
- std::string str;
- str += backend->config()->id();
- return "{" + str + "}";
- };
-
- VERBOSE(Linear) << "Final OpSequence" << std::endl;
- for (const auto index : order)
- {
- const auto &op_seq = lowered_graph.op_seqs().at(index);
- const auto lower_info = lowered_graph.getLowerInfo(index);
- const auto &operations = lowered_graph.graph().operations();
- VERBOSE(Linear) << "* OP_SEQ " << toString(lower_info->backend()) << " "
- << ir::getStrFromOpSeq(op_seq, operations) << std::endl;
- }
- }
-}
-
-void Linear::planTensors(const compiler::LoweredGraph &lowered_graph,
- const std::vector<ir::OpSequenceIndex> &order)
-{
- const auto &graph = lowered_graph.graph();
- ir::OperandIndexMap<std::shared_ptr<backend::ITensorBuilder>> tensor_builder_map;
-
- ir::OperandIndexMap<uint32_t> uses_map;
- ir::OperandIndexMap<uint32_t> def_map;
- ir::OperandIndexSequence constants;
-
- // Prepare scanning
- graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
- const auto lower_info = lowered_graph.getLowerInfo(ind);
- // TODO Remove if onert doesn't support anymore such as
- // GeneratedTests.reshape_quant8_weights_as_inputs
- if (lower_info->def_factors().size() == 0 && lower_info->use_factors().size() == 0 &&
- !graph.getInputs().contains(ind))
- {
- VERBOSE(LINEAR) << "Operand #" << ind.value() << " will not be used. no more process."
- << std::endl;
- return;
- }
-
- // Unused input of subgraph
- // TODO Register unused input as nullptr in tensor_builder
- if (lower_info->def_factors().size() == 0 && lower_info->use_factors().size() == 0 &&
- graph.getInputs().contains(ind))
- {
- VERBOSE(LINEAR) << "Operand #" << ind.value() << " will not be used. no more process."
- << std::endl;
- return;
- }
-
- uses_map[ind] = obj.getUses().size();
- def_map[ind] = obj.getDef().valid() ? 1 : 0;
-
- bool is_const = obj.isConstant();
- if (is_const)
- {
- constants.append(ind);
- }
-
- auto factor = lower_info->def_factors().getOnlyElement();
- auto backend = factor.backend();
- auto tensor_builder = lowered_graph.backend_contexts().at(backend)->tensor_builder;
- if (!tensor_builder->isRegistered(ind))
- {
- // These tensors do not exist in any op_seq (No use and def)
- const auto info = obj.info();
- const auto backend_layout = factor.layout();
- // TODO Change tensor info to have permuted shape
- tensor_builder->registerTensorInfo(ind, info, backend_layout);
- }
-
- tensor_builder_map[ind] = tensor_builder;
- });
-
- // If a tensor is model output, increase the use of the tensor.
- // This aim is same to above one.
- for (const auto &ind : graph.getOutputs() | ir::Remove::DUPLICATED)
- {
- uses_map[ind]++;
- }
-
- // Start scanning to do notify{First|Last}Use for each tensor
-
- // If a tensor is a constant, increase the use of the tensor.
- // It makes the tensor not be dealloced. It means these will be deallocated last.
- // And allocate constant operands first
- VERBOSE(LINEAR) << "TENSORS as CONSTANT" << std::endl;
- for (const auto &ind : constants)
- {
- uses_map[ind]++;
- tensor_builder_map[ind]->notifyFirstUse(ind);
- }
-
- // Allocate Model's inputs
- VERBOSE(LINEAR) << "TENSORS as MODEL INPUT" << std::endl;
- for (const auto &ind : graph.getInputs() | ir::Remove::DUPLICATED)
- {
- auto tensor_builder = tensor_builder_map[ind];
- if (!tensor_builder) // for GeneratedTests.xxx_weights_as_inputs
- continue;
- tensor_builder->notifyFirstUse(ind);
- }
-
- const auto io_tensors =
- (graph.getInputs() + graph.getOutputs()) | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
-
- // At each operation,
- // 1. Scan DEF of outputs. If the DEF, allocate it
- // 2. Scan USE of inputs. Decrease the USE and deallocate if the USE is 0
- VERBOSE(LINEAR) << "TENSORS" << std::endl;
- for (const auto op_seq_ind : order)
- {
- const auto &op_seq = lowered_graph.op_seqs().at(op_seq_ind);
- for (const auto &op_idx : op_seq.operations())
- {
- for (const auto &ind : graph.operations().at(op_idx).getOutputs() | ir::Remove::DUPLICATED |
- ir::Remove::UNDEFINED)
- {
- assert(def_map.find(ind) != def_map.end());
- if (def_map[ind])
- {
- def_map[ind] = 0;
- tensor_builder_map[ind]->notifyFirstUse(ind);
- }
- }
-
- for (const auto &ind : graph.operations().at(op_idx).getInputs() | ir::Remove::DUPLICATED |
- ir::Remove::UNDEFINED)
- {
- assert(uses_map.find(ind) != uses_map.end());
- assert(uses_map[ind] > 0);
- uses_map[ind]--;
- if (uses_map[ind] == 0)
- {
- // plan for deallocation of static tensornode
- tensor_builder_map[ind]->notifyLastUse(ind);
-
- // plan for deallocation of dynamic tensor
- auto dyn_tensor_manager = tensor_builder_map[ind]->dynamicTensorManager();
- if (dyn_tensor_manager)
- {
- const auto *backend =
- lowered_graph.getLowerInfo(ind)->def_factors().getOnlyElement().backend();
- auto &tensor_registry = lowered_graph.backend_contexts().at(backend)->tensor_registry;
- auto *tensor = tensor_registry->getITensor(ind);
- assert(tensor);
- if (!io_tensors.contains(ind)) // I/O tensors cannot be deallocated
- dyn_tensor_manager->planDealloc(op_idx, tensor);
- }
- }
- }
- }
- }
-
- // Dispose and validate
- for (const auto &ind : graph.getOutputs() | ir::Remove::DUPLICATED)
- {
- --uses_map[ind];
- if (uses_map[ind] == 0) // To prevent notifyLastUse from being called twice
- {
- tensor_builder_map[ind]->notifyLastUse(ind);
- }
- }
-
- for (const auto &ind : constants)
- {
- --uses_map[ind];
- if (uses_map[ind] == 0) // To prevent notifyLastUse from being called twice
- {
- tensor_builder_map[ind]->notifyLastUse(ind);
- }
- }
-
- assert(
- std::all_of(uses_map.begin(), uses_map.end(),
- [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
-
- assert(
- std::all_of(def_map.begin(), def_map.end(),
- [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
-}
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/Linear.h b/runtime/onert/core/src/compiler/Linear.h
deleted file mode 100644
index 1e24cf92b..000000000
--- a/runtime/onert/core/src/compiler/Linear.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_LINEAR_H__
-#define __ONERT_COMPILER_LINEAR_H__
-
-#include <vector>
-#include <memory>
-
-#include "ir/OpSequences.h"
-#include "ir/Index.h"
-#include "backend/ITensorBuilder.h"
-#include "compiler/LoweredGraph.h"
-
-namespace onert
-{
-namespace ir
-{
-struct OperationVisitor;
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-
-class Linear
-{
-public:
- static std::vector<ir::OpSequenceIndex> linearize(const compiler::LoweredGraph &lowered_graph);
- static void dump(const compiler::LoweredGraph &lowered_graph,
- const std::vector<ir::OpSequenceIndex> &order);
- static void planTensors(const compiler::LoweredGraph &lowered_graph,
- const std::vector<ir::OpSequenceIndex> &order);
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_LINEAR_H__
diff --git a/runtime/onert/core/src/compiler/LoweredGraph.cc b/runtime/onert/core/src/compiler/LoweredGraph.cc
deleted file mode 100644
index cdf1a8158..000000000
--- a/runtime/onert/core/src/compiler/LoweredGraph.cc
+++ /dev/null
@@ -1,565 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler/LoweredGraph.h"
-
-#include <assert.h>
-#include <sstream>
-#include "util/logging.h"
-#include "compiler/pass/ConstantInsertionPass.h"
-#include "compiler/pass/ConstantLoweringPass.h"
-#include "compiler/pass/PassRunner.h"
-#include "compiler/pass/PermutationOperationPass.h"
-#include "compiler/pass/PermutationInsertionPass.h"
-#include "compiler/pass/PermutationEliminationPass.h"
-#include "ir/GraphIterator.h"
-#include "ir/verifier/Verifier.h"
-#include "backend/Backend.h"
-#include "backend/IConfig.h"
-#include "compiler/BackendResolver.h"
-#include "compiler/ManualScheduler.h"
-#include "compiler/HEScheduler.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-LoweredGraph::LoweredGraph(const ir::Graph &graph, const CompilerOptions &options) : _graph{graph}
-{
- bool linear_executor = (options.executor == "Linear");
-
- // Build backend contexts
- auto &backend_manager = BackendManager::get();
-
- // Always create Controlflow backend context
- auto cf_backend = backend_manager.getControlflow();
- _backend_contexts.emplace(
- cf_backend, cf_backend->newContext(_graph, _graph.getKernelBuilder(), linear_executor));
-
- // Create contexts for other backends
- for (auto backend_str : options.backend_list)
- {
- backend_manager.loadBackend(backend_str);
- auto backend = backend_manager.get(backend_str);
-
- // TODO As the default value of backend list contains "cpu", "acl_cl" and "acl_neon", and some
- // are not available on x64 or some other platforms. So this may be a workaround for x64 and
- // we should change it back(throw if backend is not loaded) later.
- if (!backend)
- {
- VERBOSE(LoweredGraph) << "Cannot load backend - " << backend_str;
- continue;
- }
-
- _backend_contexts.emplace(
- backend, backend->newContext(_graph, _graph.getKernelBuilder(), linear_executor));
- }
- if (backend_manager.num_backends() == 0)
- throw std::runtime_error{"No available backends loaded."};
-
- // TODO Move "schedule" phase out of here
- // Schedule
- std::unique_ptr<BackendResolver> backend_resolver;
- if (options.he_scheduler)
- {
- auto scheduler = HEScheduler(_backend_contexts, options);
- backend_resolver = scheduler.schedule(_graph);
- _indexed_ranks = scheduler.getIndexedRanks();
- }
- else
- {
- auto scheduler = ManualScheduler(_backend_contexts, options);
- backend_resolver = scheduler.schedule(_graph);
- }
-
- {
- // operand::LowerInfo holder
- ir::OperandIndexMap<std::unique_ptr<ir::operand::LowerInfo>> operands_lower_info;
-
- _graph.operands().iterate([&](const ir::OperandIndex &index, const ir::Operand &) {
- operands_lower_info[index] = std::make_unique<ir::operand::LowerInfo>();
- });
-
- // Make op_seqs while checking whether a node can be merged into a op_seq.
- makeOpSequences(operands_lower_info, options, *backend_resolver);
-
- _op_seqs.iterate([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- assert(op_seq.operations().size() > 0);
- std::reverse(std::begin(op_seq.operations()), std::end(op_seq.operations()));
- });
-
- VERBOSE(OpSequences) << "dump before permutation insertion" << std::endl;
- dumpOpSequences(_op_seqs, _graph.operations());
-
- // Mandatory passes
- pass::PassRunner{}
- .append(std::make_unique<pass::ConstantInsertionPass>(*this))
- .append(std::make_unique<pass::ConstantLoweringPass>(*this))
- .run();
-
- // Set LowerInfo for each operand from the operand::LowerInfo holder
- manipulateLowerInfo(operands_lower_info, options.is_primary_subgraph);
-
- dumpLowerInfo();
- }
-
- // Mandatory passes
- pass::PassRunner{}
- .append(std::make_unique<pass::PermutationOperationPass>(*this))
- .append(std::make_unique<pass::PermutationInsertionPass>(*this))
- .run();
-
- // Optimization passes
- pass::PassRunner{}.append(std::make_unique<pass::PermutationEliminationPass>(*this)).run();
-
- VERBOSE(OpSequences) << "Dump after permutation insertion" << std::endl;
- dumpOpSequences(_op_seqs, _graph.operations());
-
- // Graph verifications
- {
- assert(ir::verifier::DAGChecker().verify(_graph));
- assert(ir::verifier::EdgeConsistencyChecker().verify(_graph));
- }
-}
-
-const ir::operation::LowerInfo *
-LoweredGraph::getLowerInfo(const ir::OpSequenceIndex &op_seq_index) const
-{
- auto itr = _lower_info_map.op_seq.find(op_seq_index);
- if (itr == _lower_info_map.op_seq.end())
- return nullptr;
- return itr->second.get();
-}
-
-void LoweredGraph::setLowerInfo(const ir::OpSequenceIndex &op_seq_index,
- std::unique_ptr<ir::operation::LowerInfo> &&lower_info)
-{
- _lower_info_map.op_seq.insert(std::make_pair(op_seq_index, std::move(lower_info)));
-}
-
-void LoweredGraph::removeLowerInfo(const ir::OpSequenceIndex &op_seq_index)
-{
- auto &op_seq_lower_info = _lower_info_map.op_seq;
- assert(op_seq_lower_info.find(op_seq_index) != op_seq_lower_info.end());
- for (auto it = op_seq_lower_info.begin(); it != op_seq_lower_info.end(); ++it)
- {
- if (it->first == op_seq_index)
- {
- op_seq_lower_info.erase(it);
- break;
- }
- }
-}
-
-const ir::operand::LowerInfo *LoweredGraph::getLowerInfo(const ir::OperandIndex &index) const
-{
- auto itr = _lower_info_map.operand.find(index);
- if (itr == _lower_info_map.operand.end())
- return nullptr;
- return itr->second.get();
-}
-
-ir::operand::LowerInfo *LoweredGraph::getLowerInfo(const ir::OperandIndex &index)
-{
- auto itr = _lower_info_map.operand.find(index);
- if (itr == _lower_info_map.operand.end())
- return nullptr;
- return itr->second.get();
-}
-
-void LoweredGraph::setLowerInfo(const ir::OperandIndex &index,
- std::unique_ptr<ir::operand::LowerInfo> &&lower_info)
-{
- _lower_info_map.operand.insert(std::make_pair(index, std::move(lower_info)));
-}
-
-void LoweredGraph::removeLowerInfo(const ir::OperandIndex &index)
-{
- _lower_info_map.operand.erase(index);
-}
-
-void LoweredGraph::iterateTopolOpSeqs(
- const std::function<void(const ir::OpSequenceIndex &, const ir::OpSequence &)> &fn) const
-{
- // Topological Sorting for ir::OpSequences
- std::vector<ir::OpSequenceIndex> topol_sorted;
- ir::PostDfsIterator<true>{}.iterateOpSeqs(
- *this, [&](const ir::OpSequenceIndex &index, const ir::OpSequence &) {
- topol_sorted.emplace_back(index);
- });
- std::reverse(topol_sorted.begin(), topol_sorted.end());
- for (const auto op_seq_idx : topol_sorted)
- {
- const auto &op_seq = _op_seqs.at(op_seq_idx);
- fn(op_seq_idx, op_seq);
- }
-}
-
-void LoweredGraph::iterateTopolOpSeqs(
- const std::function<void(const ir::OpSequenceIndex &, ir::OpSequence &)> &fn)
-{
- // Topological Sorting for ir::OpSequences
- std::vector<ir::OpSequenceIndex> topol_sorted;
- ir::PostDfsIterator<false>{}.iterateOpSeqs(
- *this, [&](const ir::OpSequenceIndex &index, ir::OpSequence &) {
- topol_sorted.emplace_back(index);
- });
- std::reverse(topol_sorted.begin(), topol_sorted.end());
- for (const auto op_seq_idx : topol_sorted)
- {
- auto &op_seq = _op_seqs.at(op_seq_idx);
- fn(op_seq_idx, op_seq);
- }
-}
-
-ir::OpSequenceIndex LoweredGraph::appendFreshSingleOpSequence(const ir::OperationIndex &node_index,
- const ir::Operation &node)
-{
- // Create a fresh op_seq with one operation, and append it to op_seqs
- // Create a fresh op_seq
- auto op_seq = std::make_unique<ir::OpSequence>(_graph.layout());
-
- // Add an operation
- op_seq->appendOperation(node_index);
-
- // Update input/output
- op_seq->setOutputs(node.getOutputs());
- op_seq->setInputs(node.getInputs());
-
- return _op_seqs.emplace(std::move(op_seq));
-}
-
-void LoweredGraph::makeOpSequences(
- ir::OperandIndexMap<std::unique_ptr<ir::operand::LowerInfo>> &operands_lower_info,
- const CompilerOptions &options, const BackendResolver &backend_resolver)
-{
- // if SUBG_MAX_NODE == 0, no limit on nodes of a op_seq
- const int op_seq_max_node = options.op_seq_max_node;
- assert(op_seq_max_node >= 0);
-
- bool is_profiling = options.he_profiling_mode;
- ir::OpSequence *op_seq = nullptr;
- ir::OpSequenceIndex op_seq_index;
-
- // NOTE: The below method appends nodes while making one op_seq if needed. If something better
- // ways, happy to update this code.
- ir::PostDfsConstIterator{}.iterate(
- _graph, [&](const ir::OperationIndex &node_index, const ir::Operation &node) {
- // LowerInfo for in/output operands
- auto backend = backend_resolver.getBackend(node_index);
-
- // Get frontend's layout
- auto frontend_layout = _graph.layout();
-
- // The layout of each backend should be set at another place
- // TODO Change setting layout of each backend at another place
- auto backend_layout = backend->config()->supportLayout(node, frontend_layout);
-
- for (auto operand : node.getInputs() | ir::Remove::UNDEFINED)
- {
- auto &&lower_info = operands_lower_info.at(operand);
- lower_info->addUsePermuteFactor(ir::operand::PermuteFactor{backend, backend_layout});
- }
- for (auto operand : node.getOutputs() | ir::Remove::UNDEFINED)
- {
- auto &&lower_info = operands_lower_info.at(operand);
- lower_info->addDefPermuteFactor(ir::operand::PermuteFactor{backend, backend_layout});
- }
-
- bool new_op_seq = (op_seq == nullptr ||
- (op_seq_max_node != 0 &&
- op_seq->operations().size() >= static_cast<size_t>(op_seq_max_node)));
-
- // for profiling each op_seq must contain just one node,
- // so that we can measure a node separately
- if (new_op_seq || is_profiling ||
- !mergeable(op_seq_index, node_index, backend_layout, backend_resolver))
- {
- auto new_op_seq_index = appendFreshSingleOpSequence(node_index, node);
-
- // ir::OpSequence LowerInfo
- setLowerInfo(new_op_seq_index,
- std::make_unique<ir::operation::LowerInfo>(backend, backend_layout));
-
- op_seq_index = new_op_seq_index;
- op_seq = &(_op_seqs.at(new_op_seq_index));
-
- VERBOSE(Lower) << "OpSequence#" << op_seq_index.value() << " is created for "
- << "NODE#" << node_index.value() << "(" << node.name() << ")" << std::endl;
- }
- else
- {
- op_seq->appendOperation(node_index);
- // Set inputs
- auto new_inputs = node.getInputs();
- // Add inputs except outputs of the previous node
- for (auto ind : op_seq->getInputs())
- {
- if (!node.getOutputs().contains(ind))
- new_inputs.append(ind);
- }
- op_seq->setInputs(new_inputs);
-
- VERBOSE(Lower) << "OpSequence#" << op_seq_index.value() << " merges "
- << "NODE#" << node_index.value() << "(" << node.name() << ")" << std::endl;
- }
- });
-}
-
-void LoweredGraph::manipulateLowerInfo(
- ir::OperandIndexMap<std::unique_ptr<ir::operand::LowerInfo>> &operands_lower_info,
- bool is_primary)
-{
- const auto controlflow_backend = BackendManager::get().getControlflow();
-
- // TODO Rather than handling primary graph specially,
- // let the permute inserted and remove it later
- if (is_primary)
- {
- // TODO Rather than using NHWC Get frontend layout of this node from IR
- auto factor = ir::operand::PermuteFactor{controlflow_backend, ir::Layout::NHWC};
- for (auto index : _graph.getInputs() | ir::Remove::UNDEFINED)
- {
- auto &&lower_info = operands_lower_info.at(index);
- assert(lower_info->def_factors().empty());
- lower_info->addDefPermuteFactor(factor);
- }
- for (auto index : _graph.getOutputs() | ir::Remove::UNDEFINED)
- {
- auto &&lower_info = operands_lower_info.at(index);
- lower_info->addUsePermuteFactor(factor);
- }
- }
- else
- {
- for (auto index : _graph.getInputs() | ir::Remove::UNDEFINED)
- {
- auto &&lower_info = operands_lower_info.at(index);
- if (!(lower_info->def_factors().size() == 0 && lower_info->use_factors().size() == 0))
- {
- // In case of not that Graph's input is not used in any operation and not the graph's
- // output.
- // In other words, it is not unused input in Graph.
- lower_info->addDefPermuteFactor(*lower_info->use_factors().begin());
- }
- else
- {
- // In case of that an operand is Graph's input and not input or output of any operation
- lower_info->addDefPermuteFactor(ir::operand::PermuteFactor{
- controlflow_backend,
- ir::Layout::NHWC // TODO Get frontend layout of this node from IR
- });
- }
- }
- }
- for (auto index : _graph.getOutputs() | ir::Remove::UNDEFINED)
- {
- auto &&lower_info = operands_lower_info.at(index);
- if (lower_info->def_factors().size() == 0)
- {
- // In case of that an operand is Graph's output and not input or output of any operation
- lower_info->addDefPermuteFactor(ir::operand::PermuteFactor{
- controlflow_backend,
- ir::Layout::NHWC // TODO Get frontend layout of this node from IR
- });
- }
- }
-
- // Set LowerInfo for each operand from the operand::LowerInfo holder
- _graph.operands().iterate([&](const ir::OperandIndex &index, ir::Operand &) {
- setLowerInfo(index, std::move(operands_lower_info[index]));
- });
-}
-
-void LoweredGraph::dumpLowerInfo()
-{
- if (::onert::util::logging::ctx.enabled() == false)
- return;
-
- std::map<uint32_t, std::string> dumps;
-
- _graph.operands().iterate([&](const ir::OperandIndex &index, ir::Operand &object) {
- std::stringstream sstream;
- if (!getLowerInfo(index)->def_factors().empty() || !getLowerInfo(index)->use_factors().empty())
- {
- auto factors_to_string = [](const ir::operand::PermuteFactorSet &factors) {
- std::string str;
- for (auto factor : factors)
- {
- str += factor.backend()->config()->id();
- str += "(" + to_string(factor.layout()) + ")";
- str += " ";
- }
- return "{ " + str + "}";
- };
-
- auto operation_index_to_string = [](const ir::OperationIndexSet &operations) {
- std::string str;
- for (auto op : operations)
- {
- str += std::to_string(op.value());
- str += " ";
- }
- return "{ " + str + "}";
- };
-
- const auto lower_info = getLowerInfo(index);
- const auto &shape = object.shape();
- std::string def_ops =
- object.getDef().valid() ? std::to_string(object.getDef().value()) : "N/A";
- std::string use_ops = operation_index_to_string(object.getUses());
- std::string def_layouts = factors_to_string(lower_info->def_factors());
- std::string use_layouts = factors_to_string(lower_info->use_factors());
- sstream << "Operand #" << index.value() << " LowerInfo" << std::endl;
- sstream << " - Shape : { ";
- for (auto i = 0; i < shape.rank(); ++i)
- {
- sstream << (shape.dim(i)) << " ";
- }
- sstream << "}" << std::endl;
- sstream << " - Def ir::Operations : " << def_ops << std::endl;
- sstream << " - Use ir::Operations : " << use_ops << std::endl;
- sstream << " - Lower Info" << std::endl;
- sstream << " - Def Backends : " << def_layouts << std::endl;
- sstream << " - Use Backends : " << use_layouts << std::endl;
- }
- dumps.emplace(index.value(), sstream.str());
- });
-
- for (const auto &e : dumps)
- {
- if (!e.second.empty())
- {
- VERBOSE(Lower) << e.second;
- }
- }
-}
-
-bool LoweredGraph::mergeable(const ir::OpSequenceIndex &op_seq_index,
- const ir::OperationIndex &node_index, ir::Layout layout,
- const BackendResolver &backend_resolver)
-{
- // Are they mergeable?
- // 1. the same backend id and layout?
- // 2. Is op_seq or node branched?
- // 3. if 1 is true, the op_seq and a node are connected?
- const auto &op_seq = _op_seqs.at(op_seq_index);
- const auto &node = _graph.operations().at(node_index);
-
- // The same backend id and layout?
- {
- const auto op_seq_backend_layout = getLowerInfo(op_seq_index)->layout();
- const auto &op_seq_backend_id = getLowerInfo(op_seq_index)->backend()->config()->id();
- const auto &node_backend_id = backend_resolver.getBackend(node_index)->config()->id();
- VERBOSE(Lower) << "OpSequence#" << op_seq_index.value() << " { " << op_seq_backend_id << "("
- << to_string(op_seq_backend_layout) << ") } "
- << " NODE#" << node_index.value() << " (" << node.name() << ") { "
- << node_backend_id << "(" << to_string(layout) << ") } " << std::endl;
- if (op_seq_backend_id != node_backend_id || op_seq_backend_layout != layout)
- return false;
- }
-
- // Branched?
- {
- std::unordered_set<ir::OperationIndex> branched_set;
-
- // Check for branching up
- for (const auto &input : op_seq.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- const auto &input_obj = _graph.operands().at(input);
- auto def = input_obj.getDef();
- if (def.valid())
- {
- branched_set.insert(def);
- if (branched_set.size() > 1)
- {
- return false;
- }
- }
- }
- branched_set.clear();
-
- // Check for branching down
- for (const auto &output : node.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- // TODO Fix this workaround for the case of model outputs that are used by another operation
- // This is needed since the branching is decided by operation, but for model outputs,
- // there is controlflow backen(use backend) but no actual use operation exists
- if (_graph.getOutputs().contains(output))
- return false;
-
- const auto &output_obj = _graph.operands().at(output);
- for (const auto &use : output_obj.getUses())
- {
- branched_set.insert(use);
- if (branched_set.size() > 1)
- {
- return false;
- }
- }
- }
- }
-
- // Connected?
- // an input of one node is an output of the other node? or vice-versa?
- {
- const auto &node_inputs = node.getInputs();
- const auto &node_outputs = node.getOutputs();
-
- // op_seq's operations are in order so that we just check the first and the last
- std::vector<ir::OperationIndex> op_seq_ops{op_seq.operations()[0]};
- if (op_seq.operations().size() > 1)
- op_seq_ops.emplace_back(op_seq.operations()[op_seq.operations().size() - 1]);
-
- for (const auto &n_index : op_seq_ops)
- {
- const auto &n = _graph.operations().at(n_index);
-
- // node's output == op_seq's input?
- for (const auto input : n.getInputs() | ir::Remove::UNDEFINED)
- {
- if (node_outputs.contains(input))
- {
- VERBOSE(Lower) << "OpSequence#" << op_seq_index.value() << " 's NODE#" << n_index.value()
- << "(" << n.name() << ") is connected to NODE#" << node_index.value()
- << "(" << node.name() << ")" << std::endl;
- return true;
- }
- }
-
- // node's input == op_seq's output?
- for (const auto output : n.getOutputs() | ir::Remove::UNDEFINED)
- {
- if (node_inputs.contains(output))
- {
- VERBOSE(Lower) << "OpSequence#" << op_seq_index.value() << " 's NODE#" << n_index.value()
- << " (" << n.name() << ") is connected to NODE#" << node_index.value()
- << std::endl;
- return true;
- }
- }
- }
-
- VERBOSE(Lower) << "OpSequence#" << op_seq_index.value() << " is not connected to NODE#"
- << node_index.value() << "(" << node.name() << ")" << std::endl;
- }
-
- return false;
-}
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/ManualScheduler.cc b/runtime/onert/core/src/compiler/ManualScheduler.cc
deleted file mode 100644
index ed49ee56f..000000000
--- a/runtime/onert/core/src/compiler/ManualScheduler.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ManualScheduler.h"
-#include "ir/OpCode.h"
-#include "ir/Operations.Include.h"
-#include "backend/Backend.h"
-#include "backend/IConfig.h"
-#include "compiler/BackendManager.h"
-#include "util/ConfigSource.h"
-#include "util/logging.h"
-#include "misc/string_helpers.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-ManualScheduler::ManualScheduler(const backend::BackendContexts &backend_contexts,
- const compiler::CompilerOptions &options)
- : _backend_contexts{backend_contexts}, _options{options}
-{
-}
-
-std::unique_ptr<BackendResolver> ManualScheduler::schedule(const ir::Graph &graph)
-{
- const auto &manual_options = _options.manual_scheduler_options;
- auto backend_resolver = std::make_unique<compiler::BackendResolver>();
-
- // This fallback will be used in case that `backend_for_all` is unavailable
- auto fallback = [&]() -> const backend::Backend * {
- for (auto backend_id : _options.backend_list)
- {
- auto backend = resolveBackend(backend_id);
- if (backend)
- return backend;
- }
- return nullptr;
- }();
- if (fallback == nullptr)
- throw std::runtime_error{"No loaded backends available."};
-
- // 1. Backend for All operations
- const backend::Backend *backend_all = resolveBackend(manual_options.backend_for_all, fallback);
- VERBOSE(ManualScheduler) << "Default backend for all ops: " << backend_all->config()->id()
- << std::endl;
-
- graph.operations().iterate([&](const ir::OperationIndex &index, const ir::Operation &) {
- backend_resolver->setBackend(index, backend_all);
- });
-
- // 2. Backend per operation type
- std::unordered_map<ir::OpCode, backend::Backend *> op_type_map;
- for (auto &pair : manual_options.opcode_to_backend)
- {
- op_type_map.emplace(pair.first, BackendManager::get().get(pair.second));
- }
- // By default, Custom uses cpu backend
- op_type_map[ir::OpCode::Custom] = BackendManager::get().get("cpu");
-
- graph.operations().iterate([&](const ir::OperationIndex &index, const ir::Operation &operation) {
- auto itr = op_type_map.find(operation.opcode());
- if (itr != op_type_map.end())
- {
- backend_resolver->setBackend(index, itr->second);
- }
- });
-
- // 3. Backend per operation
- for (auto &pair : manual_options.index_to_backend)
- {
- const auto &key = pair.first;
- const auto &val = pair.second;
-
- try
- {
- graph.operations().at(key); // Check if exist, or this will throw
- backend_resolver->setBackend(
- key, BackendManager::get().get(
- val)); // TODO Ensure this backend is available in backend contexts
- }
- catch (...)
- {
- VERBOSE(ManualScheduler) << "Invalid value while OperationIndex to Backend mapping : @"
- << key.value() << " -> \"" << val << "\"" << std::endl;
- }
- }
-
- // Dump final assignment
- backend_resolver->iterate([&](const ir::OperationIndex &index, const backend::Backend &backend) {
- VERBOSE(ManualScheduler) << "backend for operation #" << index.value() << ": "
- << backend.config()->id() << std::endl;
- });
-
- return backend_resolver;
-}
-
-const backend::Backend *ManualScheduler::resolveBackend(const std::string &id,
- const backend::Backend *fallback)
-{
- // Ensure if the backend is available in the current backend context
- const backend::Backend *backend = BackendManager::get().get(id);
- if (!backend || _backend_contexts.find(backend) == _backend_contexts.end())
- {
- backend = fallback;
- }
- return backend;
-}
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/ManualScheduler.h b/runtime/onert/core/src/compiler/ManualScheduler.h
deleted file mode 100644
index 41503f7ff..000000000
--- a/runtime/onert/core/src/compiler/ManualScheduler.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_CORE_COMPILER_MANUAL_SCHEDULER_H__
-#define __ONERT_CORE_COMPILER_MANUAL_SCHEDULER_H__
-
-#include "IScheduler.h"
-#include "compiler/Compiler.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-class ManualScheduler : public IScheduler
-{
-public:
- ManualScheduler(const backend::BackendContexts &backend_contexts,
- const compiler::CompilerOptions &options);
- std::unique_ptr<BackendResolver> schedule(const ir::Graph &graph) override;
-
-private:
- const backend::Backend *resolveBackend(const std::string &id,
- const backend::Backend *fallback = nullptr);
-
-private:
- const backend::BackendContexts &_backend_contexts;
- compiler::CompilerOptions _options;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_CORE_COMPILER_MANUAL_SCHEDULER_H__
diff --git a/runtime/onert/core/src/compiler/OperationValidator.cc b/runtime/onert/core/src/compiler/OperationValidator.cc
deleted file mode 100644
index 0582cf154..000000000
--- a/runtime/onert/core/src/compiler/OperationValidator.cc
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationValidator.h"
-
-#include "ir/Graph.h"
-
-#define OP_REQUIRES(EXP) \
- do \
- { \
- if (!(EXP)) \
- throw std::runtime_error("OperationValidator failed at line " + std::to_string(__LINE__)); \
- } while (0)
-
-namespace onert
-{
-namespace compiler
-{
-
-OperationValidator::OperationValidator(const ir::Graph &graph)
- : _graph{graph}, _ctx{graph.operands()}
-{
-}
-
-void OperationValidator::operator()()
-{
- assert(_graph.subgraphs() == nullptr);
-
- _graph.operations().iterate(
- [&](const ir::OperationIndex &, const ir::Operation &node) { node.accept(*this); });
-}
-
-void OperationValidator::visit(const ir::operation::BatchMatMul &node)
-{
- const auto lhs_index(node.getInputs().at(ir::operation::BatchMatMul::Input::LHS));
- const auto rhs_index(node.getInputs().at(ir::operation::BatchMatMul::Input::RHS));
-
- // Constant lhs and rhs is not implemented yet
- OP_REQUIRES(!_ctx.at(lhs_index).isConstant() && !_ctx.at(rhs_index).isConstant());
-}
-
-void OperationValidator::visit(const ir::operation::BatchToSpaceND &node)
-{
- const auto block_size_index{
- node.getInputs().at(ir::operation::BatchToSpaceND::Input::BLOCK_SIZE)};
-
- // Non-constant block_size is not implemented yet
- OP_REQUIRES(_ctx.at(block_size_index).isConstant());
-}
-
-void OperationValidator::visit(const ir::operation::Comparison &node)
-{
- const auto output_index{node.getOutputs().at(0)};
-
- const auto lhs_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT0)};
- const auto rhs_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT1)};
-
- OP_REQUIRES(_ctx.at(lhs_index).typeInfo().type() == _ctx.at(rhs_index).typeInfo().type());
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == ir::DataType::BOOL8);
-}
-
-void OperationValidator::visit(const ir::operation::DepthToSpace &node)
-{
- int32_t block_size = node.param().block_size;
-
- OP_REQUIRES(block_size > 0);
-}
-
-void OperationValidator::visit(const ir::operation::ElementwiseActivation &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(0)};
-
- // Check if I/O types match
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type());
-}
-
-void OperationValidator::visit(const ir::operation::ElementwiseBinary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::RHS)};
-
- OP_REQUIRES(_ctx.at(lhs_index).typeInfo().type() == _ctx.at(rhs_index).typeInfo().type());
- OP_REQUIRES(_ctx.at(lhs_index).typeInfo().type() == _ctx.at(output_index).typeInfo().type());
-}
-
-void OperationValidator::visit(const ir::operation::ElementwiseUnary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)};
-
- // Check if I/O types match
- if (node.param().op_type == ir::operation::ElementwiseUnary::Type::DEQUANTIZE)
- {
- OP_REQUIRES(_ctx.at(input_index).typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM);
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == ir::DataType::FLOAT32);
- }
- else if (node.param().op_type == ir::operation::ElementwiseUnary::Type::QUANTIZE)
- {
- OP_REQUIRES(_ctx.at(input_index).typeInfo().type() == ir::DataType::FLOAT32);
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM);
- }
- else if (node.param().op_type != ir::operation::ElementwiseUnary::Type::CAST)
- {
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type());
- }
-}
-
-void OperationValidator::visit(const ir::operation::EmbeddingLookup &node)
-{
- const auto lookups_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::LOOKUPS)};
-
- OP_REQUIRES(_ctx.at(lookups_index).typeInfo().type() == ir::DataType::INT32);
-}
-
-void OperationValidator::visit(const ir::operation::ExpandDims &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::ExpandDims::Input::AXIS)};
-
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type());
- OP_REQUIRES(_ctx.at(axis_index).typeInfo().type() == ir::DataType::INT32);
-}
-
-void OperationValidator::visit(const ir::operation::HashtableLookup &node)
-{
- const auto hits_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::HITS)};
- const auto lookups_index{node.getInputs().at(ir::operation::HashtableLookup::Input::LOOKUPS)};
- const auto keys_index{node.getInputs().at(ir::operation::HashtableLookup::Input::KEYS)};
-
- OP_REQUIRES(_ctx.at(lookups_index).typeInfo().type() == ir::DataType::INT32);
- OP_REQUIRES(_ctx.at(keys_index).typeInfo().type() == ir::DataType::INT32);
- OP_REQUIRES(_ctx.at(hits_index).typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM);
-}
-
-void OperationValidator::visit(const ir::operation::Pack &node)
-{
- const auto num{node.param().num};
-
- OP_REQUIRES(num == static_cast<int32_t>(node.getInputs().size()));
-}
-
-void OperationValidator::visit(const ir::operation::Pad &node)
-{
- const auto pad_index{node.getInputs().at(ir::operation::Pad::Input::PAD)};
-
- OP_REQUIRES(_ctx.at(pad_index).typeInfo().type() == ir::DataType::INT32);
-}
-
-void OperationValidator::visit(const ir::operation::ResizeBilinear &node)
-{
- auto align_corners = node.param().align_corners;
- auto half_pixel_centers = node.param().half_pixel_centers;
-
- OP_REQUIRES(!align_corners || !half_pixel_centers);
-}
-
-void OperationValidator::visit(const ir::operation::Reverse &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reverse::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::Reverse::Input::AXIS)};
-
- OP_REQUIRES(_ctx.at(axis_index).typeInfo().type() == ir::DataType::INT32);
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type());
-}
-
-void OperationValidator::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto block_size_index{
- node.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
- const auto paddings_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
-
- // Non-constant block_size and padding is not implemented yet
- OP_REQUIRES(_ctx.at(block_size_index).isConstant());
- OP_REQUIRES(_ctx.at(paddings_index).isConstant());
-}
-
-void OperationValidator::visit(const ir::operation::SpaceToDepth &node)
-{
- const auto block_size = node.param().block_size;
- OP_REQUIRES(block_size >= 1);
-}
-
-void OperationValidator::visit(const ir::operation::Split &node)
-{
- const auto num_splits = node.param().num_splits;
-
- OP_REQUIRES(num_splits > 0 && num_splits <= 0xFFFF);
- OP_REQUIRES(node.getOutputs().size() == static_cast<uint32_t>(num_splits));
-}
-
-void OperationValidator::visit(const ir::operation::SquaredDifference &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)};
-
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(lhs_index).typeInfo().type());
- OP_REQUIRES(_ctx.at(lhs_index).typeInfo().type() == _ctx.at(rhs_index).typeInfo().type());
-}
-
-void OperationValidator::visit(const ir::operation::StridedSlice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
-
- OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type());
-}
-
-void OperationValidator::visit(const ir::operation::TransposeConv &node)
-{
- OP_REQUIRES((node.param().padding.type == ir::PaddingType::SAME) ||
- (node.param().padding.type == ir::PaddingType::VALID));
-}
-
-void OperationValidator::visit(const ir::operation::Unpack &node)
-{
- const auto num{node.param().num};
- OP_REQUIRES(num == static_cast<int32_t>(node.getOutputs().size()));
-}
-
-void OperationValidator::visit(const ir::operation::While &node)
-{
- OP_REQUIRES(node.getInputs().size() == node.getOutputs().size());
-}
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/OperationValidator.h b/runtime/onert/core/src/compiler/OperationValidator.h
deleted file mode 100644
index f884a3765..000000000
--- a/runtime/onert/core/src/compiler/OperationValidator.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_OPERATION_VALIDATOR_H__
-#define __ONERT_COMPILER_OPERATION_VALIDATOR_H__
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-class Graph;
-class Operands;
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-
-class OperationValidator : public ir::OperationVisitor
-{
-public:
- OperationValidator(void) = delete;
- OperationValidator(const ir::Graph &graph);
-
-public:
- void operator()();
-
-public:
- void visit(const ir::operation::BatchMatMul &node) override;
- void visit(const ir::operation::BatchToSpaceND &node) override;
- void visit(const ir::operation::Comparison &node) override;
- void visit(const ir::operation::DepthToSpace &node) override;
- void visit(const ir::operation::ElementwiseActivation &node) override;
- void visit(const ir::operation::ElementwiseBinary &node) override;
- void visit(const ir::operation::ElementwiseUnary &node) override;
- void visit(const ir::operation::EmbeddingLookup &node) override;
- void visit(const ir::operation::ExpandDims &node) override;
- void visit(const ir::operation::HashtableLookup &node) override;
- void visit(const ir::operation::Pack &node) override;
- void visit(const ir::operation::Pad &node) override;
- void visit(const ir::operation::ResizeBilinear &node) override;
- void visit(const ir::operation::Reverse &node) override;
- void visit(const ir::operation::SpaceToBatchND &node) override;
- void visit(const ir::operation::SpaceToDepth &node) override;
- void visit(const ir::operation::Split &node) override;
- void visit(const ir::operation::SquaredDifference &node) override;
- void visit(const ir::operation::StridedSlice &node) override;
- void visit(const ir::operation::TransposeConv &node) override;
- void visit(const ir::operation::Unpack &node) override;
- void visit(const ir::operation::While &node) override;
-
-private:
- // TODO Remove _ctx field
- const ir::Graph &_graph;
- const ir::Operands &_ctx;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_OPERATION_VALIDATOR_H__
diff --git a/runtime/onert/core/src/compiler/ParamChecker.cc b/runtime/onert/core/src/compiler/ParamChecker.cc
deleted file mode 100644
index c4f80f087..000000000
--- a/runtime/onert/core/src/compiler/ParamChecker.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ParamChecker.h"
-
-#include "ir/Graph.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-void ParamChecker::operator()()
-{
- _model->operations().iterate(
- [&](const ir::OperationIndex &, const ir::Operation &node) { node.accept(*this); });
-}
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/ParamChecker.h b/runtime/onert/core/src/compiler/ParamChecker.h
deleted file mode 100644
index 61429d521..000000000
--- a/runtime/onert/core/src/compiler/ParamChecker.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file ParamChecker.h
- * @brief This file contains ParamChecker to check\n
- * operations' parameters are compilable at machine independent phase\n
- * ex) Check param is constant
- */
-#ifndef __ONERT_COMPILER_PARAM_CHECKER_H__
-#define __ONERT_COMPILER_PARAM_CHECKER_H__
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-class Graph;
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-
-class ParamChecker : public ir::OperationVisitor
-{
-public:
- /**
- * @brief Construct a new Param Checker object (deleted)
- */
- ParamChecker(void) = delete;
- /**
- * @brief Construct a new Param Checker object
- * @param[in] model Graph model to check
- */
- ParamChecker(std::shared_ptr<ir::Graph> model) : _model{model} {}
-
-public:
- /**
- * @brief Run parameter analysis
- */
- void operator()();
- /**
- * @brief Return analysis result if model have non-const parameter
- * @return @c true if there is non-const parameter, otherwise @c false
- */
- bool haveNoneConstParam(void) { return _nonConstParam; }
-
-private:
- const std::shared_ptr<ir::Graph> _model;
- bool _nonConstParam{false};
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_OPERATION_VALIDATOR_H__
diff --git a/runtime/onert/core/src/compiler/ShapeValidator.cc b/runtime/onert/core/src/compiler/ShapeValidator.cc
deleted file mode 100644
index 8be4fe6ec..000000000
--- a/runtime/onert/core/src/compiler/ShapeValidator.cc
+++ /dev/null
@@ -1,1021 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ShapeValidator.h"
-
-#include <typeinfo>
-
-#include "ir/Graph.h"
-#include "ir/operation/LowerInfo.h"
-
-#include "util/logging.h"
-#include "util/Utils.h"
-
-#define OP_REQUIRES(EXP) \
- do \
- { \
- if (!(EXP)) \
- throw std::runtime_error("ShapeValidator failed at line " + std::to_string(__LINE__)); \
- } while (0)
-
-namespace onert
-{
-namespace compiler
-{
-
-ShapeValidator::ShapeValidator(const ir::Graph &graph)
- : _graph{graph}, _ctx{graph.operands()}, _current_op_seq_layout{ir::Layout::UNKNOWN}
-{
-}
-
-void ShapeValidator::checkUnaryOp(const ir::Operation &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(0)};
-
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- // Check if I/O shapes match
- OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape());
-}
-
-void ShapeValidator::operator()()
-{
- // There is no reason for each subgraph to have subgraphs since compiler has subgraphs when
- // creating Compiler
- assert(_graph.subgraphs() == nullptr);
-
- _current_op_seq_layout = _graph.layout();
-
- _graph.operations().iterate(
- [&](const ir::OperationIndex &, const ir::Operation &node) { node.accept(*this); });
-}
-
-void ShapeValidator::visit(const ir::operation::BatchMatMul &node)
-{
- const auto lhs_index(node.getInputs().at(ir::operation::BatchMatMul::Input::LHS));
- const auto rhs_index(node.getInputs().at(ir::operation::BatchMatMul::Input::RHS));
- const auto out_index{node.getOutputs().at(0)};
-
- if (_ctx.at(out_index).info().isDynamic())
- return;
-
- OP_REQUIRES(_ctx.at(lhs_index).shape().rank() <= 4);
- OP_REQUIRES(_ctx.at(rhs_index).shape().rank() <= 4);
- OP_REQUIRES(_ctx.at(lhs_index).shape().rank() >= 2);
- OP_REQUIRES(_ctx.at(rhs_index).shape().rank() >= 2);
-}
-
-void ShapeValidator::visit(const ir::operation::BatchToSpaceND &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::BatchToSpaceND::Input::INPUT)};
- const auto block_size_index{
- node.getInputs().at(ir::operation::BatchToSpaceND::Input::BLOCK_SIZE)};
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto input_shape = _ctx.at(ifm_index).shape().asFeature(frontend_layout);
- const auto output_shape = _ctx.at(ofm_index).shape().asFeature(frontend_layout);
-
- // All requirement as per NNAPI specification.
- OP_REQUIRES(_ctx.at(ifm_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(ofm_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(block_size_index).shape().rank() == 1);
-
- OP_REQUIRES(_ctx.at(block_size_index).shape().dim(0) == 2);
-
- OP_REQUIRES(input_shape.C == output_shape.C);
-}
-
-void ShapeValidator::visit(const ir::operation::BCQFullyConnected &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::BCQFullyConnected::Input::INPUT)};
- const auto weight_scales_index{
- node.getInputs().at(ir::operation::BCQFullyConnected::Input::WEIGHTS_SCALES)};
- const auto weight_binary_index{
- node.getInputs().at(ir::operation::BCQFullyConnected::Input::WEIGHTS_BINARY)};
- const auto weight_cluster_index{
- node.getInputs().at(ir::operation::BCQFullyConnected::Input::WEIGHTS_CLUSTERS)};
- // const auto bias_index{node.getInputs().at(ir::operation::BCQFullyConnected::Input::BIAS)};
-
- OP_REQUIRES(_ctx.at(ifm_index).shape().rank() == 2);
- OP_REQUIRES(_ctx.at(ofm_index).shape().rank() == 2);
- OP_REQUIRES(_ctx.at(weight_scales_index).shape().rank() == 1);
- OP_REQUIRES(_ctx.at(weight_binary_index).shape().rank() == 2);
- OP_REQUIRES(_ctx.at(weight_cluster_index).shape().rank() == 2);
-
- OP_REQUIRES(_ctx.at(ifm_index).shape().dim(1) == _ctx.at(ofm_index).shape().dim(1));
-
- OP_REQUIRES(_ctx.at(weight_cluster_index).shape().dim(0) > 0);
- OP_REQUIRES(_ctx.at(weight_cluster_index).shape().dim(1) == 2);
-
- // more shape validation will be done inside kernel.
-
- // TODO Check bias dimension (can be null tensor)
-}
-
-void ShapeValidator::visit(const ir::operation::BCQGather &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto indices_index{node.getInputs().at(ir::operation::BCQGather::Input::INDICES)};
- const auto input_binary_index{node.getInputs().at(ir::operation::BCQGather::Input::INPUT_BINARY)};
- const auto input_scales_index{node.getInputs().at(ir::operation::BCQGather::Input::INPUT_SCALES)};
- const auto input_clusters_index{
- node.getInputs().at(ir::operation::BCQGather::Input::INPUT_CLUSTERS)};
-
- OP_REQUIRES(_ctx.at(indices_index).shape().rank() <= 2); // TODO : support rank up to 4 or more
- OP_REQUIRES(_ctx.at(input_binary_index).shape().rank() == 2);
- OP_REQUIRES(_ctx.at(input_scales_index).shape().rank() == 1);
- OP_REQUIRES(_ctx.at(input_clusters_index).shape().rank() == 2);
-
- OP_REQUIRES(_ctx.at(input_clusters_index).shape().dim(0) > 0);
- OP_REQUIRES(_ctx.at(input_clusters_index).shape().dim(1) == 2);
-
- // more shape validation will be done inside kernel.
-}
-
-void ShapeValidator::visit(const ir::operation::Comparison &)
-{
- // TODO Shape validation of comparison
-}
-
-void ShapeValidator::visit(const ir::operation::Softmax &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(0)};
-
- OP_REQUIRES(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
-}
-
-void ShapeValidator::visit(const ir::operation::InstanceNorm &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::InstanceNorm::Input::INPUT)};
- const auto gamma_index{node.getInputs().at(ir::operation::InstanceNorm::Input::GAMMA)};
- const auto beta_index{node.getInputs().at(ir::operation::InstanceNorm::Input::BETA)};
-
- OP_REQUIRES(_ctx.at(ifm_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(ifm_index).shape() == _ctx.at(ofm_index).shape());
- OP_REQUIRES(_ctx.at(gamma_index).shape().rank() == 1);
- OP_REQUIRES(_ctx.at(beta_index).shape().rank() == 1);
-}
-
-void ShapeValidator::visit(const ir::operation::Pool2D &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::Pool2D::Input::INPUT)};
-
- OP_REQUIRES(_ctx.at(ifm_index).shape().rank() == 4);
-}
-
-void ShapeValidator::visit(const ir::operation::Permute &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(0)};
-
- OP_REQUIRES(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
-}
-
-void ShapeValidator::visit(const ir::operation::Reduce &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
- const auto input_shape = _ctx.at(input_index).shape();
- const auto output_shape = _ctx.at(output_index).shape();
-
- OP_REQUIRES(input_shape.rank() <= 4);
- OP_REQUIRES(output_shape.rank() <= input_shape.rank());
-
- // NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
- // supports cases reducing height and width or reducing depth.
- // TODO We have to support all cases of dimensions up to 4.
- // For correct permuting, we have to set output's shape to be equal in dimension position of the
- // input. But the positions of the same dimensions in the input and output may be set differently.
- // For example {2,3,4,5}(input's shape) can be reduced to {3,5}(output's shape). The original
- // output shape should be {1,3,1,5}, but real output shape may be {3,5}. If you simply try to
- // extend it in 4 dimensions, it should be {1,1,3,5}.
- // Even if output shape is changed to {1,3,1,5}, there is another problem. It is that shape of
- // output tensor used at next operation is changed to {1,3,1,5} after this operation even if the
- // next operation is not desired.
- if (input_shape.rank() == 4 && input_shape.rank() != output_shape.rank())
- {
- if (output_shape.rank() == 2)
- {
- // Reducing HW
- OP_REQUIRES(input_shape.dim(0) == output_shape.dim(0) &&
- input_shape.dim(3) == output_shape.dim(1));
- }
- else if (output_shape.rank() == 3)
- {
- // Reducing C or
- // (Reducing H and C(input and output) == 1) or (Reducing W and C(input and output) == 1)
- OP_REQUIRES((input_shape.dim(0) == output_shape.dim(0) &&
- input_shape.dim(1) == output_shape.dim(1) &&
- input_shape.dim(2) == output_shape.dim(2)) ||
- (input_shape.dim(0) == output_shape.dim(0) &&
- (input_shape.dim(1) == output_shape.dim(1) ||
- input_shape.dim(2) == output_shape.dim(1)) &&
- input_shape.dim(3) == 1 && output_shape.dim(2) == 1));
- }
- }
-}
-
-void ShapeValidator::visit(const ir::operation::Transpose &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(ir::operation::Transpose::Input::INPUT)};
- const auto perm_index{node.getInputs().at(ir::operation::Transpose::Input::PERMUTATION)};
-
- const auto &output_shape = _ctx.at(output_index).shape();
- const auto &input_shape = _ctx.at(input_index).shape();
-
- OP_REQUIRES(_ctx.at(perm_index).shape().num_elements() == 0 ||
- input_shape.rank() == static_cast<int>(_ctx.at(perm_index).shape().num_elements()));
- OP_REQUIRES(input_shape.rank() == output_shape.rank());
-}
-
-void ShapeValidator::visit(const ir::operation::RNN &node)
-{
- // NOTE This validation is for static rnn(non-dynamic shape), but not for dynamic rnn
- // TODO Support dynamic rnn
- const auto output_index{node.getOutputs().at(ir::operation::RNN::Output::OUTPUT)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto hidden_state_out_index{
- node.getOutputs().at(ir::operation::RNN::Output::HIDDEN_STATE_OUT)};
-
- const auto input_index{node.getInputs().at(ir::operation::RNN::Input::INPUT)};
- const auto weights_index{node.getInputs().at(ir::operation::RNN::Input::WEIGHTS)};
- const auto recurrent_weights_index{
- node.getInputs().at(ir::operation::RNN::Input::RECURRENT_WEIGHTS)};
- const auto bias_index{node.getInputs().at(ir::operation::RNN::Input::BIAS)};
- const auto hidden_state_in_index{node.getInputs().at(ir::operation::RNN::Input::HIDDEN_STATE_IN)};
-
- const auto batch_size = _ctx.at(output_index).shape().dim(0);
- const auto num_units = _ctx.at(output_index).shape().dim(1);
-
- OP_REQUIRES(_ctx.at(output_index).shape().rank() == 2 &&
- _ctx.at(hidden_state_out_index).shape().rank() == 2 &&
- _ctx.at(input_index).shape().rank() == 2 &&
- _ctx.at(weights_index).shape().rank() == 2 &&
- _ctx.at(recurrent_weights_index).shape().rank() == 2 &&
- _ctx.at(hidden_state_in_index).shape().rank() == 2);
- OP_REQUIRES(_ctx.at(bias_index).shape().rank() == 1);
-
- OP_REQUIRES(batch_size == _ctx.at(input_index).shape().dim(0) &&
- batch_size == _ctx.at(hidden_state_in_index).shape().dim(0) &&
- batch_size == _ctx.at(hidden_state_out_index).shape().dim(0));
- OP_REQUIRES(_ctx.at(input_index).shape().dim(1) == _ctx.at(weights_index).shape().dim(1));
-
- OP_REQUIRES(num_units == _ctx.at(weights_index).shape().dim(0) &&
- num_units == _ctx.at(recurrent_weights_index).shape().dim(0) &&
- num_units == _ctx.at(bias_index).shape().dim(0));
- OP_REQUIRES(num_units == _ctx.at(output_index).shape().dim(1) &&
- num_units == _ctx.at(recurrent_weights_index).shape().dim(1) &&
- num_units == _ctx.at(hidden_state_in_index).shape().dim(1) &&
- num_units == _ctx.at(hidden_state_out_index).shape().dim(1));
-}
-
-void ShapeValidator::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
- const auto block_size_index{
- node.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
- const auto paddings_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto input_shape = _ctx.at(ifm_index).shape().asFeature(frontend_layout);
- const auto output_shape = _ctx.at(ofm_index).shape().asFeature(frontend_layout);
-
- // All requirement as per NNAPI specification.
- OP_REQUIRES(_ctx.at(ifm_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(ofm_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(block_size_index).shape().rank() == 1);
- OP_REQUIRES(_ctx.at(paddings_index).shape().rank() == 2);
-
- OP_REQUIRES(_ctx.at(block_size_index).shape().dim(0) == 2);
- OP_REQUIRES(_ctx.at(paddings_index).shape().dim(0) == 2);
- OP_REQUIRES(_ctx.at(paddings_index).shape().dim(1) == 2);
-
- OP_REQUIRES(input_shape.C == output_shape.C);
-}
-
-void ShapeValidator::visit(const ir::operation::SpaceToDepth &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::SpaceToDepth::Input::INPUT)};
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto input_shape = _ctx.at(ifm_index).shape().asFeature(frontend_layout);
- const auto output_shape = _ctx.at(ofm_index).shape().asFeature(frontend_layout);
- const auto block_size = node.param().block_size;
-
- // All assertions as per NNAPI specification.
- OP_REQUIRES(_ctx.at(ifm_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(ofm_index).shape().rank() == 4);
- OP_REQUIRES((input_shape.H % block_size == 0) && (input_shape.W % block_size == 0));
- OP_REQUIRES(input_shape.N == output_shape.N);
- OP_REQUIRES(input_shape.C * block_size * block_size == output_shape.C);
-}
-
-void ShapeValidator::visit(const ir::operation::ElementwiseActivation &node) { checkUnaryOp(node); }
-
-void ShapeValidator::visit(const ir::operation::ElementwiseBinary &)
-{
- // TODO Shape validation of ElementwiseBinary
-}
-
-void ShapeValidator::visit(const ir::operation::ElementwiseUnary &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)};
-
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape());
-}
-
-void ShapeValidator::visit(const ir::operation::EmbeddingLookup &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lookups_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::LOOKUPS)};
- const auto values_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::VALUES)};
-
- const auto &output_obj = _ctx.at(output_index);
- const auto &lookups_obj = _ctx.at(lookups_index);
- const auto &values_obj = _ctx.at(values_index);
-
- // Verify operand here, not at SimpleEmbeddingLookup::configure() to avoid acl's modifying
- // TensorShape sometimes(Issue: https://github.sec.samsung.net/STAR/nnfw/issues/729)
- {
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto &output_shape = output_obj.shape();
- const auto &lookups_shape = lookups_obj.shape();
- const auto &values_shape = values_obj.shape();
-
- OP_REQUIRES(lookups_shape.rank() == 1);
- OP_REQUIRES(values_shape.rank() >= 2);
-
- // output should be a n-D tensor with the same rank and shape as the values tensor, except for
- // the first dimension which has the same size as lookups' only dimension.
- OP_REQUIRES(output_shape.rank() == values_shape.rank());
- OP_REQUIRES(output_shape.dim(0) == lookups_shape.dim(0));
- for (int n = 1; n < output_shape.rank(); ++n)
- {
- OP_REQUIRES(output_shape.dim(n) == values_shape.dim(n));
- }
- }
-}
-
-void ShapeValidator::visit(const ir::operation::ExpandDims &node)
-{
- const auto axis_index{node.getInputs().at(ir::operation::ExpandDims::Input::AXIS)};
-
- if (_ctx.at(axis_index).info().isDynamic())
- return;
- OP_REQUIRES(_ctx.at(axis_index).shape().rank() <= 1);
-}
-
-void ShapeValidator::visit(const ir::operation::HashtableLookup &node)
-{
- const auto output_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::OUTPUT)};
- const auto lookups_index{node.getInputs().at(ir::operation::HashtableLookup::Input::LOOKUPS)};
- const auto keys_index{node.getInputs().at(ir::operation::HashtableLookup::Input::KEYS)};
- const auto values_index{node.getInputs().at(ir::operation::HashtableLookup::Input::VALUES)};
-
- const auto &output_obj = _ctx.at(output_index);
- const auto &lookups_obj = _ctx.at(lookups_index);
- const auto &keys_obj = _ctx.at(keys_index);
- const auto &values_obj = _ctx.at(values_index);
-
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto &output_shape = output_obj.shape();
- const auto &lookups_shape = lookups_obj.shape();
- const auto &keys_shape = keys_obj.shape();
- const auto &values_shape = values_obj.shape();
-
- OP_REQUIRES(values_shape.rank() == output_shape.rank());
- OP_REQUIRES(lookups_shape.rank() == 1);
- OP_REQUIRES(keys_shape.rank() == 1);
- OP_REQUIRES(values_shape.dim(0) == keys_shape.dim(0));
- OP_REQUIRES(lookups_shape.dim(0) == output_shape.dim(0));
-}
-
-void ShapeValidator::visit(const ir::operation::TransposeConv &node)
-{
- // shape check
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::TransposeConv::Input::INPUT)};
- const auto ker_index{node.getInputs().at(ir::operation::TransposeConv::Input::KERNEL)};
-
- // Only 4D tensors are supported
- OP_REQUIRES(_ctx.at(ofm_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(ofm_index).shape().rank() == _ctx.at(ifm_index).shape().rank());
- OP_REQUIRES(_ctx.at(ofm_index).shape().rank() == _ctx.at(ker_index).shape().rank());
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(frontend_layout);
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(frontend_layout);
- // The kernel has only IHWO layout on frontend
- // So ker_shape is treated here below
- // I -> N
- // H -> H
- // W -> W
- // O -> C
- const auto ker_shape = _ctx.at(ker_index).shape().asFeature(ir::Layout::NHWC);
-
- OP_REQUIRES(ifm_shape.N == ofm_shape.N);
- OP_REQUIRES(ifm_shape.C == ker_shape.C);
- OP_REQUIRES(ker_shape.N == ofm_shape.C);
-}
-
-void ShapeValidator::visit(const ir::operation::Gather &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)};
- const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape();
- const auto indices_shape = _ctx.at(indices_index).shape();
- const auto ofm_shape = _ctx.at(ofm_index).shape();
-
- OP_REQUIRES(ifm_shape.rank() <= 4);
- OP_REQUIRES(indices_shape.rank() <= 3);
- OP_REQUIRES(ofm_shape.rank() <= 4);
-}
-
-void ShapeValidator::visit(const ir::operation::DepthToSpace &node)
-{
- int32_t block_size = node.param().block_size;
-
- // shape check
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(ir::operation::DepthToSpace::Input::INPUT)};
-
- const auto frontend_layout = _current_op_seq_layout;
- const auto output_shape = _ctx.at(output_index).shape().asFeature(frontend_layout);
- const auto input_shape = _ctx.at(input_index).shape().asFeature(frontend_layout);
-
- OP_REQUIRES(_ctx.at(input_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(output_index).shape().rank() == 4);
-
- {
- OP_REQUIRES(output_shape.N == input_shape.N);
- OP_REQUIRES(output_shape.H == input_shape.H * block_size);
- OP_REQUIRES(output_shape.W == input_shape.W * block_size);
- OP_REQUIRES(input_shape.C % (block_size * block_size) == 0);
- OP_REQUIRES(output_shape.C == input_shape.C / (block_size * block_size));
- }
-}
-
-void ShapeValidator::visit(const ir::operation::Pack &node)
-{
- const auto axis{node.param().axis};
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- // shape check
- const auto &output_shape = _ctx.at(output_index).shape();
- const auto output_rank = static_cast<int32_t>(output_shape.rank());
-
- const auto input1_index{node.getInputs().at(0)};
- const auto input_shape = _ctx.at(input1_index).shape();
-
- OP_REQUIRES(axis >= -output_rank && axis < output_rank);
- for (const auto &index : node.getInputs())
- {
- OP_REQUIRES(input_shape == _ctx.at(index).shape());
- }
-}
-
-void ShapeValidator::visit(const ir::operation::LSTM &node)
-{
- // NOTE This validation is for static rnn(non-dynamic shape), but not for dynamic rnn
- // TODO Support dynamic rnn
- const auto output_index{node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto scratch_buffer_index{
- node.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)};
- const auto output_state_out_index{
- node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)};
- const auto cell_state_out_index{
- node.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)};
-
- const auto input_index{node.getInputs().at(ir::operation::LSTM::Input::INPUT)};
- const auto input_to_input_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)};
- const auto input_to_forget_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_FORGET_WEIGHTS)};
- const auto input_to_cell_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_CELL_WEIGHTS)};
- const auto input_to_output_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)};
- const auto recurrent_to_input_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)};
- const auto recurrent_to_forget_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_FORGET_WEIGHTS)};
- const auto recurrent_to_cell_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_CELL_WEIGHTS)};
- const auto recurrent_to_output_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)};
- const auto cell_to_input_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_INPUT_WEIGHTS)};
- const auto cell_to_forget_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_FORGET_WEIGHTS)};
- const auto cell_to_output_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_OUTPUT_WEIGHTS)};
- const auto input_gate_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::INPUT_GATE_BIAS)};
- const auto forget_gate_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::FORGET_GATE_BIAS)};
- const auto cell_bias_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_BIAS)};
- const auto output_gate_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_GATE_BIAS)};
- const auto projection_weights_index{
- node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_WEIGHTS)};
- const auto projection_bias_index{
- node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_BIAS)};
- const auto output_state_in_index{
- node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_STATE_IN)};
- const auto cell_state_in_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_STATE_IN)};
-
- OP_REQUIRES(_ctx.at(input_index).shape().rank() == _ctx.at(output_index).shape().rank());
- for (int i = 0; i < _ctx.at(input_index).shape().rank() - 1; ++i)
- {
- OP_REQUIRES(_ctx.at(input_index).shape().dim(i) == _ctx.at(output_index).shape().dim(i));
- }
- OP_REQUIRES(
- (_ctx.at(output_index).shape().rank() == 2 || _ctx.at(output_index).shape().rank() == 3) &&
- (_ctx.at(input_index).shape().rank() == 2 || _ctx.at(input_index).shape().rank() == 3) &&
- _ctx.at(input_to_input_weights_index).shape().rank() == 2 &&
- _ctx.at(input_to_forget_weights_index).shape().rank() == 2 &&
- _ctx.at(input_to_cell_weights_index).shape().rank() == 2 &&
- _ctx.at(input_to_output_weights_index).shape().rank() == 2 &&
- _ctx.at(recurrent_to_input_weights_index).shape().rank() == 2 &&
- _ctx.at(recurrent_to_forget_weights_index).shape().rank() == 2 &&
- _ctx.at(recurrent_to_cell_weights_index).shape().rank() == 2 &&
- _ctx.at(recurrent_to_output_weights_index).shape().rank() == 2 &&
- _ctx.at(projection_weights_index).shape().rank() == 2 &&
- _ctx.at(output_state_in_index).shape().rank() == 2 &&
- _ctx.at(cell_state_in_index).shape().rank() == 2);
-
- OP_REQUIRES(_ctx.at(cell_to_input_weights_index).shape().rank() == 1 &&
- _ctx.at(cell_to_forget_weights_index).shape().rank() == 1 &&
- _ctx.at(cell_to_output_weights_index).shape().rank() == 1 &&
- _ctx.at(input_gate_bias_index).shape().rank() == 1 &&
- _ctx.at(forget_gate_bias_index).shape().rank() == 1 &&
- _ctx.at(cell_bias_index).shape().rank() == 1 &&
- _ctx.at(output_gate_bias_index).shape().rank() == 1 &&
- _ctx.at(projection_bias_index).shape().rank() == 1);
-
- // CIFG assertion
- OP_REQUIRES((_ctx.at(input_to_input_weights_index).shape().dim(0) == 0 &&
- _ctx.at(input_to_input_weights_index).shape().dim(1) == 0 &&
- _ctx.at(recurrent_to_input_weights_index).shape().dim(0) == 0 &&
- _ctx.at(recurrent_to_input_weights_index).shape().dim(1) == 0 &&
- _ctx.at(input_gate_bias_index).shape().dim(0) == 0 &&
- _ctx.at(cell_to_input_weights_index).shape().dim(0) == 0) ||
- (_ctx.at(input_to_input_weights_index).shape().dim(0) != 0 &&
- _ctx.at(input_to_input_weights_index).shape().dim(1) != 0 &&
- _ctx.at(recurrent_to_input_weights_index).shape().dim(0) != 0 &&
- _ctx.at(recurrent_to_input_weights_index).shape().dim(1) != 0 &&
- _ctx.at(input_gate_bias_index).shape().dim(0) != 0));
-
- // Peephole assertion
- OP_REQUIRES((_ctx.at(cell_to_forget_weights_index).shape().dim(0) == 0 &&
- _ctx.at(cell_to_output_weights_index).shape().dim(0) == 0) ||
- (_ctx.at(cell_to_forget_weights_index).shape().dim(0) != 0 &&
- _ctx.at(cell_to_output_weights_index).shape().dim(0) != 0));
-
- bool has_input_to_input_weights = _ctx.at(input_to_input_weights_index).shape().dim(0) != 0 &&
- _ctx.at(input_to_input_weights_index).shape().dim(1) != 0;
- bool has_recurrent_to_input_weights =
- _ctx.at(recurrent_to_input_weights_index).shape().dim(0) != 0 &&
- _ctx.at(recurrent_to_input_weights_index).shape().dim(1) != 0;
- bool has_input_gate_bias = _ctx.at(input_gate_bias_index).shape().dim(0) != 0;
- bool has_cell_to_input_weights = _ctx.at(cell_to_input_weights_index).shape().dim(0) != 0;
- bool has_cell_to_forget_weights = _ctx.at(cell_to_forget_weights_index).shape().dim(0) != 0;
- bool has_cell_to_output_weights = _ctx.at(cell_to_output_weights_index).shape().dim(0) != 0;
- bool has_projection_weights = _ctx.at(projection_weights_index).shape().dim(0) != 0 &&
- _ctx.at(projection_weights_index).shape().dim(1) != 0;
- bool has_projection_bias = _ctx.at(projection_bias_index).shape().dim(0);
-
- // NOTE The cell_to_input_weights do not exist in non-peephole although regular LSTM(non-CIFG).
- // true: no CIFG
- // false: CIFG
- bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
-
- // NOTE The cell_to_input_weights do not exist in regular CIFG although peephole.
- // true: peephole
- // false: no peephole
- bool has_peephole_param = has_cell_to_forget_weights && has_cell_to_output_weights;
-
- // NOTE The projection weights may have data but the projection bias may not.
- bool has_projection_param = has_projection_weights;
-
- const auto batch_size = (_ctx.at(input_index).shape().rank() == 3 && node.param().time_major)
- ? _ctx.at(input_index).shape().dim(1)
- : _ctx.at(input_index).shape().dim(0);
- OP_REQUIRES(batch_size == _ctx.at(output_state_in_index).shape().dim(0) &&
- batch_size == _ctx.at(cell_state_in_index).shape().dim(0));
-
- const auto input_size = _ctx.at(input_index).shape().dim(_ctx.at(input_index).shape().rank() - 1);
- OP_REQUIRES(input_size == _ctx.at(input_to_forget_weights_index).shape().dim(1) &&
- input_size == _ctx.at(input_to_cell_weights_index).shape().dim(1) &&
- input_size == _ctx.at(input_to_output_weights_index).shape().dim(1));
-
- const auto num_units = _ctx.at(input_to_output_weights_index).shape().dim(0);
- OP_REQUIRES(num_units == _ctx.at(input_to_cell_weights_index).shape().dim(0) &&
- num_units == _ctx.at(input_to_output_weights_index).shape().dim(0) &&
- num_units == _ctx.at(recurrent_to_forget_weights_index).shape().dim(0) &&
- num_units == _ctx.at(recurrent_to_cell_weights_index).shape().dim(0) &&
- num_units == _ctx.at(recurrent_to_output_weights_index).shape().dim(0) &&
- num_units == _ctx.at(forget_gate_bias_index).shape().dim(0) &&
- num_units == _ctx.at(cell_bias_index).shape().dim(0) &&
- num_units == _ctx.at(output_gate_bias_index).shape().dim(0) &&
- num_units == _ctx.at(cell_state_in_index).shape().dim(1));
-
- const auto output_size =
- _ctx.at(output_index).shape().dim(_ctx.at(output_index).shape().rank() - 1);
- OP_REQUIRES(output_size == _ctx.at(recurrent_to_forget_weights_index).shape().dim(1) &&
- output_size == _ctx.at(recurrent_to_cell_weights_index).shape().dim(1) &&
- output_size == _ctx.at(recurrent_to_output_weights_index).shape().dim(1) &&
- output_size == _ctx.at(output_state_in_index).shape().dim(1));
-
- if (has_cifg_param)
- {
- OP_REQUIRES(input_size == _ctx.at(input_to_input_weights_index).shape().dim(1));
- OP_REQUIRES(num_units == _ctx.at(input_to_input_weights_index).shape().dim(0) &&
- num_units == _ctx.at(recurrent_to_input_weights_index).shape().dim(0) &&
- (num_units == _ctx.at(cell_to_input_weights_index).shape().dim(0) ||
- _ctx.at(cell_to_input_weights_index).shape().dim(0) == 0 /* non-peephole */) &&
- num_units == _ctx.at(input_gate_bias_index).shape().dim(0));
- OP_REQUIRES(output_size == _ctx.at(recurrent_to_input_weights_index).shape().dim(1));
- OP_REQUIRES(has_input_to_input_weights && has_recurrent_to_input_weights &&
- has_input_gate_bias);
- if (has_cell_to_input_weights)
- {
- // NOTE The cell_to_input_weights exist only in case of non-CIFG and peephole.
- OP_REQUIRES(has_peephole_param);
- }
- if (_ctx.exist(scratch_buffer_index))
- OP_REQUIRES(_ctx.at(scratch_buffer_index).shape().dim(1) == num_units * 4);
- }
- else
- {
- if (_ctx.exist(scratch_buffer_index))
- OP_REQUIRES(_ctx.at(scratch_buffer_index).shape().dim(1) == num_units * 3);
- }
-
- if (has_peephole_param)
- {
- OP_REQUIRES(num_units == _ctx.at(cell_to_forget_weights_index).shape().dim(0) &&
- num_units == _ctx.at(cell_to_output_weights_index).shape().dim(0) &&
- (num_units == _ctx.at(cell_to_input_weights_index).shape().dim(0) ||
- _ctx.at(cell_to_input_weights_index).shape().dim(0) == 0 /* CIFG */));
- }
-
- if (has_projection_param)
- {
- OP_REQUIRES(num_units == _ctx.at(projection_weights_index).shape().dim(1));
- OP_REQUIRES(output_size == _ctx.at(projection_weights_index).shape().dim(0));
- if (has_projection_bias)
- {
- OP_REQUIRES(output_size == _ctx.at(projection_bias_index).shape().dim(0));
- }
- }
-
- if (_ctx.exist(scratch_buffer_index))
- {
- OP_REQUIRES(_ctx.at(scratch_buffer_index).shape().rank() == 2);
- OP_REQUIRES(batch_size == _ctx.at(scratch_buffer_index).shape().dim(0));
- }
-
- if (_ctx.exist(output_state_out_index))
- {
- OP_REQUIRES(_ctx.at(output_state_out_index).shape().rank() == 2);
- OP_REQUIRES(batch_size == _ctx.at(output_state_out_index).shape().dim(0));
- OP_REQUIRES(output_size == _ctx.at(output_state_out_index).shape().dim(1));
- }
-
- if (_ctx.exist(cell_state_out_index))
- {
- OP_REQUIRES(_ctx.at(cell_state_out_index).shape().rank() == 2);
- OP_REQUIRES(batch_size == _ctx.at(cell_state_out_index).shape().dim(0));
- OP_REQUIRES(num_units == _ctx.at(cell_state_out_index).shape().dim(1));
- }
-}
-
-void ShapeValidator::visit(const ir::operation::L2Normalization &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- if (_ctx.at(ofm_index).info().isDynamic())
- return;
-
- const auto ifm_index{node.getInputs().at(ir::operation::L2Normalization::Input::INPUT)};
-
- auto ifm_shape = _ctx.at(ifm_index).shape();
- auto ofm_shape = _ctx.at(ofm_index).shape();
-
- OP_REQUIRES(ifm_shape.rank() == ofm_shape.rank());
-
- for (auto i = 0; i < ifm_shape.rank(); i++)
- {
- OP_REQUIRES(ifm_shape.dim(i) == ofm_shape.dim(i));
- }
-}
-
-void ShapeValidator::visit(const ir::operation::Unpack &node)
-{
- const auto axis{node.param().axis};
- const auto output_index{node.getInputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(ir::operation::Unpack::Input::INPUT)};
-
- const auto &input_shape = _ctx.at(input_index).shape();
- const auto input_rank = static_cast<int32_t>(input_shape.rank());
-
- OP_REQUIRES(axis >= -input_rank && axis < input_rank);
-}
-
-void ShapeValidator::visit(const ir::operation::Pad &node)
-{
- const auto pad_index{node.getInputs().at(ir::operation::Pad::Input::PAD)};
- OP_REQUIRES(_ctx.at(pad_index).typeInfo().type() == ir::DataType::INT32);
-
- const auto output_index{node.getInputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(ir::operation::Pad::Input::INPUT)};
-
- const auto &pad_shape = _ctx.at(pad_index).shape();
- const auto input_rank = static_cast<int32_t>(_ctx.at(input_index).shape().rank());
-
- OP_REQUIRES(pad_shape.rank() == 2);
- OP_REQUIRES(pad_shape.dim(0) == input_rank);
- OP_REQUIRES(pad_shape.dim(1) == 2);
- OP_REQUIRES(_ctx.at(input_index).shape().rank() == _ctx.at(output_index).shape().rank());
-}
-
-void ShapeValidator::visit(const ir::operation::Select &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- // This validator does not check shape. So checking isDynamic() is skipped.
-
- const auto condition_index{node.getInputs().at(ir::operation::Select::Input::CONDITION)};
- const auto input_true_index{node.getInputs().at(ir::operation::Select::Input::INPUT_TRUE)};
- const auto input_false_index{node.getInputs().at(ir::operation::Select::Input::INPUT_FALSE)};
- UNUSED_RELEASE(output_index);
- UNUSED_RELEASE(input_true_index);
- UNUSED_RELEASE(input_false_index);
-
- OP_REQUIRES(_ctx.at(condition_index).typeInfo().type() == ir::DataType::BOOL8);
-}
-
-void ShapeValidator::visit(const ir::operation::StridedSlice &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
-
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- OP_REQUIRES(_ctx.at(input_index).shape().rank() <= 4);
-}
-
-void ShapeValidator::visit(const ir::operation::Split &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(ir::operation::Split::Input::INPUT)};
- const auto axis_index{node.getInputs().at(ir::operation::Split::Input::AXIS)};
-
- const auto num_splits = node.param().num_splits;
- const auto input_rank = _ctx.at(input_index).shape().rank();
- auto axis = *reinterpret_cast<const int32_t *>(_ctx.at(axis_index).data()->base());
- axis = axis < 0 ? axis + input_rank : axis;
-
- OP_REQUIRES(axis >= 0 && axis < input_rank);
- OP_REQUIRES(_ctx.at(input_index).shape().dim(axis) % num_splits == 0);
-}
-
-void ShapeValidator::visit(const ir::operation::Shape &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(0)};
- UNUSED_RELEASE(input_index);
- OP_REQUIRES(_ctx.at(output_index).shape().rank() == 1);
-}
-
-void ShapeValidator::visit(const ir::operation::ResizeBilinear &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)};
-
- if (_ctx.at(output_index).info().isDynamic())
- {
- return;
- }
- OP_REQUIRES(_ctx.at(input_index).shape().rank() == 4);
- OP_REQUIRES(_ctx.at(output_index).shape().rank() == 4);
-}
-
-void ShapeValidator::visit(const ir::operation::Reverse &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::Reverse::Input::INPUT)};
-
- if (_ctx.at(output_index).info().isDynamic())
- return;
- OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape());
-}
-
-void ShapeValidator::visit(const ir::operation::If &)
-{
- // TODO Add to validate with subgraphs
-}
-
-void ShapeValidator::visit(const ir::operation::While &)
-{
- // This validator does not check shape. So checking isDynamic() is skipped.
- // TODO Add to validate with subgraphs
-}
-
-void ShapeValidator::visit(const ir::operation::SquaredDifference &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)};
- const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)};
-
- // Check for dimension constraints
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- auto output_shape = _ctx.at(output_index).shape();
- auto lhs_shape = _ctx.at(lhs_index).shape();
- auto rhs_shape = _ctx.at(rhs_index).shape();
- // Check for output rank
- OP_REQUIRES(output_shape.rank() == std::max(lhs_shape.rank(), rhs_shape.rank()));
- auto min_rank = std::min(lhs_shape.rank(), rhs_shape.rank());
-
- for (int idx = 1; idx <= min_rank; idx++)
- {
- int l_idx = lhs_shape.rank() - idx;
- int r_idx = rhs_shape.rank() - idx;
- int out_idx = output_shape.rank() - idx;
-
- OP_REQUIRES((l_idx >= 0) && (r_idx >= 0) && (out_idx >= 0));
-
- auto l_dims = lhs_shape.dim(l_idx);
- auto r_dims = rhs_shape.dim(r_idx);
- auto out_dims = output_shape.dim(out_idx);
-
- OP_REQUIRES(((l_dims == r_dims) && (out_dims == l_dims)) ||
- ((l_dims == 1) && (out_dims == r_dims)) || ((r_dims == 1) && (out_dims == l_dims)));
- }
- auto &tmp_shape = (lhs_shape.rank() > rhs_shape.rank()) ? lhs_shape : rhs_shape;
- for (int idx = min_rank + 1; idx <= output_shape.rank(); idx++)
- {
- int out_idx = output_shape.rank() - idx;
- int tmp_idx = tmp_shape.rank() - idx;
-
- OP_REQUIRES((out_idx >= 0) && (tmp_idx >= 0) &&
- (output_shape.dim(out_idx) == tmp_shape.dim(tmp_idx)));
- }
-}
-void ShapeValidator::visit(const ir::operation::Tile &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(0)};
- const auto multiple_index{node.getInputs().at(1)};
-
- OP_REQUIRES(_ctx.at(multiple_index).shape().rank() == 1);
- OP_REQUIRES(_ctx.at(multiple_index).shape().dim(0) == _ctx.at(input_index).shape().rank());
- OP_REQUIRES(_ctx.at(input_index).shape().rank() == _ctx.at(output_index).shape().rank());
-}
-
-void ShapeValidator::visit(const ir::operation::Range &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto start_index{node.getInputs().at(ir::operation::Range::Input::START)};
- const auto limit_index{node.getInputs().at(ir::operation::Range::Input::LIMIT)};
- const auto delta_index{node.getInputs().at(ir::operation::Range::Input::DELTA)};
-
- // Check for dimension constraints
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- OP_REQUIRES(_ctx.at(start_index).shape().rank() == 0);
- OP_REQUIRES(_ctx.at(limit_index).shape().rank() == 0);
- OP_REQUIRES(_ctx.at(delta_index).shape().rank() == 0);
-}
-
-void ShapeValidator::visit(const ir::operation::MatrixBandPart &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(ir::operation::MatrixBandPart::Input::INPUT)};
- const auto num_lower_index{
- node.getInputs().at(ir::operation::MatrixBandPart::Input::NUM_LOWER_DIAG)};
- const auto num_upper_index{
- node.getInputs().at(ir::operation::MatrixBandPart::Input::NUM_UPPER_DIAG)};
-
- // Check for dimension constraints
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- OP_REQUIRES(_ctx.at(input_index).shape().rank() >= 2); // input must be more than 2 dim matrix
- OP_REQUIRES(_ctx.at(num_upper_index).shape().rank() == 0); // num_lower must be scalar
- OP_REQUIRES(_ctx.at(num_lower_index).shape().rank() == 0); // num_upper must be scalar
-}
-
-void ShapeValidator::visit(const ir::operation::LogSoftmax &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- if (_ctx.at(output_index).info().isDynamic())
- return;
-
- const auto input_index{node.getInputs().at(0)};
-
- OP_REQUIRES(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
-}
-
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/ShapeValidator.h b/runtime/onert/core/src/compiler/ShapeValidator.h
deleted file mode 100644
index f40c098d5..000000000
--- a/runtime/onert/core/src/compiler/ShapeValidator.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_SHAPE_VALIDATOR_H__
-#define __ONERT_COMPILER_SHAPE_VALIDATOR_H__
-
-#include "ir/Layout.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-class Graph;
-class Operands;
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-
-class ShapeValidator : public ir::OperationVisitor
-{
-public:
- ShapeValidator(void) = delete;
- ShapeValidator(const ir::Graph &graph);
-
-public:
- void operator()();
-
-public:
- void visit(const ir::operation::BatchMatMul &node) override;
- void visit(const ir::operation::BatchToSpaceND &node) override;
- void visit(const ir::operation::BCQFullyConnected &node) override;
- void visit(const ir::operation::BCQGather &node) override;
- void visit(const ir::operation::Comparison &node) override;
- void visit(const ir::operation::Softmax &node) override;
- void visit(const ir::operation::InstanceNorm &node) override;
- void visit(const ir::operation::Permute &node) override;
- void visit(const ir::operation::Pool2D &node) override;
- void visit(const ir::operation::Reduce &node) override;
- void visit(const ir::operation::Transpose &node) override;
- void visit(const ir::operation::RNN &node) override;
- void visit(const ir::operation::SpaceToBatchND &node) override;
- void visit(const ir::operation::SpaceToDepth &node) override;
- void visit(const ir::operation::ElementwiseActivation &node) override;
- void visit(const ir::operation::ElementwiseBinary &node) override;
- void visit(const ir::operation::ElementwiseUnary &node) override;
- void visit(const ir::operation::EmbeddingLookup &node) override;
- void visit(const ir::operation::ExpandDims &node) override;
- void visit(const ir::operation::HashtableLookup &node) override;
- void visit(const ir::operation::TransposeConv &node) override;
- void visit(const ir::operation::Gather &node) override;
- void visit(const ir::operation::DepthToSpace &node) override;
- void visit(const ir::operation::Pack &node) override;
- void visit(const ir::operation::LSTM &node) override;
- void visit(const ir::operation::L2Normalization &node) override;
- void visit(const ir::operation::Unpack &node) override;
- void visit(const ir::operation::Pad &node) override;
- void visit(const ir::operation::Select &node) override;
- void visit(const ir::operation::StridedSlice &node) override;
- void visit(const ir::operation::Split &node) override;
- void visit(const ir::operation::Shape &node) override;
- void visit(const ir::operation::ResizeBilinear &node) override;
- void visit(const ir::operation::Reverse &node) override;
- void visit(const ir::operation::If &node) override;
- void visit(const ir::operation::While &node) override;
- void visit(const ir::operation::SquaredDifference &node) override;
- void visit(const ir::operation::Tile &node) override;
- void visit(const ir::operation::Range &node) override;
- void visit(const ir::operation::MatrixBandPart &node) override;
- void visit(const ir::operation::LogSoftmax &node) override;
-
-private:
- void checkUnaryOp(const ir::Operation &node);
-
-private:
- // TODO Remove _ctx field
- const ir::Graph &_graph;
- const ir::Operands &_ctx;
- ir::Layout _current_op_seq_layout;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_SHAPE_VALIDATOR_H__
diff --git a/runtime/onert/core/src/compiler/StaticShapeInference.cc b/runtime/onert/core/src/compiler/StaticShapeInference.cc
deleted file mode 100644
index df129d98b..000000000
--- a/runtime/onert/core/src/compiler/StaticShapeInference.cc
+++ /dev/null
@@ -1,1302 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler/StaticShapeInference.h"
-#include "util/ShapeInference.h"
-#include "util/logging.h"
-
-#include <sstream>
-
-namespace onert
-{
-namespace compiler
-{
-
-bool StaticShapeInferer::infer(const ir::OpSequence &op_seq)
-{
- bool has_dynamic_tensor = false;
-
- for (const auto &operation_idx : op_seq.operations())
- {
- auto &op = _operations.at(operation_idx);
- auto opcode = op.opcode();
-
- _return_has_dynamic_tensor = false; // this is used as a return value inside operation's visit()
-
- // IF: need shape inference for then, else
- // While: need shape inference for condition, body
- if (opcode == ir::OpCode::If || opcode == ir::OpCode::While)
- {
- op.accept(*this);
- }
- else
- {
- _return_has_dynamic_tensor = checkDynamicInput(op);
-
- if (_return_has_dynamic_tensor)
- {
- setDynamicOutput(op);
- }
- else
- {
- op.accept(*this);
- }
- }
-
- has_dynamic_tensor = has_dynamic_tensor || _return_has_dynamic_tensor;
- }
-
- return has_dynamic_tensor;
-}
-
-bool StaticShapeInferer::checkDynamicInput(const ir::Operation &op)
-{
- for (auto input_idx : op.getInputs() | ir::Remove::UNDEFINED | ir::Remove::DUPLICATED)
- {
- if (_operands.at(input_idx).info().isDynamic())
- {
- return true;
- }
- }
-
- return false;
-}
-
-void StaticShapeInferer::setDynamicOutput(const ir::Operation &op)
-{
- for (auto output_idx : op.getOutputs())
- {
- _operands.at(output_idx).info().setDynamic();
- }
-}
-
-void StaticShapeInferer::handleBinaryArithmeticOp(const ir::Operation &op,
- const ir::OperandIndex lhs_idx,
- const ir::OperandIndex rhs_idx)
-{
- const auto &lhs = _operands.at(lhs_idx);
- const auto &rhs = _operands.at(rhs_idx);
-
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferEltwiseShape(lhs.info().shape(), rhs.info().shape());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::handleSimpleUnaryOp(const ir::Operation &op,
- const ir::OperandIndex input_idx)
-{
- const auto &input = _operands.at(input_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // re-sizing output shape
- ir::Shape new_shape = input.info().shape();
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::dump()
-{
- auto get_shape_str = [](const ir::Shape &shape) {
- std::stringstream sstream;
- sstream << "shape : {";
- for (int i = 0; i < shape.rank(); i++)
- {
- if (i == 0)
- sstream << shape.dim(i);
- else
- sstream << " " << shape.dim(i);
- }
- sstream << "}";
- return sstream.str();
- };
-
- for (const auto &pair : _lowered_subgs)
- {
- const auto index = pair.first;
- const auto &lowered_subg = pair.second;
- VERBOSE(StaticShapeInferer) << "SubGraph #" << index.value() << std::endl;
- lowered_subg->graph().operands().iterate(
- [&](const ir::OperandIndex &ind, const ir::Operand &operand) {
- VERBOSE(StaticShapeInferer) << "Operand #" << ind.value() << ", "
- << (operand.info().isDynamic() ? "Dynamic" : "Static") << ", "
- << get_shape_str(operand.info().shape()) << std::endl;
- });
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::ArgMax &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::ArgMax::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto axis_idx{op.getInputs().at(ir::operation::ArgMax::Input::AXIS)};
- const auto &axis = _operands.at(axis_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- if (!axis.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- const auto rank = input.info().shape().rank();
- auto axis_value = axis.asScalar<int32_t>();
- axis_value = axis_value < 0 ? axis_value + rank : axis_value;
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferArgMaxShape(input.info().shape(), axis_value, rank);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::BatchMatMul &op)
-{
- const auto lhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::LHS);
- const auto rhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::RHS);
- const auto output_index = op.getOutputs().at(0);
- const auto &lhs = _operands.at(lhs_index);
- const auto &rhs = _operands.at(rhs_index);
- auto &output = _operands.at(output_index);
- auto new_shape = shape_inference::inferBatchMatMulShape(lhs.shape(), rhs.shape(), op.param());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::BCQFullyConnected &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::BCQFullyConnected::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto cluster_idx{
- op.getInputs().at(ir::operation::BCQFullyConnected::Input::WEIGHTS_CLUSTERS)};
- const auto &cluster = _operands.at(cluster_idx);
-
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- auto cluster_buf = reinterpret_cast<const int32_t *>(cluster.data()->base());
- assert(cluster_buf);
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferBCQFullyConnectedShape(
- input.info().shape(), cluster.info().shape(), cluster_buf);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::BCQGather &op)
-{
- const auto indices_idx{op.getInputs().at(ir::operation::BCQGather::Input::INDICES)};
- const auto &indices = _operands.at(indices_idx);
-
- const auto input_binary_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_BINARY)};
- const auto &input_binary = _operands.at(input_binary_idx);
-
- const auto cluster_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_CLUSTERS)};
- const auto &cluster = _operands.at(cluster_idx);
-
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- auto cluster_buf = reinterpret_cast<const int32_t *>(cluster.data()->base());
- assert(cluster_buf);
-
- auto rank = input_binary.shape().rank();
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferBCQGatherShape(
- indices.info().shape(), cluster.info().shape(), cluster_buf, rank, op.param());
-
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::BinaryArithmetic &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::BinaryArithmetic::Input::LHS),
- op.getInputs().at(ir::operation::BinaryArithmetic::Input::RHS));
-}
-
-void StaticShapeInferer::visit(const ir::operation::BroadcastTo &op)
-{
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- const auto shape_idx{op.getInputs().at(ir::operation::BroadcastTo::Input::SHAPE)};
- const auto &shape = _operands.at(shape_idx);
-
- if (!shape.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- // assert(shape.typeInfo().type() == ir::DataType::INT32);
- auto shape_buffer = reinterpret_cast<const int32_t *>(shape.data()->base());
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferBroadcastToShape(shape.info().shape(), shape_buffer);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Comparison &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::Comparison::Input::INPUT0),
- op.getInputs().at(ir::operation::Comparison::Input::INPUT1));
-}
-
-void StaticShapeInferer::visit(const ir::operation::Concat &op)
-{
- const auto input_count = op.getInputs().size();
-
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- shape_inference::Shapes input_shapes;
- for (uint32_t i = 0; i < input_count; i++)
- {
- const auto input_idx{op.getInputs().at(i)};
- const auto &input = _operands.at(input_idx);
- input_shapes.emplace_back(input.shape());
- }
-
- ir::Shape out_shape = shape_inference::inferConcatShape(input_shapes, op.param());
-
- // re-sizing output shape
- output.info().shape(out_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Conv2D &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Conv2D::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
- const auto ker_idx{op.getInputs().at(ir::operation::Conv2D::Input::KERNEL)};
- const auto &ker = _operands.at(ker_idx);
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // re-sizing output shape
- ir::Shape new_shape =
- shape_inference::inferConv2DShape(input.info().shape(), ker.info().shape(), op.param());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::ElementwiseActivation &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::ElementwiseActivation::Input::INPUT));
-}
-
-void StaticShapeInferer::visit(const ir::operation::ElementwiseBinary &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::ElementwiseBinary::Input::LHS),
- op.getInputs().at(ir::operation::ElementwiseBinary::Input::RHS));
-}
-
-void StaticShapeInferer::visit(const ir::operation::ElementwiseUnary &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT));
-}
-
-void StaticShapeInferer::visit(const ir::operation::ExpandDims &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::ExpandDims::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
- const auto axis_idx{op.getInputs().at(ir::operation::ExpandDims::Input::AXIS)};
- const auto &axis = _operands.at(axis_idx);
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- if (!axis.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- // even when axis is constant, output shape should be recalculated since user might call
- // nnfw_set_input_tensorinfo(input, some_new_shape)
- auto axis_buf = reinterpret_cast<const int32_t *>(axis.data()->base());
- assert(axis_buf);
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferExpandDimsShape(input.info().shape(), axis_buf[0]);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Fill &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Fill::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- if (!input.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- assert(input.typeInfo().type() == ir::DataType::INT32);
-
- auto input_buf = reinterpret_cast<const int32_t *>(input.data()->base());
- assert(input_buf);
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferFillShape(input.info().shape(), input_buf);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::FullyConnected &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::FullyConnected::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto ker_idx{op.getInputs().at(ir::operation::FullyConnected::Input::WEIGHT)};
- const auto &ker = _operands.at(ker_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
- // re-sizing output shape
- ir::Shape new_shape =
- shape_inference::inferFullyConnectedShape(input.info().shape(), ker.info().shape());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::FusedBatchNorm &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::FusedBatchNorm::Input::INPUT));
-}
-
-void StaticShapeInferer::visit(const ir::operation::Gather &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Gather::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- const auto indices_idx{op.getInputs().at(ir::operation::Gather::Input::INDICES)};
- const auto &indices = _operands.at(indices_idx);
- const auto rank = input.info().shape().rank();
- const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
-
- assert(0 <= axis && axis < rank);
-
- // re-sizing output shape
- ir::Shape new_shape =
- shape_inference::inferGatherShape(input.info().shape(), indices.info().shape(), axis, rank);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::If &op)
-{
- auto &then_graph = _lowered_subgs.at(op.param().then_subg_index)->graph();
- auto &else_graph = _lowered_subgs.at(op.param().else_subg_index)->graph();
- const std::vector<ir::OperandIndex> inputs{op.getInputs().begin() + 1, op.getInputs().end()};
- const auto &outputs = op.getOutputs();
-
- // re-sizing input shapes of then subgraph
- const auto &then_inputs = then_graph.getInputs();
- assert(inputs.size() == then_inputs.size());
- for (size_t i = 0; i < inputs.size(); ++i)
- {
- auto &then_input = then_graph.operands().at(then_inputs.at(i));
- if (_operands.at(inputs.at(i)).info().isDynamic())
- {
- then_input.info().setDynamic();
- }
- else
- {
- auto new_shape = _operands.at(inputs.at(i)).info().shape();
- then_input.info().shape(new_shape);
- }
- }
-
- // re-sizing input shapes of else subgraph
- const auto &else_inputs = else_graph.getInputs();
- assert(inputs.size() == else_inputs.size());
- for (size_t i = 0; i < inputs.size(); ++i)
- {
- auto &else_input = else_graph.operands().at(else_inputs.at(i));
- if (_operands.at(inputs.at(i)).info().isDynamic())
- {
- else_input.info().setDynamic();
- }
- else
- {
- const auto &new_shape = _operands.at(inputs.at(i)).info().shape();
- else_input.info().shape(new_shape);
- }
- }
-
- // re-sizing operands of then subgraph
- StaticShapeInferer then_inferer(op.param().then_subg_index, _lowered_subgs);
- _lowered_subgs.at(op.param().then_subg_index)
- ->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- bool has_dynamic_tensor = then_inferer.infer(op_seq);
- op_seq.has_dynamic_tensor(has_dynamic_tensor);
- });
-
- // re-sizing operands of else subgraph
- StaticShapeInferer else_inferer(op.param().else_subg_index, _lowered_subgs);
- _lowered_subgs.at(op.param().else_subg_index)
- ->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- bool has_dynamic_tensor = else_inferer.infer(op_seq);
- op_seq.has_dynamic_tensor(has_dynamic_tensor);
- });
-
- // re-sizing output shapes
- const auto &then_outputs = _lowered_subgs.at(op.param().then_subg_index)->graph().getOutputs();
- const auto &else_outputs = _lowered_subgs.at(op.param().else_subg_index)->graph().getOutputs();
- assert(outputs.size() == then_outputs.size());
- assert(outputs.size() == else_outputs.size());
- for (size_t i = 0; i < outputs.size(); ++i)
- {
- const auto &then_output = then_graph.operands().at(then_outputs.at(i));
- const auto &else_output = else_graph.operands().at(else_outputs.at(i));
- auto &output = _operands.at(outputs.at(i));
- if (!then_output.info().isDynamic() && !else_output.info().isDynamic() &&
- then_output.shape() == else_output.shape())
- {
- output.info().shape(then_output.shape());
- }
- else
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- }
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::L2Normalization &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::L2Normalization::Input::INPUT));
-}
-
-void StaticShapeInferer::visit(const ir::operation::LSTM &op)
-{
- const auto output_index{op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)};
- auto &output = _operands.at(output_index);
-
- const auto output_state_out_index{
- op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)};
-
- const auto cell_state_out_index{op.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)};
-
- const auto scratch_buffer_index{op.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)};
-
- if (output.info().isDynamic() || (_operands.exist(output_state_out_index) &&
- _operands.at(output_state_out_index).info().isDynamic()) ||
- (_operands.exist(cell_state_out_index) &&
- _operands.at(cell_state_out_index).info().isDynamic()) ||
- (_operands.exist(scratch_buffer_index) &&
- _operands.at(scratch_buffer_index).info().isDynamic()))
- return;
-
- const auto input_index{op.getInputs().at(ir::operation::LSTM::Input::INPUT)};
- const auto &input = _operands.at(input_index);
-
- const auto input_to_output_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)};
- const auto &input_to_output_weights = _operands.at(input_to_output_weights_index);
-
- const auto recurrent_to_output_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)};
- const auto &recurrent_to_output_weights = _operands.at(recurrent_to_output_weights_index);
-
- // re-sizing outputs
- const int n_batch = (input.shape().rank() == 3 && op.param().time_major) ? input.shape().dim(1)
- : input.shape().dim(0);
- const int n_cell = input_to_output_weights.shape().dim(0);
- const int n_output = recurrent_to_output_weights.shape().dim(1);
- if (input.shape().rank() == 3)
- {
- if (op.param().time_major)
- output.info().shape(ir::Shape{input.shape().dim(0), n_batch, n_output});
- else
- output.info().shape(ir::Shape{n_batch, input.shape().dim(1), n_output});
- }
- else
- {
- assert(input.shape().rank() == 2);
- output.info().shape(ir::Shape{n_batch, n_output});
- }
-
- if (_operands.exist(output_state_out_index))
- {
- auto &output_state_out = _operands.at(output_state_out_index);
- output_state_out.info().shape(ir::Shape{n_batch, n_output});
- }
-
- if (_operands.exist(cell_state_out_index))
- {
- auto &cell_state_out = _operands.at(cell_state_out_index);
- cell_state_out.info().shape(ir::Shape{n_batch, n_cell});
- }
-
- if (_operands.exist(scratch_buffer_index))
- {
- auto &scratch_buffer = _operands.at(scratch_buffer_index);
-
- const auto input_to_input_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)};
- const auto recurrent_to_input_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)};
-
- bool has_input_to_input_weights =
- _operands.at(input_to_input_weights_index).shape().dim(0) != 0 &&
- _operands.at(input_to_input_weights_index).shape().dim(1) != 0;
- bool has_recurrent_to_input_weights =
- _operands.at(recurrent_to_input_weights_index).shape().dim(0) != 0 &&
- _operands.at(recurrent_to_input_weights_index).shape().dim(1) != 0;
-
- // NOTE The cell_to_input_weights do not exist in non-peephole although regular LSTM(non-CIFG).
- // true: no CIFG
- // false: CIFG
- bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
- if (has_cifg_param)
- {
- scratch_buffer.info().shape(ir::Shape{n_batch, n_cell * 4});
- }
- else
- {
- scratch_buffer.info().shape(ir::Shape{n_batch, n_cell * 3});
- }
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::MatrixBandPart &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::MatrixBandPart::Input::INPUT));
-}
-
-void StaticShapeInferer::visit(const ir::operation::OneHot &op)
-{
- const auto indice_idx{op.getInputs().at(ir::operation::OneHot::Input::INDICES)};
- const auto &indice = _operands.at(indice_idx);
- const auto depth_idx{op.getInputs().at(ir::operation::OneHot::Input::DEPTH)};
- const auto &depth = _operands.at(depth_idx);
-
- const auto axis = op.param().axis;
-
- auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- if (!depth.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- const auto *depth_buf = reinterpret_cast<const int32_t *>(depth.data()->base());
- assert(depth_buf);
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferOnehotShape(indice.info().shape(), *depth_buf, axis);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Pack &op)
-{
- const auto input_idx{op.getInputs().at(0)};
- const auto &input = _operands.at(input_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- const auto rank = input.shape().rank() + 1;
- const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
- const auto num = op.param().num;
-
- assert(0 <= axis && axis < rank);
-
- // re-sizing output shape
- ir::Shape new_shape = shape_inference::inferPackShape(input.info().shape(), axis, rank, num);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Pad &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Pad::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto pad_idx{op.getInputs().at(ir::operation::Pad::Input::PAD)};
- const auto &pad = _operands.at(pad_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // if pad is not constant, output also becomes dynamic
- if (!pad.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- // re-sizing output shape
- const auto new_shape = shape_inference::inferPadShape(
- input.shape(), reinterpret_cast<const int32_t *>(pad.data()->base()),
- pad.shape().num_elements());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Permute &op)
-{
- const auto input_idx{op.getInputs().at(0)};
- const auto &input = _operands.at(input_idx);
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // re-sizing output shape
- // Permute is a special operation that layouts of input/output may be different on backend
- // However, it is not applied here, so input/output have the same layout of frontend. Because
- // "ExecutorFactory" would convert shape of input/output accoding to the layouts when registering
- // operand info to "TensorBuilder" after calling "StaticShapeInferer"
- const auto new_shape = input.info().shape();
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Pow &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::Pow::Input::LHS),
- op.getInputs().at(ir::operation::Pow::Input::RHS));
-}
-
-void StaticShapeInferer::visit(const ir::operation::Range &op)
-{
- const auto start_idx{op.getInputs().at(ir::operation::Range::Input::START)};
- const auto limit_idx{op.getInputs().at(ir::operation::Range::Input::LIMIT)};
- const auto delta_idx{op.getInputs().at(ir::operation::Range::Input::DELTA)};
- const auto &start_op = _operands.at(start_idx);
- const auto &limit_op = _operands.at(limit_idx);
- const auto &delta_op = _operands.at(delta_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- ir::Shape new_shape;
- if (start_op.isConstant() && limit_op.isConstant() && delta_op.isConstant())
- {
- assert(start_op.typeInfo().type() == limit_op.typeInfo().type() &&
- start_op.typeInfo().type() == delta_op.typeInfo().type());
- if (output.typeInfo().type() == ir::DataType::FLOAT32)
- {
- new_shape = shape_inference::inferRangeShape<float>(
- start_op.asScalar<float>(), limit_op.asScalar<float>(), delta_op.asScalar<float>());
- }
- else if (output.typeInfo().type() == ir::DataType::INT32)
- {
- new_shape = shape_inference::inferRangeShape<int32_t>(
- start_op.asScalar<int32_t>(), limit_op.asScalar<int32_t>(), delta_op.asScalar<int32_t>());
- }
- assert(output.shape() == new_shape);
- }
- else
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::Reduce &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Reduce::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto axes_idx{op.getInputs().at(ir::operation::Reduce::Input::AXES)};
- const auto &axes = _operands.at(axes_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- std::vector<int32_t> axes_vec;
- for (size_t i = 0; i < axes.shape().num_elements(); ++i)
- {
- switch (axes.typeInfo().type())
- {
- case ir::DataType::INT32:
- {
- axes_vec.emplace_back(reinterpret_cast<const int32_t *>(axes.data()->base())[i]);
- break;
- }
- case ir::DataType::INT64:
- {
- axes_vec.emplace_back(reinterpret_cast<const int64_t *>(axes.data()->base())[i]);
- break;
- }
- default:
- throw std::runtime_error("StaticShapeInferer " + op.name() + ": Not supported data type");
- break;
- }
- }
- const auto keep_dims = op.param().keep_dims;
-
- // re-sizing output shape
- ir::Shape new_shape =
- shape_inference::inferReduceShape(input.info().shape(), axes_vec, keep_dims);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Reshape &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Reshape::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // New shape is given by second input tensor
- if (op.getInputs().size() == 2)
- {
- // Let's check the second input
- const auto shape_idx{op.getInputs().at(ir::operation::Reshape::Input::SHAPE)};
- const auto &shape = _operands.at(shape_idx);
-
- if (shape.isConstant())
- {
- const auto *shape_buf = reinterpret_cast<const int32_t *>(shape.data()->base());
- assert(shape_buf);
-
- ir::Shape new_shape = shape_inference::inferReshapeShape(
- shape_buf, shape.shape().num_elements(), input.shape().num_elements());
-
- // if shape is from Const, TFLC put the shape of output into tensor
- if (new_shape != output.shape())
- {
- // change on output shape
- output.info().shape(new_shape);
- }
- }
- else
- {
- // if shape is NOT Const, set output shape to be dynamic_
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- }
- }
- // New shape is given by option
- else if (op.param().new_shape.size() != 0)
- {
- // Let's check the new_shape option
- auto shape = op.param().new_shape;
- ir::Shape new_shape = shape_inference::inferReshapeShape(shape.data(), shape.size(),
- input.shape().num_elements());
-
- if (new_shape != output.shape())
- {
- // change on output shape
- output.info().shape(new_shape);
- }
- }
- else
- {
- throw std::runtime_error("Reshape: new shape is missing");
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::ResizeBilinear &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- int32_t height_out, width_out;
- if (op.getInputs().size() == 2)
- {
- auto &size = _operands.at(op.getInputs().at(ir::operation::ResizeBilinear::Input::SIZE));
- if (!size.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
- const auto size_v = size.asVector<std::int32_t>();
- height_out = size_v[0];
- width_out = size_v[1];
- }
- else
- {
- height_out = op.param().height_out;
- width_out = op.param().width_out;
- }
-
- // Shape inferencing logic based on Params
- ir::Shape new_shape =
- shape_inference::inferResizeBilinearShape(input.shape(), height_out, width_out);
-
- // if size_op is from Const, TFLC put the shape of output into tensor
- if (new_shape != output.shape())
- {
- // change on output shape
- output.info().shape(new_shape);
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::Reverse &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Reverse::Input::INPUT));
-}
-
-void StaticShapeInferer::visit(const ir::operation::Select &op)
-{
- const auto input_cond_idx{op.getInputs().at(ir::operation::Select::Input::CONDITION)};
- const auto &input_cond = _operands.at(input_cond_idx);
-
- const auto input_true_idx{op.getInputs().at(ir::operation::Select::Input::INPUT_TRUE)};
- const auto &input_true = _operands.at(input_true_idx);
-
- const auto input_false_idx{op.getInputs().at(ir::operation::Select::Input::INPUT_FALSE)};
- const auto &input_false = _operands.at(input_false_idx);
-
- auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // Select output shpae
- ir::Shape new_shape = shape_inference::inferSelectShape(
- input_cond.info().shape(), input_true.info().shape(), input_false.info().shape());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Shape &op)
-{
- const auto input_idx{op.getInputs().at(0)};
- const auto &input = _operands.at(input_idx);
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // re-sizing output shape
- ir::Shape output_shape;
- output_shape.append(input.info().shape().rank());
-
- output.info().shape(output_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Slice &op)
-{
- const auto input_index{op.getInputs().at(ir::operation::Slice::Input::INPUT)};
- const auto &input = _operands.at(input_index);
- const auto begins_index{op.getInputs().at(ir::operation::Slice::Input::BEGINS)};
- const auto &begins = _operands.at(begins_index);
- const auto sizes_index{op.getInputs().at(ir::operation::Slice::Input::SIZES)};
- const auto &sizes = _operands.at(sizes_index);
- const auto output_index = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_index);
-
- // Whether input is constant or not does not affect whether output is dynamic or not
- if (!(begins.isConstant() && sizes.isConstant()))
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- auto begins_buf = reinterpret_cast<const int32_t *>(begins.data()->base());
- auto sizes_buf = reinterpret_cast<const int32_t *>(sizes.data()->base());
-
- ir::Shape new_shape =
- shape_inference::inferSliceShape(input.info().shape(), begins_buf, sizes_buf);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Softmax &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Softmax::Input::INPUT));
-}
-
-void StaticShapeInferer::visit(const ir::operation::SpaceToBatchND &op)
-{
- const auto output_index = op.getOutputs().at(0);
- const auto input_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
- const auto block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
- const auto padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
-
- ir::Operand &output = _operands.at(output_index);
- const auto &input = _operands.at(input_idx);
- const auto &block_shape = _operands.at(block_shape_idx);
- const auto &padding = _operands.at(padding_idx);
-
- // Whether input is constant or not does not affect whether output is dynamic or not
- if (!(block_shape.isConstant() && padding.isConstant()))
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- auto input_shape = input.info().shape();
- auto block_shape_shape = block_shape.info().shape();
- auto padding_shape = padding.info().shape();
-
- auto block_shape_data = reinterpret_cast<const int32_t *>(block_shape.data()->base());
- auto padding_data = reinterpret_cast<const int32_t *>(padding.data()->base());
-
- ir::Shape new_shape = shape_inference::inferSpaceToBatchNDShape(
- input_shape, block_shape_shape, padding_shape, block_shape_data, padding_data);
-
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Split &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Split::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto axis_idx{op.getInputs().at(ir::operation::Split::Input::AXIS)};
- const auto &axis = _operands.at(axis_idx);
-
- auto outputs = op.getOutputs();
- if (!axis.isConstant())
- {
- for (auto output_idx : outputs)
- {
- ir::Operand &output = _operands.at(output_idx);
- output.info().setDynamic();
- }
- _return_has_dynamic_tensor = true;
- return;
- }
-
- const auto num_splits = op.param().num_splits;
-
- const auto rank = input.info().shape().rank();
- auto axis_value = axis.asScalar<int32_t>();
- axis_value = axis_value < 0 ? axis_value + rank : axis_value;
-
- assert(0 <= axis_value && axis_value < rank);
-
- ir::Shape new_shape =
- shape_inference::inferSplitShape(input.info().shape(), axis_value, num_splits);
- for (auto output_idx : outputs)
- {
- ir::Operand &output = _operands.at(output_idx);
- output.info().shape(new_shape);
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::SquaredDifference &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::SquaredDifference::Input::LHS),
- op.getInputs().at(ir::operation::SquaredDifference::Input::RHS));
-}
-
-void StaticShapeInferer::visit(const ir::operation::Squeeze &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- // Squeeze output shpae
- ir::Shape new_shape = shape_inference::inferSqueezeShape(input.info().shape(), op.param());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::StridedSlice &op)
-{
- const auto input_index{op.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
- const auto &input = _operands.at(input_index);
- const auto starts_index{op.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
- const auto &starts = _operands.at(starts_index);
- const auto ends_index{op.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
- const auto &ends = _operands.at(ends_index);
- const auto strides_index{op.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
- const auto &strides = _operands.at(strides_index);
- const auto output_index = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_index);
-
- if (!(starts.isConstant() && ends.isConstant() && strides.isConstant()))
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- const auto begin_mask = op.param().begin_mask;
- const auto end_mask = op.param().end_mask;
- const auto shrink_axis_mask = op.param().shrink_axis_mask;
- const auto rank = input.info().shape().rank();
-
- auto starts_buf = reinterpret_cast<const uint32_t *>(starts.data()->base());
- auto ends_buf = reinterpret_cast<const uint32_t *>(ends.data()->base());
- auto strides_buf = reinterpret_cast<const uint32_t *>(strides.data()->base());
-
- auto op_params = shape_inference::buildStridedSliceParams(
- starts_buf, ends_buf, strides_buf, begin_mask, end_mask, shrink_axis_mask, rank);
-
- ir::Shape new_shape =
- shape_inference::inferStridedSliceShape(input.info().shape(), op_params, rank);
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Tile &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Tile::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto multiplier_idx{op.getInputs().at(ir::operation::Tile::Input::MULTIPLES)};
- const auto &multiplier = _operands.at(multiplier_idx);
-
- const auto output_idx = op.getOutputs().at(0);
- ir::Operand &output = _operands.at(output_idx);
-
- if (!multiplier.isConstant())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- auto multiplier_buffer = reinterpret_cast<const int32_t *>(multiplier.data()->base());
- assert(multiplier_buffer);
-
- // re-sizing output shape
- auto new_shape = shape_inference::inferTileShape(input.info().shape(), multiplier_buffer,
- multiplier.shape().num_elements());
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Transpose &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Transpose::Input::INPUT)};
- const auto &input = _operands.at(input_idx);
-
- const auto perm_idx{op.getInputs().at(ir::operation::Transpose::Input::PERMUTATION)};
- const auto &perm = _operands.at(perm_idx);
-
- // perm.shape() != ir::Shape{0} means that perm is (n-1...0)
- // TODO This condition changes to perm.num_elements() == 0
- const auto is_regular_transpose = perm.shape() == ir::Shape{0};
-
- // get mutable output operand
- const auto output_idx = op.getOutputs().at(0);
- auto &output = _operands.at(output_idx);
- if (!perm.isConstant() && !is_regular_transpose)
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- return;
- }
-
- ir::Shape new_shape;
- if (is_regular_transpose)
- {
- // Call by (n-1...0)
- new_shape = shape_inference::inferTransposeShape(input.info().shape(), nullptr, 0);
- }
- else
- {
- // Check rank
- if (input.info().shape().rank() != static_cast<int>(perm.info().shape().num_elements()))
- {
- throw std::runtime_error("StaticShapeInferer failed, bad rank size: " +
- std::to_string(perm.info().shape().num_elements()));
- }
-
- // set output shape, based on input and params
- const auto perm_buf = reinterpret_cast<const int32_t *>(perm.data()->base());
- new_shape = shape_inference::inferTransposeShape(input.info().shape(), perm_buf,
- perm.shape().num_elements());
- }
- output.info().shape(new_shape);
-}
-
-void StaticShapeInferer::visit(const ir::operation::Unpack &op)
-{
- const auto input_idx{op.getInputs().at(0)};
- const auto &input = _operands.at(input_idx);
- const auto num = op.param().num;
- const auto rank = input.shape().rank();
- const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
-
- assert(axis < rank);
- if (axis < 0)
- {
- for (int out_tensor_idx = 0; out_tensor_idx < num; out_tensor_idx++)
- {
- const auto output_idx = op.getOutputs().at(out_tensor_idx);
- ir::Operand &output = _operands.at(output_idx);
- output.info().setDynamic();
- }
- _return_has_dynamic_tensor = true;
- return;
- }
-
- ir::Shape new_shape = shape_inference::inferUnpackShape(input.info().shape(), axis, rank);
-
- // re-sizing output shape
- for (int out_tensor_idx = 0; out_tensor_idx < num; out_tensor_idx++)
- {
- const auto output_idx = op.getOutputs().at(out_tensor_idx);
- ir::Operand &output = _operands.at(output_idx);
- output.info().shape(new_shape);
- }
-}
-
-void StaticShapeInferer::visit(const ir::operation::While &op)
-{
- auto &cond_graph = _lowered_subgs.at(op.param().cond_subg_index)->graph();
- auto &body_graph = _lowered_subgs.at(op.param().body_subg_index)->graph();
- const auto inputs = op.getInputs();
- const auto &outputs = op.getOutputs();
-
- // re-sizing input shapes of then subgraph
- const auto &cond_inputs = cond_graph.getInputs();
- assert(inputs.size() == cond_inputs.size());
- for (size_t i = 0; i < inputs.size(); ++i)
- {
- const auto &input = _operands.at(inputs.at(i));
- auto &cond_input = cond_graph.operands().at(cond_inputs.at(i));
- if (input.info().isDynamic())
- {
- cond_input.info().setDynamic();
- }
- else
- {
- auto new_shape = input.info().shape();
- cond_input.info().shape(new_shape);
- }
- }
-
- // re-sizing input shapes of body subgraph
- const auto &body_inputs = body_graph.getInputs();
- assert(cond_inputs.size() == body_inputs.size());
- for (size_t i = 0; i < cond_inputs.size(); ++i)
- {
- const auto &cond_input = cond_graph.operands().at(cond_inputs.at(i));
- auto &body_input = body_graph.operands().at(body_inputs.at(i));
- if (cond_input.info().isDynamic())
- {
- body_input.info().setDynamic();
- }
- else
- {
- const auto &new_shape = cond_input.info().shape();
- body_input.info().shape(new_shape);
- }
- }
-
- // re-sizing operands of body subgraph
- StaticShapeInferer body_inferer(op.param().body_subg_index, _lowered_subgs);
- _lowered_subgs.at(op.param().body_subg_index)
- ->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- bool has_dynamic_tensor = body_inferer.infer(op_seq);
- op_seq.has_dynamic_tensor(has_dynamic_tensor);
- });
-
- // Check whether while operation's shapes are predictable
- // If any of shape of body outputs and cond inputs are different, non-constant operands would be
- // set to dynamic
- bool check_unpredictable_dynamic = false;
- const auto &body_outputs = body_graph.getOutputs();
- assert(body_outputs.size() == cond_inputs.size());
- for (size_t i = 0; i < body_outputs.size(); ++i)
- {
- const auto &body_output = body_graph.operands().at(body_outputs.at(i));
- auto &cond_input = cond_graph.operands().at(cond_inputs.at(i));
- if ((cond_input.info().isDynamic() != body_output.info().isDynamic()) ||
- (cond_input.shape() != body_output.shape()))
- {
- check_unpredictable_dynamic = true;
- break;
- }
- }
-
- if (check_unpredictable_dynamic)
- {
- // Set inputs of body subgraph
- for (const auto &input_index : body_inputs)
- {
- auto &input = body_graph.operands().at(input_index);
- if (!input.isConstant())
- {
- input.info().setDynamic();
- }
- }
-
- // Set inputs of cond subgraph
- for (const auto &input_index : cond_inputs)
- {
- auto &input = cond_graph.operands().at(input_index);
- if (!input.isConstant())
- {
- input.info().setDynamic();
- }
- }
-
- // Set non-constant operands of body subgraph to dynamic
- StaticShapeInferer body_inferer(op.param().body_subg_index, _lowered_subgs);
- _lowered_subgs.at(op.param().body_subg_index)
- ->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- bool has_dynamic_tensor = body_inferer.infer(op_seq);
- op_seq.has_dynamic_tensor(has_dynamic_tensor);
- });
- }
-
- // re-sizing operands of cond subgraph
- // If check_unpredictable_dynamic is true, non-constant operands of cond subgraph would be set to
- // dynamic
- StaticShapeInferer cond_inferer(op.param().cond_subg_index, _lowered_subgs);
- _lowered_subgs.at(op.param().cond_subg_index)
- ->iterateTopolOpSeqs([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- bool has_dynamic_tensor = cond_inferer.infer(op_seq);
- op_seq.has_dynamic_tensor(has_dynamic_tensor);
- });
-
- // re-sizing outputs of while operation
- // If check_unpredictable_dynamic is true, outputs of while operation would be set to dynamic
- assert(cond_inputs.size() == outputs.size());
- for (size_t i = 0; i < cond_inputs.size(); ++i)
- {
- const auto &cond_input = cond_graph.operands().at(cond_inputs.at(i));
- auto &output = _operands.at(outputs.at(i));
- if (cond_input.info().isDynamic())
- {
- output.info().setDynamic();
- _return_has_dynamic_tensor = true;
- }
- else
- {
- const auto new_shape = cond_input.info().shape();
- output.info().shape(new_shape);
- }
- }
-}
-
-} // namespace compiler
-
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/TensorBuilders.h b/runtime/onert/core/src/compiler/TensorBuilders.h
deleted file mode 100644
index 3b0360b4b..000000000
--- a/runtime/onert/core/src/compiler/TensorBuilders.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_TENSOR_BUILDERS_H__
-#define __ONERT_COMPILER_TENSOR_BUILDERS_H__
-
-#include <unordered_set>
-#include <memory>
-#include "backend/BackendContext.h"
-#include "backend/Backend.h"
-#include "backend/controlflow/Config.h"
-#include "backend/controlflow/TensorBuilder.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-class TensorBuilders
-{
-public:
- TensorBuilders() = default;
-
- TensorBuilders(const onert::backend::BackendContexts &backend_contexts, bool include_controlflow)
- {
- for (const auto &e : backend_contexts)
- {
- if (e.first->config()->id() == backend::controlflow::Config::ID)
- {
- _cf_tensor_builder = std::dynamic_pointer_cast<backend::controlflow::TensorBuilder>(
- e.second->tensor_builder);
- if (include_controlflow)
- _tensor_builders.insert(e.second->tensor_builder);
- }
- else
- {
- _tensor_builders.insert(e.second->tensor_builder);
- }
- }
- }
-
- std::unordered_set<std::shared_ptr<onert::backend::ITensorBuilder>>::const_iterator begin() const
- {
- return _tensor_builders.cbegin();
- }
- std::unordered_set<std::shared_ptr<onert::backend::ITensorBuilder>>::const_iterator end() const
- {
- return _tensor_builders.cend();
- }
-
- std::shared_ptr<backend::controlflow::TensorBuilder> getControlflowTensorBuilder() const
- {
- return _cf_tensor_builder;
- }
-
-private:
- std::unordered_set<std::shared_ptr<backend::ITensorBuilder>> _tensor_builders;
- std::shared_ptr<backend::controlflow::TensorBuilder> _cf_tensor_builder;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_TENSOR_BUILDERS_H__
diff --git a/runtime/onert/core/src/compiler/TensorRegistries.h b/runtime/onert/core/src/compiler/TensorRegistries.h
deleted file mode 100644
index e42225cbf..000000000
--- a/runtime/onert/core/src/compiler/TensorRegistries.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_TENSOR_REGISTRIES_H__
-#define __ONERT_COMPILER_TENSOR_REGISTRIES_H__
-
-#include <unordered_set>
-#include <memory>
-#include "backend/BackendContext.h"
-#include "backend/Backend.h"
-#include "backend/controlflow/Config.h"
-#include "backend/controlflow/TensorBuilder.h"
-#include "backend/controlflow/TensorRegistry.h"
-
-namespace onert
-{
-namespace compiler
-{
-
-class TensorRegistries
-{
-public:
- TensorRegistries() = default;
-
- TensorRegistries(const onert::backend::BackendContexts &backend_contexts,
- bool include_controlflow)
- {
- for (const auto &e : backend_contexts)
- {
- auto tensor_reg = e.second->tensor_registry;
- if (e.first->config()->id() == backend::controlflow::Config::ID)
- {
- _cf_tensor_reg =
- std::dynamic_pointer_cast<backend::controlflow::TensorRegistry>(tensor_reg);
- if (include_controlflow)
- _tensor_regs.insert(tensor_reg);
- }
- else
- {
- _tensor_regs.insert(tensor_reg);
- }
- }
- }
-
- std::unordered_set<std::shared_ptr<onert::backend::ITensorRegistry>>::const_iterator begin() const
- {
- return _tensor_regs.cbegin();
- }
- std::unordered_set<std::shared_ptr<onert::backend::ITensorRegistry>>::const_iterator end() const
- {
- return _tensor_regs.cend();
- }
-
- std::shared_ptr<backend::controlflow::TensorRegistry> getControlflowTensorRegistry() const
- {
- return _cf_tensor_reg;
- }
-
- backend::ITensor *getITensor(ir::OperandIndex ind) const
- {
- for (auto &tensor_reg : _tensor_regs)
- {
- auto tensor = tensor_reg->getITensor(ind);
- if (tensor)
- return tensor;
- }
- return nullptr;
- }
-
-private:
- std::unordered_set<std::shared_ptr<backend::ITensorRegistry>> _tensor_regs;
- std::shared_ptr<backend::controlflow::TensorRegistry> _cf_tensor_reg;
-};
-
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_TENSOR_REGISTRIES_H__
diff --git a/runtime/onert/core/src/compiler/pass/ConstantInsertionPass.cc b/runtime/onert/core/src/compiler/pass/ConstantInsertionPass.cc
deleted file mode 100644
index ef6240894..000000000
--- a/runtime/onert/core/src/compiler/pass/ConstantInsertionPass.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConstantInsertionPass.h"
-
-#include "backend/Backend.h"
-#include <ir/Graph.h>
-#include <util/Utils.h>
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void ConstantInsertionPass::callback(const ir::OperationIndex &node_index, ir::Operation &node)
-{
- const auto &op_sequence_index = _lowered_graph.op_seqs().getOperation(node_index);
- const auto op_seq_lower_info = _lowered_graph.getLowerInfo(op_sequence_index);
- const auto backend = op_seq_lower_info->backend();
- const auto layout = op_seq_lower_info->layout();
- const auto factor = ir::operand::PermuteFactor{backend, layout};
-
- for (const auto input : node.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- auto &object = _graph.operands().at(input);
-
- if (object.isConstant())
- {
- const auto key = ReplaceKey{input, factor};
- if (_replace_operands_map.count(key) == 0)
- {
- ir::Operand new_object(object);
- new_object.unsetDef();
- // TODO Remove const_case
- const_cast<ir::OperationIndexSet &>(new_object.getUses()).clear();
- const auto new_index = _graph.operands().emplace(new_object);
- _replace_operands_map[key] = new_index;
- }
-
- const auto replaced_input = _replace_operands_map[key];
- // Update op_seq
- if (_lowered_graph.op_seqs().at(op_sequence_index).getInputs().contains(input))
- {
- // All inputs of op_seq have the same PermuteFactor because those inputs are inputs of first
- // operation
- _lowered_graph.op_seqs().at(op_sequence_index).replaceInputs(input, replaced_input);
- }
-
- // Update the same inputs of a node at once because inputs of an operation have the same
- // PermuteFactor
- node.replaceInputs(input, replaced_input);
-
- // Update operand
- auto &replaced_object = _graph.operands().at(replaced_input);
- replaced_object.insertUse(node_index);
-
- // Remove this node from uses of origin operand
- // Constant operand has no def.
- assert(!object.getDef().valid());
- object.removeUse(node_index);
-
- // Remove origin operand
- if (object.getUses().size() == 0)
- _graph.removeOperand(input);
- }
- }
-
- // Now this runtime does not support the node making output as constant
- for (const auto &output : node.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- UNUSED_RELEASE(output);
- assert(!_graph.operands().at(output).isConstant());
- }
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/ConstantInsertionPass.h b/runtime/onert/core/src/compiler/pass/ConstantInsertionPass.h
deleted file mode 100644
index 052883c92..000000000
--- a/runtime/onert/core/src/compiler/pass/ConstantInsertionPass.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_CONSTANT_INSERTION_PASS_H__
-#define __ONERT_COMPILER_PASS_CONSTANT_INSERTION_PASS_H__
-
-#include <ir/operand/PermuteFactor.h>
-#include <ir/Index.h>
-#include "LoweredOperationPass.h"
-#include <unordered_map>
-#include <utility>
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class ConstantInsertionPass : public LoweredOperationPass
-{
-public:
- using LoweredOperationPass::LoweredOperationPass;
-
-public:
- std::string id() final { return "ConstantInsertionPass"; }
-
-public:
- void callback(const ir::OperationIndex &index, ir::Operation &node) final;
-
-private:
- struct ReplaceKey
- {
- ir::OperandIndex index;
- ir::operand::PermuteFactor factor;
-
- bool operator==(const ReplaceKey &other) const
- {
- return index == other.index && factor == other.factor;
- }
- };
-
- /**
- * @brief Structure that provides hash function of ReplaceKey
- */
- struct KeyHasher
- {
- std::size_t operator()(const ReplaceKey &key) const noexcept
- {
- using std::hash;
- return hash<ir::OperandIndex>()(key.index) ^
- (hash<ir::operand::PermuteFactor>()(key.factor) << 1);
- }
- };
-
- std::unordered_map<ReplaceKey, ir::OperandIndex, KeyHasher> _replace_operands_map;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_CONSTANT_INSERTION_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/ConstantLoweringPass.cc b/runtime/onert/core/src/compiler/pass/ConstantLoweringPass.cc
deleted file mode 100644
index 1c1dbe0ee..000000000
--- a/runtime/onert/core/src/compiler/pass/ConstantLoweringPass.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConstantLoweringPass.h"
-
-#include "backend/Backend.h"
-#include <ir/Graph.h>
-#include <ir/operand/PermuteFactor.h>
-#include <util/Utils.h>
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void ConstantLoweringPass::callback(const ir::OperationIndex &node_index, ir::Operation &node)
-{
- const auto &op_sequence_index = _lowered_graph.op_seqs().getOperation(node_index);
- const auto op_seq_lower_info = _lowered_graph.getLowerInfo(op_sequence_index);
- const auto backend = op_seq_lower_info->backend();
- const auto layout = op_seq_lower_info->layout();
- const auto factor = ir::operand::PermuteFactor{backend, layout};
-
- // Now this runtime does not support the node making output of operation as constant
- for (const auto input : node.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED)
- {
- auto &object = _graph.operands().at(input);
- if (object.isConstant())
- {
- // All constant operand are already assinged at each backend by ContantInsertionPass. So a
- // constant has `def` and `use` as the same PermuteFactor
- _lowered_graph.setLowerInfo(input, std::make_unique<ir::operand::LowerInfo>());
- _lowered_graph.getLowerInfo(input)->addDefPermuteFactor(factor);
- _lowered_graph.getLowerInfo(input)->addUsePermuteFactor(factor);
- }
- }
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/ConstantLoweringPass.h b/runtime/onert/core/src/compiler/pass/ConstantLoweringPass.h
deleted file mode 100644
index e17d776d1..000000000
--- a/runtime/onert/core/src/compiler/pass/ConstantLoweringPass.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_CONSTANT_LOWERING_PASS_H__
-#define __ONERT_COMPILER_PASS_CONSTANT_LOWERING_PASS_H__
-
-#include <ir/Index.h>
-#include "LoweredOperationPass.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class ConstantLoweringPass : public LoweredOperationPass
-{
-public:
- using LoweredOperationPass::LoweredOperationPass;
-
-public:
- std::string id() final { return "ConstantLoweringPass"; }
-
-public:
- void callback(const ir::OperationIndex &index, ir::Operation &node) final;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_CONSTANT_LOWERING_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/ConstantOutputPass.cc b/runtime/onert/core/src/compiler/pass/ConstantOutputPass.cc
deleted file mode 100644
index c176f6ffb..000000000
--- a/runtime/onert/core/src/compiler/pass/ConstantOutputPass.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConstantOutputPass.h"
-
-#include "ir/Graph.h"
-#include "ir/operation/Permute.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void ConstantOutputPass::callback(const ir::OperandIndex &ind, ir::Operand &obj)
-{
- if (!_graph.getOutputs().contains(ind) || !obj.isConstant())
- return;
-
- auto permute_input_ind = _graph.addOperand(obj.shape(), obj.typeInfo());
- auto &permute_input_obj = _graph.operands().at(permute_input_ind);
-
- // Move the const data
- permute_input_obj.data(obj.shareData());
- obj.releaseData();
- obj.info().setAsNonConst();
-
- using ir::operation::Permute;
- auto permute_obj = std::make_unique<Permute>(permute_input_ind, ind, Permute::Type::COPY);
- auto permute_ind = _graph.operations().push(std::move(permute_obj));
-
- permute_input_obj.insertUse(permute_ind);
- obj.setDef(permute_ind);
-
- // Make the operations that uses this operand to use the generated operand
- auto orig_uses = obj.getUses();
- for (auto use : orig_uses)
- {
- permute_input_obj.insertUse(use);
- obj.removeUse(use);
- _graph.operations().at(use).replaceInputs(ind, permute_input_ind);
- }
-
- VERBOSE(ConstantOutputPass) << "Permute Op inserted for a constant ouput, node index : "
- << permute_ind << std::endl;
- VERBOSE(ConstantOutputPass) << " - Input (inserted) Operand : " << permute_input_ind
- << std::endl;
- VERBOSE(ConstantOutputPass) << " - Output(original) Operand : " << ind << std::endl;
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/ConstantOutputPass.h b/runtime/onert/core/src/compiler/pass/ConstantOutputPass.h
deleted file mode 100644
index 193dd3a68..000000000
--- a/runtime/onert/core/src/compiler/pass/ConstantOutputPass.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_CONSTANT_OUTPUT_PASS_H__
-#define __ONERT_COMPILER_PASS_CONSTANT_OUTPUT_PASS_H__
-
-#include "OperandPass.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-/**
- * @brief Pass to specially handle constant model outputs
- *
- * As an output buffer is given right before an execution but constant initialization is done at
- * prepare phase, the current runtime structure cannot handle when an output is constant.
- * To resolve this problem, this pass inserts a Permute layer with a const input and make the model
- * output tensor to be its output.
- *
- * e.g.)
- *
- * ((Const Output))
- *
- * becomes
- *
- * (Const) -> [Permute] -> ((Output))
- *
- * Note that this is a mandatory pass for Graph.
- */
-class ConstantOutputPass : public OperandPass
-{
-public:
- using OperandPass::OperandPass;
-
-public:
- std::string id() final { return "ConstantOutputPass"; }
-
-public:
- void callback(const ir::OperandIndex &i, ir::Operand &o) final;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_CONSTANT_INSERTION_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/LoweredOperandPass.h b/runtime/onert/core/src/compiler/pass/LoweredOperandPass.h
deleted file mode 100644
index 0c5f7d745..000000000
--- a/runtime/onert/core/src/compiler/pass/LoweredOperandPass.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_PASS_LOWERED_OPERAND_PASS_H__
-#define __ONERT_IR_PASS_LOWERED_OPERAND_PASS_H__
-
-#include "OperandPass.h"
-#include "compiler/LoweredGraph.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class LoweredOperandPass : public OperandPass
-{
-public:
- LoweredOperandPass(compiler::LoweredGraph &lowered_graph)
- : OperandPass{lowered_graph.graph()}, _lowered_graph{lowered_graph}
- {
- // DO NOTHING
- }
-
- virtual ~LoweredOperandPass() = default;
-
- std::string id() override = 0;
- void callback(const ir::OperandIndex &i, ir::Operand &o) override = 0;
-
-protected:
- compiler::LoweredGraph &_lowered_graph;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_IR_PASS_LOWERED_OPERAND_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/LoweredOperationPass.h b/runtime/onert/core/src/compiler/pass/LoweredOperationPass.h
deleted file mode 100644
index 5c8569be2..000000000
--- a/runtime/onert/core/src/compiler/pass/LoweredOperationPass.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_PASS_LOWERED_OPERATION_PASS_H__
-#define __ONERT_IR_PASS_LOWERED_OPERATION_PASS_H__
-
-#include "OperationPass.h"
-#include "compiler/LoweredGraph.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class LoweredOperationPass : public OperationPass
-{
-public:
- LoweredOperationPass(LoweredGraph &lowered_graph)
- : OperationPass{lowered_graph.graph()}, _lowered_graph{lowered_graph}
- {
- // DO NOTHING
- }
-
- virtual ~LoweredOperationPass() = default;
-
- std::string id() override = 0;
- void callback(const ir::OperationIndex &i, ir::Operation &o) override = 0;
-
-protected:
- LoweredGraph &_lowered_graph;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_IR_PASS_LOWERED_OPERATION_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/OddOutputPass.cc b/runtime/onert/core/src/compiler/pass/OddOutputPass.cc
deleted file mode 100644
index f50fae0d3..000000000
--- a/runtime/onert/core/src/compiler/pass/OddOutputPass.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OddOutputPass.h"
-
-#include "ir/Graph.h"
-#include "ir/operation/Permute.h"
-#include "util/logging.h"
-#include "util/Utils.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void OddOutputPass::run()
-{
- auto &outputs = _graph.getOutputs();
-
- VERBOSE(OddOutputPass) << "Case 1 : An operand which is a model output and a model input"
- << std::endl;
- for (auto &ind : outputs)
- {
- if (_graph.getInputs().contains(ind))
- {
- auto permute_output_ind = insertPermute(ind);
- // Update the output to be newly added operand
- _graph.getOutputs().replace(ind, permute_output_ind);
- }
- }
-
- VERBOSE(OddOutputPass) << "Case 2 : Two or more duplicated outputs" << std::endl;
- std::unordered_set<ir::OperandIndex> occurence;
- for (auto &ind : outputs)
- {
- auto &obj = _graph.operands().at(ind);
- if (occurence.count(ind) == 0)
- {
- occurence.insert(ind);
- continue;
- }
-
- // Panic when it is const, it must have been handled earlier in another pass
- UNUSED_RELEASE(obj);
- assert(!obj.isConstant());
-
- auto permute_output_ind = insertPermute(ind);
- ind = permute_output_ind; // Replace output index to fix output duplication
- }
-}
-
-ir::OperandIndex OddOutputPass::insertPermute(ir::OperandIndex ind)
-{
- auto &obj = _graph.operands().at(ind);
- auto output_ind = _graph.addOperand(obj.shape(), obj.typeInfo());
- auto &output_obj = _graph.operands().at(output_ind);
-
- using ir::operation::Permute;
- auto permute_obj = std::make_unique<Permute>(ind, output_ind, Permute::Type::COPY);
- auto permute_ind = _graph.operations().push(std::move(permute_obj));
-
- output_obj.setDef(permute_ind);
- obj.insertUse(permute_ind);
-
- VERBOSE(OddOutputPass) << "Permute Op inserted for a constant output, node index : "
- << permute_ind << std::endl;
- VERBOSE(OddOutputPass) << " - Input (original) Operand : " << ind << std::endl;
- VERBOSE(OddOutputPass) << " - Output(inserted) Operand : " << output_ind << std::endl;
-
- return output_ind;
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/OddOutputPass.h b/runtime/onert/core/src/compiler/pass/OddOutputPass.h
deleted file mode 100644
index 2accbac60..000000000
--- a/runtime/onert/core/src/compiler/pass/OddOutputPass.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_ODD_OUTPUT_PASS_H__
-#define __ONERT_COMPILER_PASS_ODD_OUTPUT_PASS_H__
-
-#include <unordered_set>
-
-#include "Pass.h"
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-/**
- * @brief Pass to specially handle odd outputs in a subgraph
- *
- * Runtime Graph IR requires every input or output must have distinct tensor index, this is onert's
- * restriction. However we allow duplication of indices in the models(or API). So we should
- * transform the graph after model-loading.
- *
- * This is necessary since our API lets users to set different buffers for each input and output so
- * it is unavoidable that we must copy the value at runtime.
- *
- * Note that this is a mandatory pass for Graph.
- *
- * Case 1 : An operand which is a model output and a model input
- *
- * Create an operand and insert a Permute(copy) op between them. And change the output to be the
- * newly generated operand.
- *
- * e.g.)
- *
- * ```
- * ((#0 Input0 and also Output0))
- * becomes
- * ((#0 Input0)) -> [#0 Permute] -> ((#1 Output0))
- * ```
- *
- * Case 2 : Two or more duplicated outputs
- *
- * Do the same with Case 1, but between two outputs of the same tensor index.
- *
- * e.g.)
- *
- * ```
- * ((#0 Input0)) -> [#0 Some Operation] -> ((#1 Output0 and also Output1))
- * becomes
- * ((#0 Input0)) -> [#0 Some Operation] -> ((#1 Output0)) [#1 Permute] -> ((#2 Output1))
- * ```
- *
- */
-class OddOutputPass : public Pass
-{
-public:
- using Pass::Pass;
-
-public:
- std::string id() final { return "OddOutputPass"; }
-
-public:
- void run() override;
-
-private:
- ir::OperandIndex insertPermute(ir::OperandIndex input);
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_ODD_OUTPUT_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/OperandPass.cc b/runtime/onert/core/src/compiler/pass/OperandPass.cc
deleted file mode 100644
index 50c001c30..000000000
--- a/runtime/onert/core/src/compiler/pass/OperandPass.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperandPass.h"
-
-#include "ir/Graph.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void OperandPass::run()
-{
- _graph.operands().iterate(
- [&](const ir::OperandIndex &index, ir::Operand &object) { callback(index, object); });
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/OperandPass.h b/runtime/onert/core/src/compiler/pass/OperandPass.h
deleted file mode 100644
index b094879c5..000000000
--- a/runtime/onert/core/src/compiler/pass/OperandPass.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_OPERAND_PASS_H__
-#define __ONERT_COMPILER_PASS_OPERAND_PASS_H__
-
-#include "Pass.h"
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-class Operand;
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class OperandPass : public Pass
-{
-public:
- using Pass::Pass;
- virtual ~OperandPass() = default;
-
-public:
- std::string id() override = 0;
- void run() override final;
- virtual void callback(const ir::OperandIndex &i, ir::Operand &o) = 0;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_OPERAND_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/OperationPass.cc b/runtime/onert/core/src/compiler/pass/OperationPass.cc
deleted file mode 100644
index d7a55cb22..000000000
--- a/runtime/onert/core/src/compiler/pass/OperationPass.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationPass.h"
-
-#include "ir/Index.h"
-#include "ir/Operation.h"
-#include "ir/Graph.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void OperationPass::run()
-{
- _graph.operations().iterate(
- [&](const ir::OperationIndex &index, ir::Operation &node) { callback(index, node); });
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/OperationPass.h b/runtime/onert/core/src/compiler/pass/OperationPass.h
deleted file mode 100644
index ac4d818a2..000000000
--- a/runtime/onert/core/src/compiler/pass/OperationPass.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file OperationPass.h
- * @brief This file contains OperationPass class
- */
-
-#ifndef __ONERT_COMPILER_PASS_OPERATION_PASS_H__
-#define __ONERT_COMPILER_PASS_OPERATION_PASS_H__
-
-#include "Pass.h"
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace ir
-{
-class Operation;
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-/**
- * @brief Class to iterate over operations and calls callback() method
- */
-class OperationPass : public Pass
-{
-public:
- using Pass::Pass;
- virtual ~OperationPass() = default;
-
-public:
- /**
- * @brief Returns string id for this pass. Same with class name.
- *
- * @return string id
- */
- std::string id() override = 0;
-
- /**
- * @brief Be called for all nodes of graph.
- * @param index is the index of a node in graph
- * @param node is the node in graph
- */
- virtual void callback(const ir::OperationIndex &index, ir::Operation &node) = 0;
-
- /**
- * @brief Run the pass
- */
- void run() final;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_OPERATION_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/Pass.h b/runtime/onert/core/src/compiler/pass/Pass.h
deleted file mode 100644
index 3f356c337..000000000
--- a/runtime/onert/core/src/compiler/pass/Pass.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_PASS_H__
-#define __ONERT_COMPILER_PASS_PASS_H__
-
-#include <string>
-
-namespace onert
-{
-namespace ir
-{
-class Graph;
-} // namespace compiler
-} // namespace onert
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class Pass
-{
-public:
- Pass(ir::Graph &graph) : _graph{graph} {}
- virtual ~Pass() = default;
-
-public:
- virtual std::string id() = 0;
- virtual void run() = 0;
-
-protected:
- ir::Graph &_graph;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/PassRunner.cc b/runtime/onert/core/src/compiler/pass/PassRunner.cc
deleted file mode 100644
index 2a058c8ac..000000000
--- a/runtime/onert/core/src/compiler/pass/PassRunner.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PassRunner.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-PassRunner &PassRunner::append(std::unique_ptr<Pass> pass)
-{
- _passes.emplace_back(std::move(pass));
- return *this;
-}
-
-void PassRunner::run()
-{
- for (auto &pass : _passes)
- {
- VERBOSE(PassRunner) << "Start running '" << pass->id() << "'" << std::endl;
- pass->run();
- VERBOSE(PassRunner) << "Finished running '" << pass->id() << "'" << std::endl;
- // TODO Dump graph(LowerInfo, OpSequence, ...)?
- }
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/PassRunner.h b/runtime/onert/core/src/compiler/pass/PassRunner.h
deleted file mode 100644
index a43c83f89..000000000
--- a/runtime/onert/core/src/compiler/pass/PassRunner.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_PASS_RUNNER_H__
-#define __ONERT_COMPILER_PASS_PASS_RUNNER_H__
-
-#include <initializer_list>
-#include <memory>
-#include <vector>
-
-#include "Pass.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-/**
- * @brief Composite passes with logging
- */
-class PassRunner
-{
-public:
- PassRunner() = default;
- PassRunner &append(std::unique_ptr<Pass> pass);
-
- void run();
-
-private:
- std::vector<std::unique_ptr<Pass>> _passes;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_PASS_RUNNER_H__
diff --git a/runtime/onert/core/src/compiler/pass/PermutationEliminationPass.cc b/runtime/onert/core/src/compiler/pass/PermutationEliminationPass.cc
deleted file mode 100644
index 504f1b995..000000000
--- a/runtime/onert/core/src/compiler/pass/PermutationEliminationPass.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PermutationEliminationPass.h"
-#include "backend/controlflow/Config.h"
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void PermutationEliminationPass::callback(const ir::OperationIndex &ind, ir::Operation &node)
-{
- _op_ind = ind;
- node.accept(*this);
-};
-
-void PermutationEliminationPass::visit(const ir::operation::Permute &node)
-{
- auto in_operand = node.getInputs().at(0);
- auto out_operand = node.getOutputs().at(0);
-
- // Check if two tensors are both portable if not, we can't eliminate the node
- {
- auto in_def_factor = _lowered_graph.getLowerInfo(in_operand)->def_factors().getOnlyElement();
- auto out_def_factor = _lowered_graph.getLowerInfo(out_operand)->def_factors().getOnlyElement();
-
- auto in_config = in_def_factor.backend()->config();
- auto out_config = out_def_factor.backend()->config();
-
- // FIXME Supporting dynamic tensor does not exactly mean those are portable.
- // It may need to have another config option for checking if each uses `IPortableTensor`.
- if (!(in_config->supportDynamicTensor() && out_config->supportDynamicTensor()))
- return;
- }
-
- if (_graph.getOutputs().contains(out_operand))
- {
- // If the input is a const, we cannot remove it since we cannot put the constant data in the
- // output buffer during prepare phase.
- auto permute_input = node.getInputs().at(0);
- if (_graph.operands().at(permute_input).isConstant())
- return;
- // If the input is a model input, we cannot remove it since our API lets users to set different
- // buffers for inputs and outputs even though one tensor is both at the same time.
- auto permute_output = node.getOutputs().at(0);
- if (_graph.getInputs().contains(permute_input) && _graph.getOutputs().contains(permute_output))
- return;
- // Likewise, if copying between outputs to outputs, keep it.
- if (_graph.getOutputs().contains(permute_input) && _graph.getOutputs().contains(permute_output))
- return;
-
- // Exceptional case : When the output operand is a model output
- // In this case we keep the output and remove the input
-
- auto &out_operand_obj = _graph.operands().at(out_operand);
- assert(out_operand_obj.getDef() == _op_ind);
- out_operand_obj.unsetDef();
- _lowered_graph.op_seqs().iterate([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- if (!op_seq.getOutputs().contains(in_operand))
- return;
-
- // Update OpSequence/ir::Operation edges and ir::Operand edges
- op_seq.replaceOutputs(in_operand, out_operand);
- for (auto op : op_seq.operations())
- {
- auto &operation_obj = _graph.operations().at(op);
- if (operation_obj.getOutputs().contains(in_operand))
- {
- operation_obj.replaceOutputs(in_operand, out_operand);
- out_operand_obj.setDef(op);
- }
- }
- });
-
- // Remove Permute operation, enclosing OpSequence and the operand
- {
- _graph.removeOperand(in_operand);
-
- auto op_seq_ind = _lowered_graph.op_seqs().getOperation(_op_ind);
- // Assumes enclosing OpSequence contatins just this Permute operation
- assert(_lowered_graph.op_seqs().at(op_seq_ind).size() == 1);
- _lowered_graph.op_seqs().remove(op_seq_ind);
- _graph.operations().remove(_op_ind);
- }
-
- _lowered_graph.op_seqs().iterate([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- if (!op_seq.getInputs().contains(in_operand))
- return;
-
- op_seq.replaceInputs(in_operand, out_operand);
- for (auto op : op_seq.operations())
- {
- auto &operation_obj = _graph.operations().at(op);
- if (operation_obj.getInputs().contains(in_operand))
- {
- operation_obj.replaceInputs(in_operand, out_operand);
- out_operand_obj.insertUse(op);
- }
- }
- });
-
- VERBOSE(removePermute) << "Permute Op removed, node index : " << _op_ind << std::endl;
- VERBOSE(removePermute) << " - Input (removed) ir::Operand : " << in_operand << std::endl;
- VERBOSE(removePermute) << " - Output(kept) ir::Operand : " << out_operand << std::endl;
- }
- else
- {
- // Otherwise keep the input and remove the output
-
- auto &in_operand_obj = _graph.operands().at(in_operand);
- in_operand_obj.removeUse(_op_ind);
-
- // Make OpSequences(that use the output) use the input
- _lowered_graph.op_seqs().iterate([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) {
- if (!op_seq.getInputs().contains(out_operand))
- return;
-
- op_seq.replaceInputs(out_operand, in_operand);
- for (auto op : op_seq.operations())
- {
- auto &operation_obj = _graph.operations().at(op);
- if (operation_obj.getInputs().contains(out_operand))
- {
- operation_obj.replaceInputs(out_operand, in_operand);
- in_operand_obj.insertUse(op);
- }
- }
- });
-
- // Remove Permute operation, enclosing OpSequence and the operand
- {
- _graph.removeOperand(out_operand);
-
- auto op_seq_ind = _lowered_graph.op_seqs().getOperation(_op_ind);
- // Assumes enclosing OpSequence contatins just this Permute operation
- assert(_lowered_graph.op_seqs().at(op_seq_ind).size() == 1);
- _lowered_graph.op_seqs().remove(op_seq_ind);
- _graph.operations().remove(_op_ind);
- }
-
- VERBOSE(removePermute) << "Permute Op removed, node index : " << _op_ind << std::endl;
- VERBOSE(removePermute) << " - Input (kept) ir::Operand : " << in_operand << std::endl;
- VERBOSE(removePermute) << " - Output(removed) ir::Operand : " << out_operand << std::endl;
- }
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/PermutationEliminationPass.h b/runtime/onert/core/src/compiler/pass/PermutationEliminationPass.h
deleted file mode 100644
index 29daf1a82..000000000
--- a/runtime/onert/core/src/compiler/pass/PermutationEliminationPass.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_PERMUTATION_ELIMINATION_PASS_H__
-#define __ONERT_COMPILER_PASS_PERMUTATION_ELIMINATION_PASS_H__
-
-#include "ir/OperationVisitor.h"
-#include "LoweredOperationPass.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-/**
- * @brief An optimization pass that removes Permute operations if possible
- *
- * There may be some Permute operations that are inserted by PermutationInsertionPass or other
- * passes. This pass checks all Permute operations and eliminates them if Permute in/out tensors
- * are compatible and layouts match.
- *
- * Permute input tensor is kept and the output is removed for all the cases, except model outputs.
- * As all output tensors have to be controlflow backend, so the output is kept.
- *
- * @note This is an optimization pass which means that everything should work fine even if this pass
- * was skipped.
- */
-class PermutationEliminationPass : public LoweredOperationPass, public ir::OperationVisitor
-{
-public:
- using LoweredOperationPass::LoweredOperationPass;
-
-public:
- std::string id() final { return "PermutationEliminationPass"; }
-
-public:
- void callback(const ir::OperationIndex &i, ir::Operation &n) final;
-
-private:
- void visit(const ir::operation::Permute &) final;
-
-private:
- ir::OperationIndex _op_ind;
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_PERMUTATION_ELIMINATION_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc b/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
deleted file mode 100644
index c83a72ada..000000000
--- a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PermutationInsertionPass.h"
-
-#include <cassert>
-#include <utility>
-#include <unordered_map>
-
-#include "backend/controlflow/Config.h"
-#include "ir/Operand.h"
-#include "ir/operation/LowerInfo.h"
-#include "ir/Graph.h"
-#include "backend/IConfig.h"
-#include "util/logging.h"
-#include <memory>
-#include "ir/operation/Permute.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-void PermutationInsertionPass::callback(const ir::OperandIndex &index, ir::Operand &object)
-{
- auto &&operand_li = _lowered_graph.getLowerInfo(index);
- assert(operand_li);
-
- // NOTE Later, constants also will have Def
- // Ignore constants
- if (operand_li->def_factors().size() == 0)
- {
- return;
- }
-
- std::list<ir::OperationIndex> permute_indexes;
-
- // Build a map for all necessary type of operands
- std::unordered_map<ir::operand::PermuteFactor, ir::OperandIndex> factor_to_index;
- {
- assert(operand_li->def_factors().size() == 1);
- for (auto factor : operand_li->def_factors())
- {
- factor_to_index.emplace(factor, index);
- }
-
- auto insert_set = operand_li->use_factors() - operand_li->def_factors();
- for (auto factor : insert_set)
- {
- const auto permute_operation_index = insertPermute(index, factor);
- permute_indexes.push_back(permute_operation_index);
- const auto &permute_operation = _graph.operations().at(permute_operation_index);
- const auto permuted_operand_index = permute_operation.getOutputs().at(0);
- factor_to_index.emplace(factor, permuted_operand_index);
- }
- }
-
- // Update operations' input that uses this operand
- {
- std::list<ir::OperationIndex> remove_list;
-
- auto uses = object.getUses();
- for (auto use : uses)
- {
- // If permute operation, ignore it
- if (std::find(permute_indexes.begin(), permute_indexes.end(), use) != permute_indexes.end())
- continue;
-
- auto &operation = _graph.operations().at(use);
- assert(_lowered_graph.op_seqs().containsOperation(use));
- auto op_seq_index = _lowered_graph.op_seqs().getOperation(use);
- auto op_seq_li = _lowered_graph.getLowerInfo(op_seq_index);
- assert(op_seq_li);
- const auto op_seq_layout = op_seq_li->layout();
- const backend::Backend *backend = op_seq_li->backend();
- assert(backend);
- auto use_node_inputs = operation.getInputs();
- assert(use_node_inputs.contains(index));
-
- auto new_index = factor_to_index.at({backend, op_seq_layout});
- if (index != new_index)
- {
- // Update from op_seq
- // Replace the same inputs of an OpSequence at once for the following reasons:
- // 1. An OpSequence's inputs are the same inputs of first operation
- // 2. An OpSequence may have inputs as the same operand (2 or more).
- // 3. The same inputs of OpSequence have the same PermuteFactor.
- _lowered_graph.op_seqs().at(op_seq_index).replaceInputs(index, new_index);
-
- // Update from operation
- // Replace the same inputs of an operation at once for the following reasons:
- // No. 2 and 3 above
- operation.replaceInputs(index, new_index);
-
- // Update from operand
- remove_list.push_back(
- use); // Removal should be done in another loop since we are in the loop
- _graph.operands().at(new_index).insertUse(use);
- }
- }
-
- for (auto &operation : remove_list)
- {
- object.removeUse(operation);
- }
- }
-}
-
-ir::OperationIndex PermutationInsertionPass::insertPermute(const ir::OperandIndex &operand_index,
- const ir::operand::PermuteFactor &factor)
-{
- assert(!_graph.isBuildingPhase());
-
- auto &operand = _graph.operands().at(operand_index);
-
- // Generate output operand and permute operation
- auto out_operand_index = _graph.addOperand(operand.shape(), operand.typeInfo());
- // change model output if operand_index is model output index
- auto &model_outputs = _graph.getOutputs();
- if (model_outputs.contains(operand_index))
- {
- model_outputs.replace(operand_index, out_operand_index);
- }
-
- // Find Permute information
- auto input_factor = _lowered_graph.getLowerInfo(operand_index)->def_factors().getOnlyElement();
- auto input_backend = input_factor.backend();
- auto output_backend = factor.backend();
- // NOTE Permute may not have specific layout because the layout of input and output may be
- // different.
- const auto permute_node_layout = ir::Layout::UNKNOWN;
- // NOTE If one backend supports several layout, the backend must support Permute operation
- const backend::Backend *permute_node_backend = compiler::BackendManager::get().getControlflow();
- if (input_backend == output_backend)
- {
- permute_node_backend = input_backend;
- }
- const ir::operand::PermuteFactor permute_node_factor{permute_node_backend, permute_node_layout};
-
- // Update LowerInfo of input operand
- auto operand_lower_info = _lowered_graph.getLowerInfo(operand_index);
- operand_lower_info->removeUsePermuteFactor(factor);
- operand_lower_info->addUsePermuteFactor(permute_node_factor);
-
- // Update LowerInfo of output operand
- auto out_operand_li = std::make_unique<ir::operand::LowerInfo>();
-
- // The input and output factors of all nodes will be the same except Permute. So Tensor's
- // allocators allocates memory using only the information of def permutation factor now.
- // TODO Change param to permute_node_factor
- out_operand_li->addDefPermuteFactor(factor);
- out_operand_li->addUsePermuteFactor(factor);
- _lowered_graph.setLowerInfo(out_operand_index, std::move(out_operand_li));
-
- // Insert permute operation to the graph
- const auto input_layout = input_factor.layout();
- const auto output_layout = factor.layout();
- using Permute = ir::operation::Permute;
- const auto permute_type = [&]() {
- if (input_layout == ir::Layout::NHWC && output_layout == ir::Layout::NCHW)
- {
- return Permute::Type::NHWC_TO_NCHW;
- }
- else if (input_layout == ir::Layout::NCHW && output_layout == ir::Layout::NHWC)
- {
- return Permute::Type::NCHW_TO_NHWC;
- }
- else
- {
- return Permute::Type::COPY;
- }
- }();
- auto insert_node = std::make_unique<Permute>(operand_index, out_operand_index, permute_type);
-
- auto node_index = _graph.operations().push(std::move(insert_node));
- const auto &node = _graph.operations().at(node_index);
-
- VERBOSE_F() << "Permute Op inserted, node index : " << node_index << std::endl;
- VERBOSE_F() << " - Input (original) Operand : " << operand_index << std::endl;
- VERBOSE_F() << " - Output(inserted) Operand : " << out_operand_index << std::endl;
-
- // OpSequence
- {
- auto op_seq_index = _lowered_graph.op_seqs().emplace(node_index, permute_node_layout);
- auto &op_seq = _lowered_graph.op_seqs().at(op_seq_index);
- op_seq.setInputs(node.getInputs());
- op_seq.setOutputs(node.getOutputs());
- _lowered_graph.setLowerInfo(op_seq_index, std::make_unique<ir::operation::LowerInfo>(
- permute_node_backend, permute_node_layout));
- }
-
- // Update Use/Def info
- {
- _graph.operands().at(operand_index).insertUse(node_index);
- _graph.operands().at(out_operand_index).setDef(node_index);
- }
- return node_index;
-}
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.h b/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.h
deleted file mode 100644
index 758515385..000000000
--- a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_PERMUTATION_INSERTION_PASS_H__
-#define __ONERT_COMPILER_PASS_PERMUTATION_INSERTION_PASS_H__
-
-#include "LoweredOperandPass.h"
-#include "compiler/BackendManager.h"
-#include "ir/Operand.h"
-#include "ir/operand/PermuteFactor.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class PermutationInsertionPass : public LoweredOperandPass
-{
-public:
- using LoweredOperandPass::LoweredOperandPass;
-
-public:
- std::string id() override { return "PermutationInsertionPass"; }
- void callback(const ir::OperandIndex &index, ir::Operand &object) override;
-
-private:
- /**
- * @brief Insert Permute operation that has given operand as input
- *
- * @param operand_index is the target operand index for the insertion
- * @param factor is the output operand's backend type and layout
- *
- * @return ir::OperationIndex
- */
- ir::OperationIndex insertPermute(const ir::OperandIndex &operand_index,
- const ir::operand::PermuteFactor &factor);
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_PERMUTATION_INSERTION_PASS_H__
diff --git a/runtime/onert/core/src/compiler/pass/PermutationOperationPass.cc b/runtime/onert/core/src/compiler/pass/PermutationOperationPass.cc
deleted file mode 100644
index 93d125307..000000000
--- a/runtime/onert/core/src/compiler/pass/PermutationOperationPass.cc
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "PermutationOperationPass.h"
-
-#include "backend/Backend.h"
-#include "backend/IConfig.h"
-#include "ir/Graph.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-using namespace ir;
-
-void PermutationOperationPass::callback(const OperationIndex &, Operation &node)
-{
- node.accept(*this);
-};
-
-// TODO Remove this. Expanding ranks of Operand is dangerous
-void PermutationOperationPass::applyExpandRanks(const Operation &node)
-{
- const auto &output_ind = node.getOutputs().at(0);
- const auto &output = _graph.operands().at(output_ind);
-
- assert(output.getDef().valid());
- const auto node_index = output.getDef();
- const auto &op_seq_index = _lowered_graph.op_seqs().getOperation(node_index);
- const auto frontend_layout = _lowered_graph.op_seqs().at(op_seq_index).getLayout();
- const auto backend_layout = _lowered_graph.getLowerInfo(op_seq_index)->layout();
-
- if (frontend_layout == backend_layout)
- {
- return;
- }
-
- int32_t expanded_rank = 0;
- for (const auto &index :
- (node.getInputs() + node.getOutputs()) | Remove::DUPLICATED | Remove::UNDEFINED)
- {
- expanded_rank = std::max(expanded_rank, _graph.operands().at(index).shape().rank());
- }
- if (expanded_rank < 4)
- return;
-
- for (const auto &index :
- (node.getInputs() + node.getOutputs()) | Remove::DUPLICATED | Remove::UNDEFINED)
- {
- const auto &operand = _graph.operands().at(index);
- if (operand.shape().rank() < expanded_rank)
- {
- if (operand.getUses().size() > 1)
- throw std::runtime_error("PermutationOperationPass: not supported expanding rank of "
- "operand used in more than one node");
- // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
- // a node to extend shape may be inserted in front of this operation
- const_cast<Shape &>(operand.shape()).extendRank(expanded_rank);
- }
- }
-}
-
-void PermutationOperationPass::changeToKeepLayout(const Operation &node)
-{
- const auto &output_ind = node.getOutputs().at(0);
- const auto &output_obj = _graph.operands().at(output_ind);
-
- assert(output_obj.getDef().valid());
- const auto node_index = output_obj.getDef();
- const auto &op_seq_index = _lowered_graph.op_seqs().getOperation(node_index);
-
- const auto frontend_layout = _lowered_graph.op_seqs().at(op_seq_index).getLayout();
- const auto backend_layout = _lowered_graph.getLowerInfo(op_seq_index)->layout();
-
- if (frontend_layout == backend_layout)
- {
- return;
- }
-
- // Permutation changing layout beyond 4-D is not supported yet
- assert(output_obj.shape().rank() <= 4);
-
- // Divide op_seq based on target operation
- {
- auto &prev_op_seq = _lowered_graph.op_seqs().at(op_seq_index);
- auto &operations = _lowered_graph.graph().operations();
-
- // Create new op_seq and move information from existing op_seq to new op_seq if target
- // node is the end of op_seq
- auto it = prev_op_seq.begin();
- // Find iterator of target node in op_seq
- while (*(it++) != node_index)
- ;
- if (it != prev_op_seq.end())
- {
- const auto &target_op_idx = *it;
- const auto &target_node = operations.at(target_op_idx);
- const auto &next_op_seq_index =
- _lowered_graph.op_seqs().emplace(target_op_idx, prev_op_seq.getLayout());
- auto &next_op_seq = _lowered_graph.op_seqs().at(next_op_seq_index);
- next_op_seq.setInputs(target_node.getInputs());
- next_op_seq.setOutputs(target_node.getOutputs());
-
- std::vector<OperationIndex> remove_list;
- remove_list.emplace_back(target_op_idx);
- while (++it != prev_op_seq.end())
- {
- next_op_seq.appendOperation(target_op_idx);
- next_op_seq.setOutputs(target_node.getOutputs());
- remove_list.emplace_back(target_op_idx);
- }
-
- prev_op_seq.setOutputs(node.getOutputs());
- for (const auto &index : remove_list)
- {
- prev_op_seq.remove(index);
- }
-
- const auto op_seq_li = _lowered_graph.getLowerInfo(op_seq_index);
- _lowered_graph.setLowerInfo(
- next_op_seq_index,
- std::make_unique<ir::operation::LowerInfo>(op_seq_li->backend(), op_seq_li->layout()));
- }
- }
-
- // Remove target operation from op_seq and insert the target operation to new op_seq
- {
- const auto backend = _lowered_graph.getLowerInfo(op_seq_index)->backend();
-
- // Remove target operation from op_sequence
- _lowered_graph.op_seqs().removeFromOpSequence(node_index);
-
- if (!_lowered_graph.op_seqs().exist(op_seq_index))
- {
- // Remove lowerinfo for op_seq of target operation if the op_seq does not exist
- _lowered_graph.removeLowerInfo(op_seq_index);
- }
- else
- {
- // Update op_seq of target operation if the op_seq exists
- auto &prev_op_seq = _lowered_graph.op_seqs().at(op_seq_index);
- const auto &last_node_idx = *(--prev_op_seq.end());
- const auto &last_node = _lowered_graph.graph().operations().at(last_node_idx);
- prev_op_seq.setOutputs(last_node.getOutputs());
- }
-
- // Create new op_seq and set information to the op_seq
- auto new_op_seq_index = _lowered_graph.op_seqs().emplace(node_index, frontend_layout);
- auto &new_op_seq = _lowered_graph.op_seqs().at(new_op_seq_index);
- new_op_seq.setInputs(node.getInputs());
- new_op_seq.setOutputs(node.getOutputs());
- _lowered_graph.setLowerInfo(
- new_op_seq_index, std::make_unique<ir::operation::LowerInfo>(backend, frontend_layout));
- }
-
- // Change PermuteFactors of operands of target node
- {
- const auto &op_seq_index = _lowered_graph.op_seqs().getOperation(node_index);
- const auto op_seq_li = _lowered_graph.getLowerInfo(op_seq_index);
- const auto backend = op_seq_li->backend();
- const operand::PermuteFactor removed_factor{backend, backend_layout};
- const operand::PermuteFactor new_factor{backend, frontend_layout};
- for (const auto &input : node.getInputs() | Remove::DUPLICATED | Remove::UNDEFINED)
- {
- bool canRemove = true;
- for (const auto &use : _graph.operands().at(input).getUses())
- {
- if (use != node_index)
- {
- const auto &use_op_seq_index = _lowered_graph.op_seqs().getOperation(use);
- auto use_op_seq_li = _lowered_graph.getLowerInfo(use_op_seq_index);
- if (use_op_seq_li->backend() == backend && use_op_seq_li->layout() == backend_layout)
- {
- canRemove = false;
- break;
- }
- }
- }
-
- auto lower_info = _lowered_graph.getLowerInfo(input);
- if (canRemove)
- {
- lower_info->removeUsePermuteFactor(removed_factor);
- }
- lower_info->addUsePermuteFactor(new_factor);
-
- // Whether if node's input is an input of model or a constant
- if (!_graph.operands().at(input).getDef().valid() &&
- (lower_info->def_factors().size() == 1 &&
- lower_info->def_factors().getOnlyElement() == removed_factor))
- {
- assert(_graph.getInputs().contains(input) || _graph.operands().at(input).isConstant());
- lower_info->removeDefPermuteFactor(removed_factor);
- lower_info->addDefPermuteFactor(new_factor);
- }
- }
-
- for (const auto &output : node.getOutputs() | Remove::DUPLICATED | Remove::UNDEFINED)
- {
- auto lower_info = _lowered_graph.getLowerInfo(output);
- lower_info->removeDefPermuteFactor(removed_factor);
- lower_info->addDefPermuteFactor(new_factor);
-
- // Whether if node's output is an output of model
- if (_graph.operands().at(output).getUses().size() == 0)
- {
- assert(_graph.getOutputs().contains(output));
- lower_info->removeUsePermuteFactor(removed_factor);
- lower_info->addUsePermuteFactor(new_factor);
- }
- }
- }
-}
-
-void PermutationOperationPass::visit(const ir::operation::BinaryArithmetic &node)
-{
- applyExpandRanks(node);
-}
-
-void PermutationOperationPass::visit(const ir::operation::Concat &node) { applyExpandRanks(node); }
-
-void PermutationOperationPass::visit(const ir::operation::Comparison &node)
-{
- applyExpandRanks(node);
-}
-
-void PermutationOperationPass::visit(const ir::operation::ElementwiseBinary &node)
-{
- applyExpandRanks(node);
-}
-
-void PermutationOperationPass::visit(const ir::operation::ElementwiseUnary &node)
-{
- applyExpandRanks(node);
-}
-
-void PermutationOperationPass::visit(const ir::operation::FullyConnected &node)
-{
- const auto &input_ind = node.getInputs().at(ir::operation::FullyConnected::Input::INPUT);
- const auto &input_obj = _graph.operands().at(input_ind);
- const auto &input_shape = input_obj.shape();
-
- if (input_shape.rank() >= 4)
- {
- changeToKeepLayout(node);
- }
-}
-
-void PermutationOperationPass::visit(const ir::operation::Gather &node)
-{
- const auto &input_ind = node.getInputs().at(ir::operation::Gather::Input::INPUT);
- const auto &input_obj = _graph.operands().at(input_ind);
- const auto &input_shape = input_obj.shape();
-
- const auto &output_ind = node.getOutputs().at(0);
- const auto &output_obj = _graph.operands().at(output_ind);
- const auto &output_shape = output_obj.shape();
-
- if (input_shape.rank() >= 4 || output_shape.rank() >= 4)
- {
- changeToKeepLayout(node);
- }
-}
-
-void PermutationOperationPass::visit(const ir::operation::OneHot &node)
-{
- const auto &output_ind = node.getOutputs().at(0);
- const auto &output_obj = _graph.operands().at(output_ind);
- const auto &output_shape = output_obj.shape();
-
- if (output_shape.rank() >= 4)
- {
- changeToKeepLayout(node);
- }
-}
-
-void PermutationOperationPass::visit(const ir::operation::Pack &node)
-{
- const auto &input_ind = node.getInputs().at(ir::operation::Reshape::Input::INPUT);
- const auto &input_obj = _graph.operands().at(input_ind);
- const auto &input_shape = input_obj.shape();
-
- const auto &output_ind = node.getOutputs().at(0);
- const auto &output_obj = _graph.operands().at(output_ind);
- const auto &output_shape = output_obj.shape();
-
- if (input_shape.rank() < 4 || output_shape.rank() >= 4)
- {
- changeToKeepLayout(node);
- }
-}
-
-void PermutationOperationPass::visit(const ir::operation::PReLU &node) { applyExpandRanks(node); }
-
-void PermutationOperationPass::visit(const ir::operation::Reshape &node)
-{
- const auto &input_ind = node.getInputs().at(ir::operation::Reshape::Input::INPUT);
- const auto &input_obj = _graph.operands().at(input_ind);
- const auto &input_shape = input_obj.shape();
-
- const auto &output_ind = node.getOutputs().at(0);
- const auto &output_obj = _graph.operands().at(output_ind);
- const auto &output_shape = output_obj.shape();
-
- if (input_shape.rank() >= 4 || output_shape.rank() >= 4)
- {
- changeToKeepLayout(node);
- }
-}
-
-void PermutationOperationPass::visit(const ir::operation::SquaredDifference &node)
-{
- applyExpandRanks(node);
-}
-
-void PermutationOperationPass::visit(const ir::operation::Unpack &node)
-{
- const auto &input_ind = node.getInputs().at(ir::operation::Reshape::Input::INPUT);
- const auto &input_obj = _graph.operands().at(input_ind);
- const auto &input_shape = input_obj.shape();
-
- const auto &output_ind = node.getOutputs().at(0);
- const auto &output_obj = _graph.operands().at(output_ind);
- const auto &output_shape = output_obj.shape();
-
- if (input_shape.rank() < 4 || output_shape.rank() >= 4)
- {
- changeToKeepLayout(node);
- }
-}
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
diff --git a/runtime/onert/core/src/compiler/pass/PermutationOperationPass.h b/runtime/onert/core/src/compiler/pass/PermutationOperationPass.h
deleted file mode 100644
index cea5de288..000000000
--- a/runtime/onert/core/src/compiler/pass/PermutationOperationPass.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_COMPILER_PASS_PERMUTATION_OPERATION_PASS_H__
-#define __ONERT_COMPILER_PASS_PERMUTATION_OPERATION_PASS_H__
-
-#include "ir/OperationVisitor.h"
-#include "LoweredOperationPass.h"
-
-namespace onert
-{
-namespace compiler
-{
-namespace pass
-{
-
-class PermutationOperationPass : public LoweredOperationPass, public ir::OperationVisitor
-{
-public:
- using LoweredOperationPass::LoweredOperationPass;
-
-public:
- std::string id() final { return "PermutationOperationPass"; }
-
-public:
- void callback(const ir::OperationIndex &i, ir::Operation &n) final;
-
-public:
- void visit(const ir::operation::BinaryArithmetic &) final;
- void visit(const ir::operation::Comparison &) final;
- void visit(const ir::operation::Concat &) final;
- void visit(const ir::operation::ElementwiseBinary &) final;
- void visit(const ir::operation::ElementwiseUnary &) final;
- void visit(const ir::operation::OneHot &) final;
- void visit(const ir::operation::Pack &) final;
- void visit(const ir::operation::PReLU &) final;
- void visit(const ir::operation::SquaredDifference &) final;
- void visit(const ir::operation::Unpack &) final;
- void visit(const ir::operation::FullyConnected &) final;
- void visit(const ir::operation::Gather &) final;
- void visit(const ir::operation::Reshape &) final;
-
-private:
- void applyExpandRanks(const ir::Operation &);
- void changeToKeepLayout(const ir::Operation &);
-};
-
-} // namespace pass
-} // namespace compiler
-} // namespace onert
-
-#endif // __ONERT_COMPILER_PASS_PERMUTATION_OPERATION_PASS_H__
diff --git a/runtime/onert/core/src/dumper/dot/DotBuilder.cc b/runtime/onert/core/src/dumper/dot/DotBuilder.cc
deleted file mode 100644
index 38a69696e..000000000
--- a/runtime/onert/core/src/dumper/dot/DotBuilder.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "DotBuilder.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-// DotDumper
-DotBuilder::DotBuilder() {}
-
-void DotBuilder::update(const Node &node_info)
-{
- add(node_info);
- for (auto edge : node_info.out_edges())
- {
- addEdge(node_info, *edge);
- }
-}
-
-void DotBuilder::addOpSequence(const DotSubgraphInfo &subgraph_info)
-{
- _dot << "subgraph cluster_" << subgraph_info.index().value() << " {\n";
- _dot << " label=\"" << subgraph_info.label() << "\";\n";
- _dot << " style=filled;\n";
- _dot << " color=lightgrey;\n";
- _dot << " ";
- for (auto op : subgraph_info.operations())
- {
- _dot << "operation" << op.value() << "; ";
- }
- for (auto op : subgraph_info.operands())
- {
- _dot << "operand" << op.value() << "; ";
- }
- _dot << "\n";
- _dot << "}\n";
-}
-
-void DotBuilder::writeDot(std::ostream &os)
-{
- os << "digraph D {\n"
- << _dot.str() << "\n"
- << "}\n";
-}
-
-void DotBuilder::add(const Node &node)
-{
- _dot << node.id();
- std::stringstream ss;
- _dot << "[";
- for (auto attr : node.attributes())
- {
- _dot << attr.first << "=\"" << attr.second << "\" ";
- }
- _dot << "];\n";
-}
-
-void DotBuilder::addEdge(const Node &node1, const Node &node2)
-{
- _dot << node1.id() << " -> " << node2.id() << ";\n";
-}
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
diff --git a/runtime/onert/core/src/dumper/dot/DotBuilder.h b/runtime/onert/core/src/dumper/dot/DotBuilder.h
deleted file mode 100644
index 681cbbf5d..000000000
--- a/runtime/onert/core/src/dumper/dot/DotBuilder.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_DUMPER_DOT_DOT_BUILDER_H__
-#define __ONERT_DUMPER_DOT_DOT_BUILDER_H__
-
-#include <sstream>
-
-#include "ir/Index.h"
-#include "ir/Operation.h"
-#include "ir/Operand.h"
-
-#include "OperationNode.h"
-#include "OperandNode.h"
-#include "DotSubgraphInfo.h"
-
-using Operation = onert::ir::Operation;
-using Object = onert::ir::Operand;
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-class DotBuilder
-{
-public:
- DotBuilder();
-
-public:
- void update(const Node &dotinfo);
- void addOpSequence(const DotSubgraphInfo &subgraph_info);
-
- void writeDot(std::ostream &os);
-
-private:
- void add(const Node &dotinfo);
- void addEdge(const Node &dotinfo1, const Node &dotinfo2);
-
- std::stringstream _dot;
-};
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
-
-#endif // __ONERT_DUMPER_DOT_DOT_BUILDER_H__
diff --git a/runtime/onert/core/src/dumper/dot/DotDumper.cc b/runtime/onert/core/src/dumper/dot/DotDumper.cc
deleted file mode 100644
index 8f3cf328c..000000000
--- a/runtime/onert/core/src/dumper/dot/DotDumper.cc
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <fstream>
-#include <unordered_map>
-
-#include "DotDumper.h"
-#include "DotBuilder.h"
-#include "DotSubgraphInfo.h"
-#include "ir/OpSequence.h"
-#include "ir/OperationIndexMap.h"
-#include "backend/Backend.h"
-#include "backend/IConfig.h"
-#include "compiler/BackendManager.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-void DotDumper::dump(const std::string &tag)
-{
- if (_level == Level::OFF)
- {
- return;
- }
-
- onert::dumper::dot::DotBuilder dot_builder;
-
- auto &operations = _graph.operations();
- auto &operands = _graph.operands();
-
- ir::OperationIndexMap<std::unique_ptr<Operation>> operation_nodes;
- std::unordered_map<ir::OperandIndex, std::unique_ptr<Operand>> operand_nodes;
-
- auto backend_to_fillcolor = [](const backend::Backend *backend) {
- static const auto map = []() {
- std::unordered_map<const backend::Backend *, std::string> ret;
- uint32_t index = 1; // Start from 1 to avoid 0(red) which is too dark :(
- for (const auto backend : compiler::BackendManager::get().getAll())
- {
- ret.emplace(backend, Node::BG_COLORS[index]);
- index = (index + 1) % (sizeof(Node::BG_COLORS) / sizeof(Node::BG_COLORS[0]));
- }
- return ret;
- }();
-
- auto itr = map.find(backend);
- if (itr == map.end())
- {
- return Node::DEFAULT_FILLCOLOR;
- }
- else
- {
- return itr->second;
- }
- };
-
- util::Set<ir::OperandIndex> shown_operand_set;
-
- operands.iterate([&](const ir::OperandIndex &index, const ir::Operand &object) {
- bool showing_cond = false;
- if (_level == Level::ALL)
- {
- showing_cond = true;
- }
- else
- {
- showing_cond =
- !object.isConstant() || (_graph.getInputs() + _graph.getOutputs()).contains(index);
- }
- if (showing_cond)
- {
- shown_operand_set.add(index);
-
- auto type = [&]() {
- using onert::dumper::dot::Operand;
- if (_graph.getInputs().contains(index))
- return Operand::Type::MODEL_INPUT;
- if (_graph.getOutputs().contains(index))
- return Operand::Type::MODEL_OUTPUT;
- return Operand::Type::INTERNAL;
- }();
-
- auto node = std::make_unique<Operand>(index, type);
-
- {
- // Display LowerInfo attributes
- std::string label = std::to_string(index.value());
- std::string fillcolor = "";
- if (_lowered_graph)
- {
- auto lower_info = _lowered_graph->getLowerInfo(index);
- const auto &def_factors = lower_info->def_factors();
- if (def_factors.size() > 0)
- {
- label += "\\n[";
- label += def_factors.getOnlyElement().backend()->config()->id();
- label += "]";
-
- fillcolor = backend_to_fillcolor(lower_info->def_factors().getOnlyElement().backend());
- }
- }
- node->setAttribute("label", label);
- node->setAttribute("fillcolor", fillcolor);
- }
-
- operand_nodes.emplace(index, std::move(node));
- }
- });
-
- operations.iterate([&](const ir::OperationIndex &index, const ir::Operation &op) {
- auto node = std::make_unique<Operation>(index, op);
-
- for (auto input : op.getInputs())
- {
- using onert::dumper::dot::Operand;
-
- // Constant input and dump level is ALL_BUT_CONSTANTS
- if (operand_nodes.find(input) == operand_nodes.end())
- continue;
-
- auto &input_node = operand_nodes.at(input);
- input_node->addOutEdge(node.get());
- }
-
- for (auto output : op.getOutputs())
- {
- using onert::dumper::dot::Operand;
- auto &output_node = operand_nodes.at(output);
- node->addOutEdge(output_node.get());
- }
-
- operation_nodes.emplace(index, std::move(node));
- });
-
- if (_lowered_graph)
- {
- const auto &op_seqs = _lowered_graph->op_seqs();
- op_seqs.iterate([&](const ir::OpSequenceIndex &index, const ir::OpSequence &op_seq) {
- const auto lower_info = _lowered_graph->getLowerInfo(index);
- auto fillcolor = backend_to_fillcolor(lower_info->backend());
- std::string label =
- std::to_string(index.value()) + " [" + lower_info->backend()->config()->id() + "]";
- DotSubgraphInfo subgraph_info{index, op_seq, shown_operand_set, _graph.operations()};
- subgraph_info.label(label);
- subgraph_info.fillcolor(fillcolor);
- dot_builder.addOpSequence(subgraph_info);
-
- // Set fillcolor of all operations in the op_seq
- for (const auto &op_idx : op_seq.operations())
- {
- auto found = operation_nodes.find(op_idx);
- if (found != operation_nodes.end())
- {
- auto &&op = found->second;
- op->setAttribute("fillcolor", fillcolor);
- }
- }
- });
- }
-
- for (const auto &e : operation_nodes)
- dot_builder.update(*e.second);
- for (const auto &e : operand_nodes)
- dot_builder.update(*e.second);
-
- // Dump to file
- {
- std::string file_name;
- file_name += tag;
- file_name += ".dot";
- std::filebuf fb;
-
- fb.open(file_name, std::ios::out);
- std::ostream os(&fb);
-
- dot_builder.writeDot(os);
-
- fb.close();
- }
-}
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
diff --git a/runtime/onert/core/src/dumper/dot/DotDumper.h b/runtime/onert/core/src/dumper/dot/DotDumper.h
deleted file mode 100644
index fdbca1642..000000000
--- a/runtime/onert/core/src/dumper/dot/DotDumper.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Graph.h"
-#include "compiler/LoweredGraph.h"
-
-#ifndef __ONERT_DUMPER_DOT_DOT_DUMPER_H__
-#define __ONERT_DUMPER_DOT_DOT_DUMPER_H__
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-class DotDumper
-{
-public:
- enum Level
- {
- OFF = 0, //< Do not dump
- ALL_BUT_CONSTANTS = 1, //< Emit all operations and operands but constants
- ALL = 2 //< Emit all operations and operands
- };
-
-public:
- DotDumper(const ir::Graph &graph, Level level)
- : _lowered_graph{nullptr}, _graph(graph), _level{level}
- {
- }
- DotDumper(const compiler::LoweredGraph *lowered_graph, Level level)
- : _lowered_graph{lowered_graph}, _graph(_lowered_graph->graph()), _level{level}
- {
- }
-
-public:
- /**
- * @brief Dump to dot file as tag name if "GRAPH_DOT_DUMP" is set
- *
- * @param[in] tag The name of dot file that would be created
- * @return N/A
- */
- void dump(const std::string &tag);
-
-private:
- const compiler::LoweredGraph *_lowered_graph;
- const ir::Graph &_graph;
- Level _level;
-};
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
-
-#endif // __ONERT_DUMPER_DOT_DOT_DUMPER_H__
diff --git a/runtime/onert/core/src/dumper/dot/DotSubgraphInfo.cc b/runtime/onert/core/src/dumper/dot/DotSubgraphInfo.cc
deleted file mode 100644
index 52e9c758d..000000000
--- a/runtime/onert/core/src/dumper/dot/DotSubgraphInfo.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "DotSubgraphInfo.h"
-
-#include <sstream>
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-DotSubgraphInfo::DotSubgraphInfo(const ir::OpSequenceIndex &index, const ir::OpSequence &op_seq,
- const util::Set<ir::OperandIndex> &shown_operands,
- const ir::Operations &operations_ctx)
- : _index{index}
-{
- for (const auto &op_idx : op_seq.operations())
- {
- _operations.insert(op_idx);
- const auto &node = operations_ctx.at(op_idx);
- for (auto o : node.getInputs())
- {
- // Must be a shown operand, not op_seq's inputs
- if (shown_operands.contains(o) && !op_seq.getInputs().contains(o))
- {
- _operands.insert(o);
- }
- }
- for (auto o : node.getOutputs())
- {
- // Must be a shown operand, not op_seq's inputs
- if (shown_operands.contains(o) && !op_seq.getOutputs().contains(o))
- {
- _operands.insert(o);
- }
- }
- }
-}
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
diff --git a/runtime/onert/core/src/dumper/dot/DotSubgraphInfo.h b/runtime/onert/core/src/dumper/dot/DotSubgraphInfo.h
deleted file mode 100644
index 95ba8953e..000000000
--- a/runtime/onert/core/src/dumper/dot/DotSubgraphInfo.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_CORE_DUMPER_DOT_DOT_SUBGRAPH_INFO_H__
-#define __ONERT_CORE_DUMPER_DOT_DOT_SUBGRAPH_INFO_H__
-
-#include <unordered_set>
-
-#include "ir/Index.h"
-#include <ir/Operations.h>
-#include "ir/OpSequence.h"
-#include "util/Set.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-class DotSubgraphInfo
-{
-public:
- DotSubgraphInfo(const ir::OpSequenceIndex &index, const ir::OpSequence &op_seq,
- const util::Set<ir::OperandIndex> &shown_operands,
- const ir::Operations &operations_ctx);
-
- ir::OpSequenceIndex index() const { return _index; }
- std::string label() const { return _label; }
- void label(const std::string &val) { _label = val; }
- std::string fillcolor() const { return _fillcolor; }
- void fillcolor(const std::string &val) { _fillcolor = val; }
- const std::unordered_set<ir::OperationIndex> &operations() const { return _operations; }
- const std::unordered_set<ir::OperandIndex> &operands() const { return _operands; }
-
-private:
- ir::OpSequenceIndex _index;
- std::string _label;
- std::string _fillcolor;
- std::unordered_set<ir::OperationIndex> _operations;
- std::unordered_set<ir::OperandIndex> _operands;
-};
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
-
-#endif // __ONERT_CORE_DUMPER_DOT_DOT_SUBGRAPH_INFO_H__
diff --git a/runtime/onert/core/src/dumper/dot/Node.cc b/runtime/onert/core/src/dumper/dot/Node.cc
deleted file mode 100644
index 85d6e67a4..000000000
--- a/runtime/onert/core/src/dumper/dot/Node.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Node.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-const std::string Node::DEFAULT_COLORSCHEME = "x11";
-const std::string Node::DEFAULT_FILLCOLOR = "white";
-// RED, BLUE, GREEN, PURPLE, ORANGE, YELLOW, BROWN, PINK
-const std::string Node::BG_COLORS[8] = {"1", "2", "3", "4", "5", "6", "7", "8"};
-
-Node::Node(const std::string &id) : _id{id}
-{
- // Set default values
- _attributes["style"] = "filled";
- _attributes["colorscheme"] = DEFAULT_COLORSCHEME;
- _attributes["fillcolor"] = DEFAULT_FILLCOLOR;
-}
-
-void Node::setAttribute(const std::string &key, const std::string &val) { _attributes[key] = val; }
-
-std::string Node::getAttribute(const std::string &key)
-{
- auto itr = _attributes.find(key);
- if (itr == _attributes.end())
- {
- return "";
- }
- else
- {
- return itr->second;
- }
-}
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
diff --git a/runtime/onert/core/src/dumper/dot/Node.h b/runtime/onert/core/src/dumper/dot/Node.h
deleted file mode 100644
index 9b09b92e7..000000000
--- a/runtime/onert/core/src/dumper/dot/Node.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Node.h
- * @brief This file contains Node class
- * @ingroup COM_AI_RUNTIME
- *
- */
-
-#ifndef __ONERT_DUMPER_DOT_NODE_H__
-#define __ONERT_DUMPER_DOT_NODE_H__
-
-#include <string>
-#include <memory>
-#include <vector>
-#include <unordered_map>
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-enum BGCOLORS : int
-{
- RED,
- BLUE,
- GREEN,
- PUPLE,
- ORANGE,
- YELLOW,
- BROWN,
- PINK
-};
-
-/**
- * @brief Class that represents a Node in "dot" format
- *
- */
-class Node
-{
-public:
- const static std::string DEFAULT_FILLCOLOR;
- const static std::string DEFAULT_COLORSCHEME;
- const static std::string BG_COLORS[8];
-
-public:
- /**
- * @brief Destroy the Node object
- *
- */
- virtual ~Node() = default;
-
- /**
- * @brief Construct a new Node object
- *
- * @param id
- */
- Node(const std::string &id);
-
- /**
- * @brief return id
- *
- * @return id
- */
- std::string id() const { return _id; }
-
- /**
- * @brief return attributes
- *
- * @return const reference of attributes object
- */
- const std::unordered_map<std::string, std::string> &attributes() const { return _attributes; }
- /**
- * @brief Store an attribute with key-value pair
- *
- * @param[in] key attribute's key
- * @param[in] val attribute's value that is associated with the key
- */
- void setAttribute(const std::string &key, const std::string &val);
- /**
- * @brief Get the attributte value that is associated with key
- *
- * @param[in] key key of the attribute
- * @return value that is associated with the key
- */
- std::string getAttribute(const std::string &key);
-
- /**
- * @brief Add an edge in the graph, which is an outgoing edge
- *
- * @param[in] dotinfo A node that the new edge will be connected to
- */
- void addOutEdge(Node *dotinfo) { _out_edges.emplace_back(dotinfo); }
- /**
- * @brief Return list of out edges
- *
- * @return Edges
- */
- const std::vector<Node *> &out_edges() const { return _out_edges; }
-
-private:
- std::string _id;
- std::unordered_map<std::string, std::string> _attributes;
- std::vector<Node *> _out_edges;
-};
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
-
-#endif // __ONERT_DUMPER_DOT_NODE_H__
diff --git a/runtime/onert/core/src/dumper/dot/OperandNode.cc b/runtime/onert/core/src/dumper/dot/OperandNode.cc
deleted file mode 100644
index 5a6015ca9..000000000
--- a/runtime/onert/core/src/dumper/dot/OperandNode.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sstream>
-
-#include "OperandNode.h"
-#include "ir/Graph.h"
-#include "ir/operand/LowerInfo.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-const std::string Operand::INPUT_SHAPE = "doublecircle";
-const std::string Operand::OUTPUT_SHAPE = "doublecircle";
-const std::string Operand::OPERAND_SHAPE = "ellipse";
-const std::string Operand::BG_COLOR_SCHEME = "set18";
-
-Operand::Operand(const ir::OperandIndex &index, Type type)
- : Node{"operand" + std::to_string(index.value())}
-{
- {
- auto type_to_shape = [](Type type) {
- switch (type)
- {
- case Type::MODEL_INPUT:
- return INPUT_SHAPE;
- case Type::MODEL_OUTPUT:
- return OUTPUT_SHAPE;
- case Type::UNDEFINED:
- case Type::INTERNAL:
- default:
- return OPERAND_SHAPE;
- }
- };
- setAttribute("shape", type_to_shape(type));
- }
-
- setAttribute("colorscheme", BG_COLOR_SCHEME);
-}
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
diff --git a/runtime/onert/core/src/dumper/dot/OperandNode.h b/runtime/onert/core/src/dumper/dot/OperandNode.h
deleted file mode 100644
index 2e7cc5861..000000000
--- a/runtime/onert/core/src/dumper/dot/OperandNode.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Operand.h
- * @brief This file contains Operand
- * @ingroup COM_AI_RUNTIME
- *
- */
-
-#ifndef __ONERT_DUMPER_DOT_DOT_OPERAND_INFO_H__
-#define __ONERT_DUMPER_DOT_DOT_OPERAND_INFO_H__
-
-#include <vector>
-
-#include "Node.h"
-#include "ir/Operand.h"
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-/**
- * @brief Class that represents an Operand
- *
- */
-class Operand : public Node
-{
-public:
- enum class Type
- {
- UNDEFINED,
- MODEL_INPUT,
- MODEL_OUTPUT,
- INTERNAL
- };
-
-public:
- static const std::string INPUT_SHAPE;
- static const std::string OUTPUT_SHAPE;
- static const std::string OPERAND_SHAPE;
- static const std::string BG_COLOR_SCHEME;
-
-public:
- /**
- * @brief Construct a new Operand Node object
- *
- * @param[in] index Operand index
- * @param[in] type Operand type
- * @param[in] lower_info Operand LowerInfo
- */
- Operand(const ir::OperandIndex &index, Type type);
-
-private:
- void addBackendLabel();
-};
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
-
-#endif // __ONERT_DUMPER_DOT_DOT_OPERAND_INFO_H__
diff --git a/runtime/onert/core/src/dumper/dot/OperationNode.cc b/runtime/onert/core/src/dumper/dot/OperationNode.cc
deleted file mode 100644
index bee137e7c..000000000
--- a/runtime/onert/core/src/dumper/dot/OperationNode.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sstream>
-
-#include "OperationNode.h"
-#include "ir/Graph.h"
-#include "ir/operation/LowerInfo.h"
-#include "backend/IConfig.h"
-#include "backend/Backend.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-const std::string Operation::OPERATION_SHAPE = "rect";
-const std::string Operation::BG_COLOR_SCHEME = "pastel18";
-
-Operation::Operation(const ir::OperationIndex &index, const ir::Operation &node)
- : Node{"operation" + std::to_string(index.value())}
-{
- setAttribute("label", std::to_string(index.value()) + " : " + node.name());
- setAttribute("shape", OPERATION_SHAPE);
- setAttribute("colorscheme", BG_COLOR_SCHEME);
- setAttribute("fillcolor", DEFAULT_FILLCOLOR);
-}
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
diff --git a/runtime/onert/core/src/dumper/dot/OperationNode.h b/runtime/onert/core/src/dumper/dot/OperationNode.h
deleted file mode 100644
index 74a37d3fb..000000000
--- a/runtime/onert/core/src/dumper/dot/OperationNode.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Operation.h
- * @brief This file contains Operation
- * @ingroup COM_AI_RUNTIME
- *
- */
-
-#ifndef __ONERT_DUMPER_DOT_DOT_NODE_INFO_H__
-#define __ONERT_DUMPER_DOT_DOT_NODE_INFO_H__
-
-#include "Node.h"
-#include "ir/Operation.h"
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace dumper
-{
-namespace dot
-{
-
-/**
- * @brief Class that represents an Operation
- *
- */
-class Operation : public Node
-{
-public:
- static const std::string OPERATION_SHAPE;
- static const std::string BG_COLOR_SCHEME;
-
-public:
- /**
- * @brief Construct a new Operation Node object
- *
- * @param[in] index operation index
- * @param[in] node operation object
- */
- Operation(const ir::OperationIndex &index, const ir::Operation &node);
-};
-
-} // namespace dot
-} // namespace dumper
-} // namespace onert
-
-#endif // __ONERT_DUMPER_DOT_DOT_NODE_INFO_H__
diff --git a/runtime/onert/core/src/exec/BackendSet.h b/runtime/onert/core/src/exec/BackendSet.h
deleted file mode 100644
index 33ec75e4b..000000000
--- a/runtime/onert/core/src/exec/BackendSet.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_BACKEND_SET_H__
-#define __ONERT_EXEC_BACKEND_SET_H__
-
-#include "util/Set.h"
-
-namespace onert
-{
-namespace backend
-{
-class Backend;
-} // namespace backend
-} // namespace onert
-
-namespace onert
-{
-namespace exec
-{
-
-using BackendSet = util::Set<const backend::Backend *>;
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_BACKEND_SET_H__
diff --git a/runtime/onert/core/src/exec/DataflowExecutor.cc b/runtime/onert/core/src/exec/DataflowExecutor.cc
deleted file mode 100644
index 53bc3c204..000000000
--- a/runtime/onert/core/src/exec/DataflowExecutor.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "DataflowExecutor.h"
-
-#include <cassert>
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace exec
-{
-
-int64_t DataflowExecutor::calculateRank(const std::vector<ir::OperationIndex> &operations)
-{
- int64_t rank = 0;
- if (!_indexed_ranks)
- {
- return rank;
- }
- for (const auto &operation_idx : operations)
- {
- auto it = _indexed_ranks->find(operation_idx);
- if (it == _indexed_ranks->end())
- {
- assert(_graph.operations().at(operation_idx).opcode() == ir::OpCode::Permute &&
- operations.size() == 1);
- // run Permute ASAP for next operations to be ready for other backends
- return std::numeric_limits<int64_t>::max();
- }
- else
- {
- rank += it->second;
- }
- }
- return rank;
-}
-
-void DataflowExecutor::emplaceToReadyJobs(const uint32_t &id)
-{
- auto &job = _waiting_jobs[id];
- assert(job != nullptr);
- auto &op_seq = _lowered_graph->op_seqs().at(_job_to_op_seq[job->index()]);
- auto rank = calculateRank(op_seq.operations());
- _ready_jobs.emplace(rank, std::move(job));
-}
-
-void DataflowExecutor::notify(uint32_t finished_job_id)
-{
- for (auto id : _output_info[finished_job_id])
- {
- assert(_input_info[id] > 0);
- auto count = --_input_info[id];
- if (count == 0) // No dependent jobs left, ready for execution
- {
- emplaceToReadyJobs(id);
- }
- }
-}
-bool DataflowExecutor::noWaitingJobs()
-{
- return std::all_of(_waiting_jobs.begin(), _waiting_jobs.end(),
- [](const std::unique_ptr<Job> &job) { return job == nullptr; });
-}
-
-DataflowExecutor::DataflowExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const std::vector<backend::ITensor *> &input_tensors,
- const std::vector<backend::ITensor *> &output_tensors,
- const compiler::TensorRegistries &tensor_regs,
- compiler::CodeMap &&code_map)
- : ExecutorBase{std::move(lowered_graph), input_tensors, output_tensors, tensor_regs},
- _code_map{std::move(code_map)}
-{
- VERBOSE(DataflowExecutor) << "Constructing Dataflow Executor" << std::endl;
-
- const auto &op_seqs = _lowered_graph->op_seqs();
- // Assign jobs convert OpSequenceIndex to job index(uint32_t)
- uint32_t next_job_index = 0;
- std::unordered_map<ir::OpSequenceIndex, uint32_t> op_seq_to_job;
- op_seqs.iterate([&](const ir::OpSequenceIndex &op_seq_index, const ir::OpSequence &) {
- VERBOSE(DataflowExecutor) << "Create a job #" << next_job_index << " with OpSequenceIndex "
- << op_seq_index.value() << std::endl;
- _finished_jobs.emplace_back(
- std::make_unique<Job>(next_job_index, _code_map.at(op_seq_index).fn_seq.get()));
- op_seq_to_job[op_seq_index] = next_job_index++;
- });
-
- _waiting_jobs.resize(next_job_index);
- _output_info.resize(next_job_index);
- _initial_input_info.resize(next_job_index, 0);
-
- op_seqs.iterate([&](const ir::OpSequenceIndex &op_seq_index, const ir::OpSequence &op_seq) {
- auto job_index = op_seq_to_job[op_seq_index];
- for (auto output : op_seq.getOutputs())
- {
- // Update output and input info
- op_seqs.iterate(
- [&](const ir::OpSequenceIndex &op_seq_cur_index, const ir::OpSequence &op_seq_cur) {
- if (op_seq_cur.getInputs().contains(output))
- {
- auto dep_index = op_seq_to_job[op_seq_cur_index];
- ++_initial_input_info[dep_index];
- _output_info[job_index].push_back(dep_index);
- }
- });
- }
- });
- for (const auto &s : op_seq_to_job)
- _job_to_op_seq.emplace(s.second, s.first);
-
- _input_info = _initial_input_info;
-}
-
-void DataflowExecutor::executeImpl()
-{
- assert(noWaitingJobs());
-
- bool dynamic_input_exists = hasDynamicInput();
-
- // Execution setup
- _waiting_jobs.swap(_finished_jobs); // Move finished jobs to waiting jobs
-
- for (uint32_t i = 0; i < _waiting_jobs.size(); ++i)
- {
- if (_input_info[i] == 0)
- {
- emplaceToReadyJobs(i);
- }
- }
- assert(!_ready_jobs.empty()); // Cannot begin if there is no initial jobs
-
- _subject.notifyModelBegin(this);
-
- while (!_ready_jobs.empty())
- {
- auto job = std::move((_ready_jobs.begin())->second);
- _ready_jobs.erase(_ready_jobs.begin());
- auto job_index = job->index();
- VERBOSE(DataflowExecutor) << "Run job #" << job_index << std::endl;
-
- auto op_seq_index = _job_to_op_seq[job_index];
- auto op_seq = &_lowered_graph->op_seqs().at(op_seq_index);
- const backend::Backend *backend =
- _lowered_graph->getLowerInfo()->op_seq.at(op_seq_index)->backend();
-
- _subject.notifyJobBegin(this, op_seq, backend);
-
- job->fn_seq()->initRunning();
-
- // check if FunctionSequence needs to handle dynamic tensor
- bool handle_dynamic_tensor = op_seq->has_dynamic_tensor() || dynamic_input_exists;
- job->fn_seq()->enableDynamicShapeInferer(handle_dynamic_tensor);
-
- job->run();
-
- _subject.notifyJobEnd(this, op_seq, backend);
- notify(job_index);
- _finished_jobs[job_index] = std::move(job);
- }
- assert(noWaitingJobs());
-
- _subject.notifyModelEnd(this);
-
- // Reset input info for the next execution
- _input_info = _initial_input_info;
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/DataflowExecutor.h b/runtime/onert/core/src/exec/DataflowExecutor.h
deleted file mode 100644
index 69dfda15c..000000000
--- a/runtime/onert/core/src/exec/DataflowExecutor.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_DATAFLOW_EXECUTOR_H__
-#define __ONERT_EXEC_DATAFLOW_EXECUTOR_H__
-
-#include <list>
-#include <map>
-#include <unordered_map>
-
-#include "exec/FunctionSequence.h"
-#include "Job.h"
-#include "ir/OperandIndexSequence.h"
-#include "ir/Index.h"
-#include <memory>
-#include "exec/ExecutorBase.h"
-#include "compiler/CodeMap.h"
-
-namespace onert
-{
-namespace exec
-{
-
-class DataflowExecutor : public ExecutorBase
-{
-
-protected:
- virtual void notify(uint32_t finished_job_id);
- bool noWaitingJobs();
-
-public:
- /**
- * @brief Constructs a DataflowExecutor object
- *
- * @param lowered_graph LoweredGraph object
- * @param tensor_builders Tensor builders that are currently used
- * @param code_map OpSequence and its code map
- */
- DataflowExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const std::vector<backend::ITensor *> &input_tensors,
- const std::vector<backend::ITensor *> &output_tensors,
- const compiler::TensorRegistries &tensor_regs, compiler::CodeMap &&code_map);
-
- void executeImpl() override;
-
-protected:
- int64_t calculateRank(const std::vector<ir::OperationIndex> &operations);
- void emplaceToReadyJobs(const uint32_t &id);
-
-protected:
- compiler::CodeMap _code_map;
- /**
- * @brief A vector of finished jobs for current execution
- * After a run it has all the jobs of this execution for the next run
- */
- std::vector<std::unique_ptr<Job>> _finished_jobs;
- /**
- * @brief A vector of waiting jobs for current execution
- * All the jobs are moved from #_finished_jobs to it when start a run
- */
- std::vector<std::unique_ptr<Job>> _waiting_jobs;
- /**
- * @brief Jobs' output info
- * Used for notifying after finishing a job
- */
- std::vector<std::list<uint32_t>> _output_info;
- std::vector<uint32_t> _initial_input_info;
- std::vector<uint32_t> _input_info;
- /**
- * @brief A collection of jobs that are ready for execution
- * Jobs in it are ready to be scheduled.
- * Ordered by priority from `_indexed_ranks`
- */
- std::multimap<int64_t, std::unique_ptr<Job>, std::greater<int64_t>> _ready_jobs;
-
- /// @brief Which job runs which op and function.
- std::unordered_map<uint32_t, ir::OpSequenceIndex> _job_to_op_seq;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_DATAFLOW_EXECUTOR_H__
diff --git a/runtime/onert/core/src/exec/DynamicShapeInference.cc b/runtime/onert/core/src/exec/DynamicShapeInference.cc
deleted file mode 100644
index 0f604c43f..000000000
--- a/runtime/onert/core/src/exec/DynamicShapeInference.cc
+++ /dev/null
@@ -1,1236 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exec/DynamicShapeInference.h"
-#include "util/ShapeInference.h"
-#include <assert.h>
-
-namespace onert
-{
-namespace exec
-{
-
-void DynamicShapeInferer::handleBinaryArithmeticOp(const ir::Operation &op,
- const ir::OperandIndex lhs_idx,
- const ir::OperandIndex rhs_idx)
-{
- auto lhs = _tensor_registry->getITensor(lhs_idx);
- auto lhs_shape = lhs->getShape();
-
- auto rhs = _tensor_registry->getITensor(rhs_idx);
- auto rhs_shape = rhs->getShape();
-
- /*
- Here, the state after compilation (satic shape inference) could be one of the following:
-
- lhs rhs output execution-time shape inf required
- ------------------------------------------ ---------------------------------
- case 1) static static static X
- case 2) one or both are dynamic dynamic O
-
- Then nnfw_apply_tensorinf() could change one or both inputs dynamic.
- So, in this method, we have one more state and we have to re-calculate shape for this shape.
-
- case 3) one or both are dynamic static O
-
- So, only when all inputs are static, we can skip dynamic shape inference.
- */
- if ((!lhs->is_dynamic()) && (!rhs->is_dynamic()))
- return;
-
- auto output_idx = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_idx);
-
- ir::Shape new_shape = shape_inference::inferEltwiseShape(lhs_shape, rhs_shape);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::handleSimpleUnaryOp(const ir::Operation &op,
- const ir::OperandIndex input_ind)
-{
- // check if input is not dynamic
- auto input = _tensor_registry->getITensor(input_ind);
- auto output_shape = input->getShape();
-
- /*
- Here, the state after compilation (satic shape inference) could be one of the following:
-
- input output execution-time shape inf required
- ------------------------- ---------------------------------
- case 1) static static X
- case 2) dynamic dynamic O
-
- Then nnfw_apply_tensorinf() could change input dynamic.
- So, in this method, we have one more state and we have to re-calculate shape for this shape.
-
- case 3) dynamic static O
-
- So, only when input is static, we can skip dynamic shape inference.
- */
- if (!input->is_dynamic())
- return;
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::ArgMax &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::ArgMax::Input::INPUT)};
- const auto input = _tensor_registry->getITensor(input_idx);
-
- const auto axis_idx{op.getInputs().at(ir::operation::ArgMax::Input::AXIS)};
- const auto axis = _tensor_registry->getITensor(axis_idx);
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- if (!input->is_dynamic())
- return;
-
- auto input_shape = input->getShape();
- auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
- const auto rank = input_shape.rank();
- axis_value = axis_value < 0 ? axis_value + rank : axis_value;
-
- ir::Shape new_shape = shape_inference::inferArgMaxShape(input_shape, axis_value, rank);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::BatchMatMul &op)
-{
- const auto lhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::LHS);
- const auto rhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::RHS);
- auto lhs = _tensor_registry->getITensor(lhs_index);
- auto rhs = _tensor_registry->getITensor(rhs_index);
-
- if (!lhs->is_dynamic() && !rhs->is_dynamic())
- return;
-
- const auto output_index = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_index);
-
- auto lhs_shape = lhs->getShape();
- auto rhs_shape = rhs->getShape();
- // TODO
-
- auto new_shape = shape_inference::inferBatchMatMulShape(lhs_shape, rhs_shape, op.param());
- output->applyShape(new_shape);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::BCQFullyConnected &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::BCQFullyConnected::Input::INPUT)};
- const auto &input = _tensor_registry->getITensor(input_idx);
-
- const auto cluster_idx{
- op.getInputs().at(ir::operation::BCQFullyConnected::Input::WEIGHTS_CLUSTERS)};
- const auto &cluster = _tensor_registry->getITensor(cluster_idx);
- assert(cluster->is_constant());
-
- if (!input->is_dynamic())
- return;
-
- auto input_shape = input->getShape();
- auto cluster_shape = cluster->getShape();
-
- auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
- assert(cluster_buf);
-
- ir::Shape new_shape =
- shape_inference::inferBCQFullyConnectedShape(input_shape, cluster_shape, cluster_buf);
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::BCQGather &op)
-{
- const auto indices_idx{op.getInputs().at(ir::operation::BCQGather::Input::INDICES)};
- const auto &indices = _tensor_registry->getITensor(indices_idx);
-
- const auto input_binary_idx{op.getInputs().at(ir::operation::BCQGather::Input::INDICES)};
- const auto &input_binary = _tensor_registry->getITensor(input_binary_idx);
-
- const auto cluster_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_CLUSTERS)};
- const auto &cluster = _tensor_registry->getITensor(cluster_idx);
- assert(cluster->is_constant());
-
- if (!indices->is_dynamic())
- return;
-
- auto indices_shape = indices->getShape();
- auto cluster_shape = cluster->getShape();
- auto rank = input_binary->getShape().rank();
-
- auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
- assert(cluster_buf);
-
- ir::Shape new_shape = shape_inference::inferBCQGatherShape(indices_shape, cluster_shape,
- cluster_buf, rank, op.param());
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::BinaryArithmetic &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::BinaryArithmetic::Input::LHS),
- op.getInputs().at(ir::operation::BinaryArithmetic::Input::RHS));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::BroadcastTo &op)
-{
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- auto input_idx = op.getInputs().at(ir::operation::BroadcastTo::INPUT);
- auto input = _tensor_registry->getITensor(input_idx);
-
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- auto shape_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
- const auto &shape = _tensor_registry->getITensor(shape_idx);
-
- assert(shape); // It shouldn't be 0.
-
- auto output_shape = shape_inference::inferBroadcastToShape(
- shape->getShape(), reinterpret_cast<const int32_t *>(shape->buffer()));
-
- // set output shape and output buffer
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Comparison &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::Comparison::Input::INPUT0),
- op.getInputs().at(ir::operation::Comparison::Input::INPUT1));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Concat &op)
-{
- /*
- The state after compilation (satic shape inference) could be one of the following:
-
- inputs output execution-time shape inf required
- ------------------------------------------ ---------------------------------
- case 1) all static static X
- case 2) at least on is dynamic dynamic O
-
- Then nnfw_apply_tensorinf() could change one or both inputs dynamic.
- So, in this method, we have one more state and we have to re-calculate shape for this shape.
-
- case 3) at least on is dynamic static O
-
- So, only when all inputs are static, we can skip dynamic shape inference.
- */
- bool all_static = true;
- for (auto input_ind : op.getInputs())
- {
- auto input = _tensor_registry->getITensor(input_ind);
- if (input->is_dynamic())
- {
- all_static = false;
- break;
- }
- }
-
- if (all_static)
- return;
-
- // sanity check
- {
- auto isConcatible = [](const backend::ITensor *input1, const backend::ITensor *input2,
- int32_t axis) {
- if (input1->num_dimensions() != input2->num_dimensions())
- return false;
-
- for (size_t i = 0; i < input1->num_dimensions(); i++)
- {
- auto positive_axis = (axis >= 0) ? axis : axis + input1->num_dimensions();
-
- if (i != positive_axis)
- if (input1->dimension(i) != input2->dimension(i))
- return false;
- }
-
- return true;
- };
-
- auto first_input_ind = op.getInputs().at(0);
- auto first_input = _tensor_registry->getITensor(first_input_ind);
-
- for (auto input_ind : op.getInputs())
- {
- auto input = _tensor_registry->getITensor(input_ind);
- if (input != first_input && !isConcatible(first_input, input, op.param().axis))
- throw std::runtime_error("input shapes does not matched for concat");
- }
- }
-
- // getting output shape
- onert::shape_inference::Shapes in_shapes;
- for (auto input_ind : op.getInputs())
- {
- auto input = _tensor_registry->getITensor(input_ind);
- ir::Shape shape = input->getShape();
-
- in_shapes.emplace_back(shape);
- }
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
- auto output_shape = shape_inference::inferConcatShape(in_shapes, op.param());
-
- output->applyShape(output_shape);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Conv2D &op)
-{
- // check if input is not dynamic
- auto input_ind = op.getInputs().at(ir::operation::Conv2D::INPUT);
- auto input = _tensor_registry->getITensor(input_ind);
-
- auto ker_ind = op.getInputs().at(ir::operation::Conv2D::KERNEL);
- auto ker = _tensor_registry->getITensor(ker_ind);
-
- if ((!input->is_dynamic()) && (!ker->is_dynamic()))
- return;
-
- ir::Shape input_shape = input->getShape();
- ir::Shape ker_shape = ker->getShape();
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- ir::Shape output_shape = shape_inference::inferConv2DShape(input_shape, ker_shape, op.param());
-
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::ElementwiseActivation &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::ElementwiseActivation::INPUT));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::ElementwiseBinary &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::ElementwiseBinary::Input::LHS),
- op.getInputs().at(ir::operation::ElementwiseBinary::Input::RHS));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::ElementwiseUnary &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::ExpandDims &op)
-{
- // check if input is not dynamic
- auto input_ind = op.getInputs().at(ir::operation::ExpandDims::INPUT);
- auto input = _tensor_registry->getITensor(input_ind);
-
- // check if output is not dynamic, meaning when 1st input is static and 2nd input is const
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- /*
- Here, the state after compilation (satic shape inference) could be one of the following:
-
- input1 input2 output execution-time shape inf required
- ----------------------------- --------------------------------
- case 1) static const static X
- case 2) static placeholder dynamic O
- case 3) dynamic const dynamic O
- case 4) dynamic placeholder dynamic O
-
- Then nnfw_apply_tensorinf() could change input dynamic.
- So, in this method, we could have one more state and we have to re-calculate shape
- for this shape.
-
- case 5) dynamic const static O
-
- So, only when input1 and ouput are static, we can skip dynamic shape inference.
- */
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- ir::Shape input_shape = input->getShape();
-
- auto axis_ind = op.getInputs().at(ir::operation::ExpandDims::AXIS);
- auto axis = _tensor_registry->getITensor(axis_ind);
- auto axis_buf = reinterpret_cast<const int32_t *>(axis->buffer());
- assert(axis_buf);
-
- auto output_shape = shape_inference::inferExpandDimsShape(input_shape, axis_buf[0]);
-
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Fill &op)
-{
- // check if output is not dynamic
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
- auto input_ind = op.getInputs().at(ir::operation::Fill::Input::INPUT);
- auto input = _tensor_registry->getITensor(input_ind);
- ir::Shape input_shape = input->getShape();
-
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- assert(input->data_type() == ir::DataType::INT32);
-
- auto input_buf = reinterpret_cast<const int32_t *>(input->buffer());
- assert(input_buf);
-
- auto output_shape = shape_inference::inferFillShape(input_shape, input_buf);
-
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::FullyConnected &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::FullyConnected::Input::INPUT)};
- const auto &input = _tensor_registry->getITensor(input_idx);
-
- const auto ker_idx{op.getInputs().at(ir::operation::FullyConnected::Input::WEIGHT)};
- const auto &ker = _tensor_registry->getITensor(ker_idx);
-
- if (!input->is_dynamic() && !ker->is_dynamic())
- return;
-
- auto input_shape = input->getShape();
- auto ker_shape = ker->getShape();
-
- ir::Shape new_shape = shape_inference::inferFullyConnectedShape(input_shape, ker_shape);
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::FusedBatchNorm &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::FusedBatchNorm::Input::INPUT));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Gather &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Gather::Input::INPUT)};
- const auto &input = _tensor_registry->getITensor(input_idx);
- auto input_shape = input->getShape();
-
- const auto indices_idx{op.getInputs().at(ir::operation::Gather::Input::INDICES)};
- const auto &indices = _tensor_registry->getITensor(indices_idx);
- auto indices_shape = indices->getShape();
-
- if (!(input->is_dynamic()) && !(indices->is_dynamic()))
- return;
-
- const auto rank = input_shape.rank();
- const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
-
- assert(0 <= axis && axis < rank);
-
- ir::Shape new_shape = shape_inference::inferGatherShape(input_shape, indices_shape, axis, rank);
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::L2Normalization &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::L2Normalization::INPUT));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::LSTM &op)
-{
- const auto output_index{op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)};
- auto output = _tensor_registry->getITensor(output_index);
-
- const auto output_state_out_index{
- op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)};
-
- const auto cell_state_out_index{op.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)};
-
- const auto scratch_buffer_index{op.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)};
-
- if (!output->is_dynamic() &&
- !(_tensor_registry->getITensor(output_state_out_index) != nullptr &&
- _tensor_registry->getITensor(output_state_out_index)->is_dynamic()) &&
- !(_tensor_registry->getITensor(cell_state_out_index) != nullptr &&
- _tensor_registry->getITensor(cell_state_out_index)->is_dynamic()) &&
- !(_tensor_registry->getITensor(scratch_buffer_index) != nullptr &&
- _tensor_registry->getITensor(cell_state_out_index)->is_dynamic()))
- return;
-
- const auto input_index{op.getInputs().at(ir::operation::LSTM::Input::INPUT)};
- const auto input = _tensor_registry->getITensor(input_index);
- const auto input_shape = input->getShape();
-
- const auto input_to_output_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)};
- const auto input_to_output_weights = _tensor_registry->getITensor(input_to_output_weights_index);
- const auto input_to_output_weights_shape = input_to_output_weights->getShape();
-
- const auto recurrent_to_output_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)};
- const auto recurrent_to_output_weights =
- _tensor_registry->getITensor(recurrent_to_output_weights_index);
- const auto recurrent_to_output_weights_shape = recurrent_to_output_weights->getShape();
-
- // re-sizing outputs
- const int n_batch =
- (input_shape.rank() == 3 && op.param().time_major) ? input_shape.dim(1) : input_shape.dim(0);
- const int n_cell = input_to_output_weights_shape.dim(0);
- const int n_output = recurrent_to_output_weights_shape.dim(1);
- if (input_shape.rank() == 3)
- {
- if (op.param().time_major)
- output->applyShape(ir::Shape{input_shape.dim(0), n_batch, n_output});
- else
- output->applyShape(ir::Shape{n_batch, input_shape.dim(1), n_output});
- }
- else
- {
- assert(input_shape.rank() == 2);
- output->applyShape(ir::Shape{n_batch, n_output});
- }
- assert(output->buffer() != nullptr);
-
- auto output_state_out = _tensor_registry->getITensor(output_state_out_index);
- if (output_state_out != nullptr)
- {
- output_state_out->applyShape(ir::Shape{n_batch, n_output});
- assert(output_state_out->buffer() != nullptr);
- }
-
- auto cell_state_out = _tensor_registry->getITensor(cell_state_out_index);
- if (cell_state_out != nullptr)
- {
- cell_state_out->applyShape(ir::Shape{n_batch, n_cell});
- assert(cell_state_out->buffer() != nullptr);
- }
-
- auto scratch_buffer = _tensor_registry->getITensor(scratch_buffer_index);
- if (scratch_buffer != nullptr)
- {
- const auto input_to_input_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)};
- const auto recurrent_to_input_weights_index{
- op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)};
-
- const auto input_to_input_weights_shape =
- _tensor_registry->getITensor(input_to_input_weights_index)->getShape();
- bool has_input_to_input_weights =
- input_to_input_weights_shape.dim(0) != 0 && input_to_input_weights_shape.dim(1) != 0;
-
- const auto recurrent_to_input_weights_shape =
- _tensor_registry->getITensor(recurrent_to_input_weights_index)->getShape();
- bool has_recurrent_to_input_weights = recurrent_to_input_weights_shape.dim(0) != 0 &&
- recurrent_to_input_weights_shape.dim(1) != 0;
-
- // NOTE The cell_to_input_weights do not exist in non-peephole although regular LSTM(non-CIFG).
- // true: no CIFG
- // false: CIFG
- bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
- if (has_cifg_param)
- {
- scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 4});
- }
- else
- {
- scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 3});
- }
- assert(scratch_buffer->buffer() != nullptr);
- }
-}
-
-void DynamicShapeInferer::visit(const ir::operation::MatrixBandPart &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::MatrixBandPart::INPUT));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::OneHot &op)
-{
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- auto indices_ind = op.getInputs().at(ir::operation::OneHot::INDICES);
- const auto &indices = _tensor_registry->getITensor(indices_ind);
- auto indices_shape = indices->getShape();
-
- auto depth_ind = op.getInputs().at(ir::operation::OneHot::DEPTH);
- const auto &depth = _tensor_registry->getITensor(depth_ind);
-
- if (!indices->is_dynamic() && !depth->is_dynamic())
- {
- return;
- }
-
- int32_t *depth_buf = reinterpret_cast<int32_t *>(depth->buffer());
- assert(depth_buf);
- const auto axis_val = op.param().axis;
-
- ir::Shape new_shape = shape_inference::inferOnehotShape(indices_shape, *depth_buf, axis_val);
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Pack &op)
-{
- bool is_any_of_inputs_dynamic = [&]() -> bool {
- for (uint32_t i = 0; i < op.getInputs().size(); ++i)
- {
- const auto &input = _tensor_registry->getITensor(op.getInputs().at(i));
- if (input->is_dynamic())
- {
- return true;
- }
- }
- return false;
- }();
-
- const auto input_idx{op.getInputs().at(0)};
- const auto &input = _tensor_registry->getITensor(input_idx);
- auto input_shape = input->getShape();
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- if (!is_any_of_inputs_dynamic && !output->is_dynamic())
- return;
-
- const auto rank = input_shape.rank() + 1;
- const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
- const auto num = op.param().num;
-
- assert(0 <= axis && axis < rank);
-
- ir::Shape new_shape = shape_inference::inferPackShape(input_shape, axis, rank, num);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Pad &op)
-{
- // check if output is not dynamic
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- auto input_ind = op.getInputs().at(ir::operation::Pad::Input::INPUT);
- auto input = _tensor_registry->getITensor(input_ind);
-
- auto pad_ind = op.getInputs().at(ir::operation::Pad::Input::PAD);
- auto pad = _tensor_registry->getITensor(pad_ind);
-
- // check if input and output are not dynamic
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- int32_t *pad_buf = reinterpret_cast<int32_t *>(pad->buffer());
- assert(pad_buf);
-
- auto output_shape =
- shape_inference::inferPadShape(input->getShape(), pad_buf, pad->getShape().num_elements());
-
- // change output shape and reallocate output tensor memory
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Permute & /* op */)
-{
- // NOTE Permute is a special operation which does not do shape inference before the actual
- // function(kernel) execution. Shape inference and output allocation will be done in the kernel
- // on-the-fly, as it must support inter-backend inference/allocation.
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Pow &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::Pow::Input::LHS),
- op.getInputs().at(ir::operation::Pow::Input::RHS));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Range &op)
-{
- // check if output is not dynamic
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- // from op, access the buffer of second input to read new shape
- auto start_idx = op.getInputs().at(ir::operation::Range::Input::START);
- auto start_tensor = _tensor_registry->getITensor(start_idx);
-
- auto limit_idx = op.getInputs().at(ir::operation::Range::Input::LIMIT);
- auto limit_tensor = _tensor_registry->getITensor(limit_idx);
-
- auto delta_idx = op.getInputs().at(ir::operation::Range::Input::DELTA);
- auto delta_tensor = _tensor_registry->getITensor(delta_idx);
-
- if (!start_tensor->is_dynamic() && !limit_tensor->is_dynamic() && !delta_tensor->is_dynamic() &&
- !output->is_dynamic())
- return;
-
- ir::Shape new_shape;
- if (output->data_type() == ir::DataType::FLOAT32)
- {
- new_shape =
- shape_inference::inferRangeShape<float>(*reinterpret_cast<float *>(start_tensor->buffer()),
- *reinterpret_cast<float *>(limit_tensor->buffer()),
- *reinterpret_cast<float *>(delta_tensor->buffer()));
- }
- else if (output->data_type() == ir::DataType::INT32)
- {
- new_shape = shape_inference::inferRangeShape<int32_t>(
- *reinterpret_cast<int32_t *>(start_tensor->buffer()),
- *reinterpret_cast<int32_t *>(limit_tensor->buffer()),
- *reinterpret_cast<int32_t *>(delta_tensor->buffer()));
- }
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Reduce &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Reduce::Input::INPUT)};
- const auto &input = _tensor_registry->getITensor(input_idx);
- auto input_shape = input->getShape();
-
- const auto axes_idx{op.getInputs().at(ir::operation::Reduce::Input::AXES)};
- const auto &axes = _tensor_registry->getITensor(axes_idx);
-
- if (!input->is_dynamic())
- return;
-
- std::vector<int32_t> axes_vec;
- for (uint32_t i = 0; i < axes->getShape().num_elements(); ++i)
- {
- const auto buffer = axes->buffer() + axes->calcOffset({i});
- switch (axes->data_type())
- {
- case ir::DataType::INT32:
- {
- axes_vec.emplace_back(*reinterpret_cast<const int32_t *>(buffer));
- break;
- }
- case ir::DataType::INT64:
- {
- axes_vec.emplace_back(*reinterpret_cast<const int64_t *>(buffer));
- break;
- }
- default:
- throw std::runtime_error("DynamicShapeInferer " + op.name() + ": Not supported data type");
- break;
- }
- }
- const auto keep_dims = op.param().keep_dims;
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- ir::Shape new_shape = shape_inference::inferReduceShape(input_shape, axes_vec, keep_dims);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Reshape &op)
-{
- // check if output is not dynamic
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
- auto input = _tensor_registry->getITensor(input_ind);
-
- /*
- Here, the state after compilation (satic shape inference) could be one of the following:
-
- input1 input2 (or option) output execution-time shape inf required
- ------------------------------------ --------------------------------
- case 1) static const static X
- case 2) static placeholder dynamic O
- case 3) dynamic const dynamic O
- case 4) dynamic placeholder dynamic O
-
- Then nnfw_apply_tensorinf() could change input dynamic.
- So, in this method, we could have one more state and we have to re-calculate shape
- for this shape.
-
- case 5) dynamic const static O
-
- So, only when both input1 and ouput are static, we can skip dynamic shape inference.
- */
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- // New shape is given by second input tensor
- if (op.getInputs().size() == 2)
- {
- // from op, access the buffer of second input to read new shape
- auto new_shape_ind = op.getInputs().at(ir::operation::Reshape::Input::SHAPE);
-
- // getting output shape by reading new_shape tensor buffer
- auto new_shape = _tensor_registry->getITensor(new_shape_ind);
- assert(new_shape);
-
- int32_t *new_shape_buf = reinterpret_cast<int32_t *>(new_shape->buffer());
- assert(new_shape_buf);
-
- auto output_shape = shape_inference::inferReshapeShape(
- new_shape_buf, new_shape->getShape().num_elements(), input->getShape().num_elements());
-
- // if shape is changed, change output shape and reallocate output tensor memory
- if (output_shape != output->getShape() || output->buffer() == nullptr)
- {
- // change on output shape
- output->applyShape(output_shape);
- }
- assert(output->buffer() != nullptr);
- }
- // New shape is given by option
- else if (op.param().new_shape.size() != 0)
- {
- // Let's check the new_shape option
- auto shape = op.param().new_shape;
- auto output_shape = shape_inference::inferReshapeShape(shape.data(), shape.size(),
- input->getShape().num_elements());
-
- // if shape is changed, change output shape and reallocate output tensor memory
- if (output_shape != output->getShape() || output->buffer() == nullptr)
- {
- // change on output shape
- output->applyShape(output_shape);
- }
- assert(output->buffer() != nullptr);
- }
- else
- {
- throw std::runtime_error("Reshape: new shape is missing");
- return;
- }
-}
-
-void DynamicShapeInferer::visit(const ir::operation::ResizeBilinear &op)
-{
- // check if output is not dynamic
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
- auto input = _tensor_registry->getITensor(input_ind);
-
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- // getting output shape from input shape and Params
- int32_t height_out, width_out;
- if (op.getInputs().size() == 2)
- {
- auto size_ind = op.getInputs().at(ir::operation::ResizeBilinear::Input::SIZE);
- auto size = _tensor_registry->getITensor(size_ind);
- if (size->data_type() == ir::DataType::INT32)
- {
- auto size_buf = reinterpret_cast<const int32_t *>(size->buffer());
- height_out = size_buf[0];
- width_out = size_buf[1];
- }
- else
- {
- throw std::runtime_error("DynamicShapeInferer ResizeBilinear : Unsupported data type");
- }
- }
- else
- {
- height_out = op.param().height_out;
- width_out = op.param().width_out;
- }
- auto output_shape =
- shape_inference::inferResizeBilinearShape(input->getShape(), height_out, width_out);
-
- // if shape is changed, change output shape and reallocate output tensor memory
- if (output_shape != output->getShape() || output->buffer() == nullptr)
- {
- // change on output shape
- output->applyShape(output_shape);
- }
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Reverse &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Reverse::INPUT));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Select &op)
-{
- const auto input_cond_idx = op.getInputs().at(ir::operation::Select::Input::CONDITION);
- const auto &input_cond = _tensor_registry->getITensor(input_cond_idx);
-
- const auto input_true_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_TRUE);
- const auto &input_true = _tensor_registry->getITensor(input_true_idx);
-
- const auto input_false_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_FALSE);
- const auto &input_false = _tensor_registry->getITensor(input_false_idx);
-
- if ((!input_cond->is_dynamic()) && (!input_true->is_dynamic()) && (!input_false->is_dynamic()))
- {
- return;
- }
-
- auto input_cond_shape = input_cond->getShape();
- auto input_true_shape = input_true->getShape();
- auto input_false_shape = input_false->getShape();
-
- // Select output shpae
- ir::Shape new_shape =
- shape_inference::inferSelectShape(input_cond_shape, input_true_shape, input_false_shape);
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Shape &op)
-{
- const auto input_idx{op.getInputs().at(0)};
- const auto &input = _tensor_registry->getITensor(input_idx);
- auto input_shape = input->getShape();
-
- if (!input->is_dynamic())
- return;
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- ir::Shape output_shape;
- output_shape.append(input_shape.rank());
-
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Slice &op)
-{
- const auto input_index{op.getInputs().at(ir::operation::Slice::Input::INPUT)};
- const auto input = _tensor_registry->getITensor(input_index);
- const auto begins_index{op.getInputs().at(ir::operation::Slice::Input::BEGINS)};
- const auto begins = _tensor_registry->getITensor(begins_index);
- const auto sizes_index{op.getInputs().at(ir::operation::Slice::Input::SIZES)};
- const auto sizes = _tensor_registry->getITensor(sizes_index);
- auto output_index = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_index);
-
- if (!(input->is_dynamic() || begins->is_dynamic() || sizes->is_dynamic() || output->is_dynamic()))
- {
- return;
- }
-
- ir::Shape input_shape = input->getShape();
- auto begins_buf = reinterpret_cast<const int32_t *>(begins->buffer());
- auto sizes_buf = reinterpret_cast<const int32_t *>(sizes->buffer());
-
- ir::Shape new_shape = shape_inference::inferSliceShape(input_shape, begins_buf, sizes_buf);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Softmax &op)
-{
- handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Softmax::INPUT));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::SpaceToBatchND &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
- const auto block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
- const auto padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
- auto output_idx{op.getOutputs().at(0)};
-
- const auto &input = _tensor_registry->getITensor(input_idx);
- const auto &block_shape = _tensor_registry->getITensor(block_shape_idx);
- const auto &padding = _tensor_registry->getITensor(padding_idx);
- auto output = _tensor_registry->getITensor(output_idx);
-
- if (!(input->is_dynamic() || block_shape->is_dynamic() || padding->is_dynamic() ||
- output->is_dynamic()))
- {
- return;
- }
-
- auto input_shape = input->getShape();
- auto block_shape_shape = block_shape->getShape();
- auto padding_shape = padding->getShape();
-
- auto block_shape_data = reinterpret_cast<int32_t *>(block_shape->buffer());
- auto padding_data = reinterpret_cast<int32_t *>(padding->buffer());
-
- ir::Shape new_shape = shape_inference::inferSpaceToBatchNDShape(
- input_shape, block_shape_shape, padding_shape, block_shape_data, padding_data);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Split &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Split::Input::INPUT)};
- const auto &input = _tensor_registry->getITensor(input_idx);
-
- // Return if all tensors are not dynamic
- bool has_dynamic = false;
- for (const auto output_idx : op.getOutputs())
- {
- auto output = _tensor_registry->getITensor(output_idx);
- has_dynamic |= output->is_dynamic();
- }
- if (!input->is_dynamic() && !has_dynamic)
- {
- return;
- }
-
- auto input_shape = input->getShape();
-
- const auto axis_idx{op.getInputs().at(ir::operation::Split::Input::AXIS)};
- const auto &axis = _tensor_registry->getITensor(axis_idx);
-
- auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
- const auto num_splits = op.param().num_splits;
- const auto rank = input_shape.rank();
- axis_value = axis_value < 0 ? axis_value + rank : axis_value;
-
- assert(0 <= axis_value && axis_value < rank);
-
- ir::Shape new_shape = shape_inference::inferSplitShape(input_shape, axis_value, num_splits);
- for (int out_tensor_idx = 0; out_tensor_idx < num_splits; out_tensor_idx++)
- {
- auto output_ind = op.getOutputs().at(out_tensor_idx);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
- }
-}
-
-void DynamicShapeInferer::visit(const ir::operation::SquaredDifference &op)
-{
- handleBinaryArithmeticOp(op, op.getInputs().at(ir::operation::SquaredDifference::Input::LHS),
- op.getInputs().at(ir::operation::SquaredDifference::Input::RHS));
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Squeeze &op)
-{
- const auto input_idx{op.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
- const auto &input = _tensor_registry->getITensor(input_idx);
-
- if (!input->is_dynamic())
- {
- return;
- }
-
- auto input_shape = input->getShape();
-
- // Squeeze output shpae
- ir::Shape new_shape = shape_inference::inferSqueezeShape(input_shape, op.param());
-
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::StridedSlice &op)
-{
-
- const auto input_index{op.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
- auto input = _tensor_registry->getITensor(input_index);
- ir::Shape input_shape = input->getShape();
-
- const auto starts_index{op.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
- auto starts = _tensor_registry->getITensor(starts_index);
-
- const auto ends_index{op.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
- auto ends = _tensor_registry->getITensor(ends_index);
-
- const auto strides_index{op.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
- auto strides = _tensor_registry->getITensor(strides_index);
-
- if (!(input->is_dynamic() || starts->is_dynamic() || ends->is_dynamic() || strides->is_dynamic()))
- {
- return;
- }
-
- const auto begin_mask = op.param().begin_mask;
- const auto end_mask = op.param().end_mask;
- const auto shrink_axis_mask = op.param().shrink_axis_mask;
- const auto rank = input_shape.rank();
-
- auto op_params = shape_inference::buildStridedSliceParams(
- reinterpret_cast<uint32_t *>(starts->buffer()), reinterpret_cast<uint32_t *>(ends->buffer()),
- reinterpret_cast<uint32_t *>(strides->buffer()), begin_mask, end_mask, shrink_axis_mask,
- rank);
-
- auto output_index = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_index);
-
- ir::Shape output_shape =
- onert::shape_inference::inferStridedSliceShape(input_shape, op_params, rank);
-
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Tile &op)
-{
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- auto input_idx = op.getInputs().at(ir::operation::Tile::Input::INPUT);
- auto input = _tensor_registry->getITensor(input_idx);
-
- auto multiplier_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
- auto multiplier = _tensor_registry->getITensor(multiplier_idx);
-
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- auto input_shape = input->getShape();
- auto multiplier_buffer = reinterpret_cast<const int32_t *>(multiplier->buffer());
- assert(multiplier_buffer);
-
- auto output_shape =
- shape_inference::inferTileShape(input_shape, multiplier_buffer, multiplier->dimension(0));
-
- // set output shape and output buffer
- output->applyShape(output_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Transpose &op)
-{
- // check if output is not dynamic
- auto output_ind = op.getOutputs().at(0);
- auto output = _tensor_registry->getITensor(output_ind);
-
- // from op, access the buffer of second input to read new shape
- auto input_ind = op.getInputs().at(ir::operation::Transpose::Input::INPUT);
- auto input = _tensor_registry->getITensor(input_ind);
- auto input_shape = input->getShape();
-
- /*
- Here, the state after compilation (static shape inference) could be one of the following:
-
- input perms output execution-time shape inf required
- ------------------------------------ --------------------------------
- case 1) static const static X
- case 2) static non-const dynamic O
- case 3) dynamic const dynamic O
- case 4) dynamic non-const dynamic O
-
- So, only when both input1 and ouput are static, we can skip dynamic shape inference.
- */
- if ((!input->is_dynamic()) && (!output->is_dynamic()))
- return;
-
- auto perm_ind = op.getInputs().at(ir::operation::Transpose::Input::PERMUTATION);
- auto perm = _tensor_registry->getITensor(perm_ind);
-
- ir::Shape new_shape;
- // TODO Change perm->dimension(0) == 0 to perm->num_elements() == 0
- if (perm->dimension(0) == 0) // This condition means that perm is (n-1...0)
- {
- // Call by (n-1...0)
- new_shape = shape_inference::inferTransposeShape(input_shape, nullptr, 0);
- }
- else
- {
- // Check rank
- if (input->num_dimensions() != perm->getShape().num_elements())
- {
- throw std::runtime_error("DynamicShapeInferer failed, bad rank size: " +
- std::to_string(perm->getShape().num_elements()));
- }
-
- // set output shape, based on input and params
- const auto perm_buffer = reinterpret_cast<const int32_t *>(perm->buffer());
- new_shape = shape_inference::inferTransposeShape(input_shape, perm_buffer, perm->dimension(0));
- }
- output->applyShape(new_shape);
- assert(output->buffer() != nullptr);
-}
-
-void DynamicShapeInferer::visit(const ir::operation::Unpack &op)
-{
- // check if output is not dynamic
- const auto input_idx{op.getInputs().at(0)};
- const auto &input = _tensor_registry->getITensor(input_idx);
-
- if (!input->is_dynamic())
- return;
-
- auto input_shape = input->getShape();
-
- const auto rank = input_shape.rank();
- const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
- const auto num = op.param().num;
-
- assert(0 <= axis && axis < rank);
-
- ir::Shape new_shape = shape_inference::inferUnpackShape(input_shape, axis, rank);
-
- for (int out_tensor_idx = 0; out_tensor_idx < num; out_tensor_idx++)
- {
- auto output_ind = op.getOutputs().at(out_tensor_idx);
- auto output = _tensor_registry->getITensor(output_ind);
-
- output->applyShape(new_shape);
-
- assert(output->buffer() != nullptr);
- }
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ExecTime.cc b/runtime/onert/core/src/exec/ExecTime.cc
deleted file mode 100644
index 6bf2744a9..000000000
--- a/runtime/onert/core/src/exec/ExecTime.cc
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exec/ExecTime.h"
-
-#include <fstream>
-#include <cassert>
-#include <limits>
-#include <algorithm>
-
-namespace onert
-{
-namespace exec
-{
-
-int64_t ExecTime::getOperationExecTime(const backend::Backend *backend,
- const std::string &operation, bool quant,
- uint32_t op_size) const
-{
- auto found_backend = _measurements.find(backend);
- if (found_backend == _measurements.end())
- return NOT_FOUND; // no execution time for this backend
-
- auto found_operation_with_type = found_backend->second.find(operation);
- if (found_operation_with_type == found_backend->second.end())
- // no execution time for this operation
- return NOT_FOUND;
-
- auto found_operation = found_operation_with_type->second.find(quant);
- if (found_operation == found_operation_with_type->second.end())
- // no execution time for this operation
- return NOT_FOUND;
-
- auto found_size = found_operation->second.find(op_size);
- if (found_size != found_operation->second.end())
- return found_size->second; // found execution time
-
- // Try to interpolate
- if (found_operation->second.size() < 2)
- // not possible to do linear interpolation
- return found_operation->second.begin()->second;
-
- // if we reach here, then this means, that there is no record, that is equal to op_size
- auto upper_bound = found_operation->second.upper_bound(op_size); // > op_size
- auto lower_bound = upper_bound;
-
- if (upper_bound == found_operation->second.end()) // all values <= op_size
- {
- upper_bound--;
- lower_bound = upper_bound;
- lower_bound--;
- }
- else if (upper_bound == found_operation->second.begin()) // all values > op_size
- {
- upper_bound++;
- }
- else // op_size between
- {
- lower_bound--;
- }
-
- // Linear interpolation
- const auto x0 = static_cast<int64_t>(lower_bound->first); // size
- const auto x1 = static_cast<int64_t>(upper_bound->first); // size
- const int64_t y0 = lower_bound->second; // time
- const int64_t y1 = upper_bound->second; // time
- const auto x = static_cast<int64_t>(op_size);
-
- int64_t interpolated_value = y0 + (x - x0) * (y1 - y0) / (x1 - x0);
-
- // In some cases ops with smaller inputs is executed slower than the one
- // with larger inputs, more likely because of a backend's load difference
- if (interpolated_value < 0 && x > x1)
- {
- return y0;
- }
- // It must be non-positive ONLY if it's lesser than both of them
- assert(interpolated_value > 0 || x < x0);
-
- // execution time must be non-negative
- return std::max<int64_t>(interpolated_value, 1);
-}
-
-void ExecTime::updateOperationExecTime(const backend::Backend *backend,
- const std::string &operation, bool quant, uint32_t op_size,
- int64_t time)
-{
- // If the op is not implemented for some input, it should not be scheduled
- const auto &recs = _measurements[backend][operation][quant];
- if (time == getMax() ||
- std::any_of(recs.begin(), recs.end(),
- [](std::pair<const uint32_t, const int64_t> p) { return p.second == getMax(); }))
- {
- _measurements[backend][operation][quant].clear();
- _measurements[backend][operation][quant].emplace(op_size, getMax());
- }
- else
- {
- auto it = _measurements[backend][operation][quant].emplace(op_size, time);
- if (!it.second)
- {
- // affect of the last measurement is bigger than the previous ones:
- // this prefers new metrics than older once, so will adapt backend changes
- it.first->second = (it.first->second + time) / 2;
- }
- }
-}
-
-void ExecTime::updatePermuteTime(const backend::Backend *from_backend,
- const backend::Backend *to_backend, bool quant, uint32_t op_size,
- int64_t time)
-{
- updateOperationExecTime(from_backend, to_backend->config()->id(), quant, op_size, time);
-}
-
-int64_t ExecTime::getPermuteTime(const backend::Backend *from_backend,
- const backend::Backend *to_backend, bool quant,
- uint32_t op_size) const
-{
- return getOperationExecTime(from_backend, to_backend->config()->id(), quant, op_size);
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ExecTime.h b/runtime/onert/core/src/exec/ExecTime.h
deleted file mode 100644
index 846d0930b..000000000
--- a/runtime/onert/core/src/exec/ExecTime.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_EXEC_TIME_H__
-#define __ONERT_EXEC_EXEC_TIME_H__
-
-#include "backend/Backend.h"
-#include "backend/IConfig.h"
-#include "JSONExecTime.h"
-#include <memory>
-#include <limits>
-#include <map>
-#include <unordered_map>
-#include <vector>
-
-namespace onert
-{
-namespace exec
-{
-class ExecTime
-{
-public:
- explicit ExecTime(const std::vector<const backend::Backend *> &backends)
- : _json(backends, _measurements)
- {
- }
-
-public:
- /**
- * @brief Get exec time of an operation with input size
- * or linearly interpolated value based on size if there is no record for given size
- *
- * @param[in] backend id of a backend
- * @param[in] operation name of an operation
- * @param[in] quant if input type quantized
- * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
- * @return execution time for given input sizes
- * -1 if there are no records for given parameters (backend, op, quantization).
- */
- int64_t getOperationExecTime(const backend::Backend *backend, const std::string &operation,
- bool quant, uint32_t op_size) const;
- /**
- * @brief Update exec time of the operation on a backend with given input size or
- * add new entity if there is no one.
- *
- * @param[in] backend id of a backend
- * @param[in] operation name of an operation
- * @param[in] quant if input type quantized
- * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
- * @param[in] time real measured value
- */
- void updateOperationExecTime(const backend::Backend *backend, const std::string &operation,
- bool quant, uint32_t op_size, int64_t time);
- /**
- * @brief Get the permute time from one backend to another
- *
- * @param[in] from_backend
- * @param[in] to_backend
- * @param[in] quant if input type quantized
- * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
- * @return permutation time for operation size
- */
- int64_t getPermuteTime(const backend::Backend *from_backend, const backend::Backend *to_backend,
- bool quant, uint32_t op_size) const;
- /**
- * @brief Update permute time from one backend to another
- *
- * @param[in] from_backend
- * @param[in] to_backend
- * @param[in] quant if input type quantized
- * @param[in] time measured permutation time
- * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
- */
- void updatePermuteTime(const backend::Backend *from_backend, const backend::Backend *to_backend,
- bool quant, uint32_t op_size, int64_t time);
- /**
- * @brief Get the max value of int32_t in int64_t
- * @return max value
- */
- static int64_t getMax() { return _MAX; }
- /**
- * @brief Update metrics file with new data.
- */
- void uploadOperationsExecTime() const { _json.uploadOperationsExecTime(); }
- static const int64_t NOT_FOUND = -1;
-
-private:
- /// @brief Measurement data, which is shared with serializer
- MeasurementData _measurements;
- // int64_t::max may cause integer overflow
- static const int64_t _MAX = std::numeric_limits<int32_t>::max();
- /// @brief Serializer
- JSON _json;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_EXEC_TIME_H__
diff --git a/runtime/onert/core/src/exec/Execution.cc b/runtime/onert/core/src/exec/Execution.cc
deleted file mode 100644
index 21fdd9c05..000000000
--- a/runtime/onert/core/src/exec/Execution.cc
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exec/Execution.h"
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace exec
-{
-
-Execution::Execution(const std::shared_ptr<ExecutorMap> &executors) : _executors{executors}
-{
- assert(executors != nullptr);
- assert(executors->at(ir::SubgraphIndex{0}) != nullptr);
- const auto &primary_subg = primary_subgraph();
- _io_desc.inputs.resize(primary_subg.getInputs().size());
- _io_desc.outputs.resize(primary_subg.getOutputs().size());
-}
-
-void Execution::changeInputShape(const ir::IOIndex &index, const ir::Shape &new_shape)
-{
- // This will be used later to set input tensor dynamic
- // Note that 'compiled' model will not be updated with new_shape
- // but new_shape will change model input shape while 'running' the model
- _io_desc.dynamic_input_shapes[index] = new_shape;
-
- VERBOSE(Execution) << "Model input shape will be changed at the start of execute()"
- << "(index: " << index.value() << ")" << std::endl;
-}
-
-// TODO Remove default parameter
-void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length,
- ir::Layout layout)
-{
- const auto input_index = primary_subgraph().getInputs().at(index);
- const auto info = primary_subgraph().operands().at(input_index).info();
-
- // TODO handle when (!buffer && length != 0) : setting the input as an optional tensor
-
- // check if size enough for input is passed
- // if input_shape_sig is set, input_shape_sig overrides shape in info
- // note: input_shape_sig contains shape passed by nnfw_set_input_tensorinfo()
- {
- auto input_shape_sig = _io_desc.dynamic_input_shapes.find(index);
- auto size_required = (input_shape_sig != _io_desc.dynamic_input_shapes.end())
- ? input_shape_sig->second.num_elements() *
- onert::ir::sizeOfDataType(info.typeInfo().type())
- : info.total_size();
-
- if (length < size_required)
- {
- throw std::runtime_error{"Too small length"};
- }
- }
-
- _io_desc.inputs.at(index.value()) = std::make_unique<InputDesc>(info, buffer, length, layout);
-}
-
-// TODO Remove default parameter
-void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
- const void *buffer, size_t length, ir::Layout layout)
-{
- auto info = ir::OperandInfo::createStaticInfo(shape, type);
-
- if (length < info.total_size())
- {
- throw std::runtime_error{"Too small length"};
- }
-
- _io_desc.inputs.at(index.value()) = std::make_unique<InputDesc>(info, buffer, length, layout);
-}
-
-// TODO Remove default parameter
-void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout)
-{
- const auto output_index = primary_subgraph().getOutputs().at(index);
- const auto info = primary_subgraph().operands().at(output_index).info();
-
- if (length < info.total_size())
- {
- throw std::runtime_error{"Too small length"};
- }
-
- _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(info, buffer, length, layout);
-}
-
-// TODO Remove default parameter
-void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type,
- const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout)
-{
- auto info = ir::OperandInfo::createStaticInfo(shape, type);
-
- if (length < info.total_size())
- {
- throw std::runtime_error{"Too small length"};
- }
-
- _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(info, buffer, length, layout);
-}
-
-void Execution::setInputLayout(const ir::IOIndex &index, ir::Layout layout)
-{
- const auto &input_desc = _io_desc.inputs.at(index.value());
- _io_desc.inputs.at(index.value()) =
- std::make_unique<InputDesc>(input_desc->info, input_desc->buffer, input_desc->size, layout);
-}
-
-void Execution::setOutputLayout(const ir::IOIndex &index, ir::Layout layout)
-{
- const auto &output_desc = _io_desc.outputs.at(index.value());
- _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(
- output_desc->info, output_desc->buffer, output_desc->size, layout);
-}
-
-void Execution::execute()
-{
- VERBOSE(Execution) << "Start execution" << std::endl;
-
- primary_executor()->execute(_io_desc);
- finished = true;
-
- VERBOSE(Execution) << "Execution finished" << std::endl;
-}
-
-void Execution::startExecute()
-{
- VERBOSE(Execution) << "Create asynchronous execution thread" << std::endl;
-
- _exec_thread = std::make_unique<std::thread>(&Execution::execute, this);
-}
-
-void Execution::waitFinish()
-{
- VERBOSE(Execution) << "Wait to finish execution" << std::endl;
-
- _exec_thread->join();
- finished = true;
-}
-
-bool Execution::isFinished(void) const { return finished; }
-
-ir::Shape Execution::getInputShape(ir::IOIndex ind) const
-{
- auto itr = _io_desc.dynamic_input_shapes.find(ind);
- if (itr == _io_desc.dynamic_input_shapes.end())
- {
- auto operand_idx = primary_subgraph().getInputs().at(ind.value());
- return primary_subgraph().operands().at(operand_idx).shape();
- }
- else
- {
- return itr->second;
- }
-}
-
-ir::Shape Execution::getOutputShape(ir::IOIndex ind) const
-{
- if (!isFinished())
- throw std::runtime_error("Cannot get output shape before execution is finished");
-
- const auto &output_desc = _io_desc.outputs.at(ind.value());
-
- return output_desc->info.shape();
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ExecutionObservee.cc b/runtime/onert/core/src/exec/ExecutionObservee.cc
deleted file mode 100644
index ddb1fb6a0..000000000
--- a/runtime/onert/core/src/exec/ExecutionObservee.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ExecutionObservee.h"
-
-namespace onert
-{
-namespace exec
-{
-
-void ExecutionObservee::add(std::unique_ptr<IExecutionObserver> observer)
-{
- _observers.emplace_back(std::move(observer));
-}
-
-void ExecutionObservee::notifyModelBegin(IExecutor *executor)
-{
- for (auto &o : _observers)
- {
- o->handleBegin(executor);
- }
-}
-
-void ExecutionObservee::notifyModelEnd(IExecutor *executor)
-{
- for (auto &o : _observers)
- {
- o->handleEnd(executor);
- }
-}
-
-void ExecutionObservee::notifyJobBegin(IExecutor *executor, const ir::OpSequence *op_seq,
- const backend::Backend *backend)
-{
- for (auto &o : _observers)
- {
- o->handleBegin(executor, op_seq, backend);
- }
-}
-
-void ExecutionObservee::notifyJobEnd(IExecutor *executor, const ir::OpSequence *op_seq,
- const backend::Backend *backend)
-{
- for (auto &o : _observers)
- {
- o->handleEnd(executor, op_seq, backend);
- }
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ExecutionObservee.h b/runtime/onert/core/src/exec/ExecutionObservee.h
deleted file mode 100644
index 49d409a3a..000000000
--- a/runtime/onert/core/src/exec/ExecutionObservee.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_EXECUTION_OBSERVEE_H__
-#define __ONERT_EXEC_EXECUTION_OBSERVEE_H__
-
-#include <list>
-
-#include "exec/ExecutionObservers.h"
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief Class that
- *
- */
-class ExecutionObservee
-{
-public:
- /**
- * @brief Register an observer
- *
- * @param observer Observer to be added
- */
- void add(std::unique_ptr<IExecutionObserver> observer);
- void notifyModelBegin(IExecutor *executor);
- void notifyModelEnd(IExecutor *executor);
- void notifyJobBegin(IExecutor *executor, const ir::OpSequence *op_seq,
- const backend::Backend *backend);
- void notifyJobEnd(IExecutor *executor, const ir::OpSequence *op_seq,
- const backend::Backend *backend);
-
-private:
- std::list<std::unique_ptr<IExecutionObserver>> _observers;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_EXECUTION_OBSERVEE__
diff --git a/runtime/onert/core/src/exec/ExecutionObservers.cc b/runtime/onert/core/src/exec/ExecutionObservers.cc
deleted file mode 100644
index 5883d9a1c..000000000
--- a/runtime/onert/core/src/exec/ExecutionObservers.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exec/ExecutionObservers.h"
-
-#include <string>
-
-#include "util/logging.h"
-#include "exec/IExecutor.h"
-#include "misc/polymorphic_downcast.h"
-#include "ir/OpSequence.h"
-#include "util/EventWriter.h"
-
-namespace onert
-{
-
-namespace exec
-{
-
-void ProfileObserver::handleBegin(onert::exec::IExecutor *, const ir::OpSequence *,
- const onert::backend::Backend *backend)
-{
- _timer = backend->config()->timer();
- if (_timer == nullptr)
- throw std::runtime_error("To profile backend timer() method must be implemented");
- _timer->handleBegin();
-}
-
-void ProfileObserver::handleEnd(IExecutor *exec, const ir::OpSequence *op_seq,
- const backend::Backend *backend)
-{
- _timer->handleEnd();
- const auto timer_res = _timer->getTime();
-
- // NOTE This assumes there is just one operation in a op_seq
- const auto &node = _graph.operations().at(op_seq->operations().at(0));
- auto node_name = node.name();
- VERBOSE(ProfileInfo) << "Time for " << node_name << " : " << timer_res << std::endl;
-
- // fill ExecTime:
- bool is_quantized = exec->graph().operands().at(node.getInputs().at(0)).typeInfo().type() ==
- ir::DataType::QUANT_UINT8_ASYMM;
-
- uint32_t size = 0;
- for (const auto &ind : node.getInputs() + node.getOutputs())
- {
- size += exec->graph().operands().at(ind).info().total_size();
- }
- if (node_name == "Permute")
- {
- // TODO Change it to updateOperationExecTime()
- _et->updatePermuteTime(backend, backend, is_quantized, size, timer_res);
- }
- else
- {
- _et->updateOperationExecTime(backend, node_name, is_quantized, size, timer_res);
- }
-};
-
-ChromeTracingObserver::ChromeTracingObserver(const std::string &filepath, const ir::Graph &graph)
- : _base_filepath(filepath), _recorder{}, _collector{&_recorder}, _graph{graph}
-{
-}
-
-ChromeTracingObserver::~ChromeTracingObserver()
-{
- try
- {
- EventWriter{_recorder}.writeToFiles(_base_filepath);
- }
- catch (const std::exception &e)
- {
- std::cerr << "E: Fail to record event in ChromeTracingObserver: " << e.what() << std::endl;
- }
-}
-
-void ChromeTracingObserver::handleBegin(IExecutor *)
-{
- _collector.onEvent(EventCollector::Event{EventCollector::Edge::BEGIN, "runtime", "Graph"});
-}
-
-void ChromeTracingObserver::handleBegin(IExecutor *, const ir::OpSequence *op_seq,
- const backend::Backend *backend)
-{
- std::string backend_id = backend->config()->id();
- _collector.onEvent(EventCollector::Event{EventCollector::Edge::BEGIN, backend_id,
- opSequenceTag(op_seq, _graph.operations())});
-}
-
-void ChromeTracingObserver::handleEnd(IExecutor *, const ir::OpSequence *op_seq,
- const backend::Backend *backend)
-{
- std::string backend_id = backend->config()->id();
- _collector.onEvent(EventCollector::Event{EventCollector::Edge::END, backend_id,
- opSequenceTag(op_seq, _graph.operations())});
-}
-
-void ChromeTracingObserver::handleEnd(IExecutor *)
-{
- _collector.onEvent(EventCollector::Event{EventCollector::Edge::END, "runtime", "Graph"});
-}
-
-std::string ChromeTracingObserver::opSequenceTag(const ir::OpSequence *op_seq,
- const ir::Operations &operations)
-{
- if (op_seq->size() == 0)
- return "Empty OpSequence";
-
- const auto &first_op_idx = op_seq->operations().at(0);
- const auto &first_op_node = operations.at(first_op_idx);
- std::string tag = "$" + std::to_string(first_op_idx.value());
- tag += " " + first_op_node.name();
- if (op_seq->size() > 1)
- {
- tag += " (+" + std::to_string(op_seq->size() - 1) + ")";
- }
- return tag;
-}
-
-} // namespace exec
-
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ExecutionObservers.h b/runtime/onert/core/src/exec/ExecutionObservers.h
deleted file mode 100644
index f8c2acca5..000000000
--- a/runtime/onert/core/src/exec/ExecutionObservers.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_OBSREVERS_H__
-#define __ONERT_EXEC_OBSREVERS_H__
-
-#include "exec/IFunction.h"
-#include "ir/OpSequence.h"
-#include "ExecTime.h"
-#include "util/ITimer.h"
-#include "exec/IExecutor.h"
-#include "util/EventCollector.h"
-#include "util/EventRecorder.h"
-
-namespace onert
-{
-namespace exec
-{
-class IExecutionObserver
-{
-public:
- /// @brief Invoked just before model (not individual operation) execution begins
- virtual void handleBegin(IExecutor *) { return; }
-
- virtual void handleBegin(IExecutor *, const ir::OpSequence *, const backend::Backend *) = 0;
- virtual void handleEnd(IExecutor *, const ir::OpSequence *, const backend::Backend *) = 0;
-
- /// @brief Invoked just after model (not individual operation) execution ends
- virtual void handleEnd(IExecutor *) { return; }
-
- virtual ~IExecutionObserver() = default;
-};
-
-class ProfileObserver : public IExecutionObserver
-{
-public:
- explicit ProfileObserver(std::shared_ptr<ExecTime> et, const ir::Graph &graph)
- : _et(std::move(et)), _graph(graph)
- {
- }
- void handleBegin(IExecutor *, const ir::OpSequence *, const backend::Backend *) override;
- void handleEnd(IExecutor *, const ir::OpSequence *, const backend::Backend *) override;
-
- void handleEnd(IExecutor *) override { _et->uploadOperationsExecTime(); }
-
-private:
- std::unique_ptr<util::ITimer> _timer;
- std::shared_ptr<ExecTime> _et;
- const ir::Graph &_graph;
-};
-
-class ChromeTracingObserver : public IExecutionObserver
-{
-public:
- ChromeTracingObserver(const std::string &filepath, const ir::Graph &graph);
- ~ChromeTracingObserver();
- void handleBegin(IExecutor *) override;
- void handleBegin(IExecutor *, const ir::OpSequence *, const backend::Backend *) override;
- void handleEnd(IExecutor *, const ir::OpSequence *, const backend::Backend *) override;
- void handleEnd(IExecutor *) override;
-
-private:
- static std::string opSequenceTag(const ir::OpSequence *op_seq, const ir::Operations &operations);
-
-private:
- const std::string &_base_filepath;
- EventRecorder _recorder;
- EventCollector _collector;
- const ir::Graph &_graph;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_OBSREVERS_H__
diff --git a/runtime/onert/core/src/exec/ExecutorBase.cc b/runtime/onert/core/src/exec/ExecutorBase.cc
deleted file mode 100644
index 018a0bba0..000000000
--- a/runtime/onert/core/src/exec/ExecutorBase.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ExecutorBase.h"
-
-#include "backend/ITensor.h"
-#include "backend/controlflow/UserTensor.h"
-#include "backend/cpu_common/Tensor.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace exec
-{
-
-ExecutorBase::ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_graph,
- const std::vector<backend::ITensor *> &input_tensors,
- const std::vector<backend::ITensor *> &output_tensors,
- const compiler::TensorRegistries &tensor_regs)
- : _lowered_graph{std::move(lowered_graph)}, _graph{_lowered_graph->graph()},
- _input_tensors{input_tensors}, _output_tensors{output_tensors}, _mutex()
-{
- // TODO Fix the way of knowing whether it is primary or not
- bool primary_executor = !(_input_tensors.empty() && _output_tensors.empty());
- if (!primary_executor)
- {
- auto build_input_tensor_list = [&](const onert::ir::OperandIndexSequence &ind_seq) {
- std::vector<backend::ITensor *> list;
- for (auto ind : ind_seq)
- {
- backend::ITensor *tensor = tensor_regs.getITensor(ind);
- assert(tensor != nullptr);
- list.push_back(tensor);
- }
- return list;
- };
- auto build_output_tensor_list = [&](const onert::ir::OperandIndexSequence &ind_seq) {
- std::vector<backend::ITensor *> list;
- for (auto ind : ind_seq)
- {
- backend::ITensor *tensor = tensor_regs.getITensor(ind);
- assert(tensor != nullptr);
- list.push_back(tensor);
- }
- return list;
- };
- _input_tensors = build_input_tensor_list(_graph.getInputs());
- _output_tensors = build_output_tensor_list(_graph.getOutputs());
- }
-}
-
-void ExecutorBase::execute(const std::vector<backend::ITensor *> &src_tensors,
- const std::shared_ptr<IPermuteFunction> &pre_fn)
-{
- // For thread-safe, use mutex
- // TODO: if all used backends on this executor are thread-safe,
- // do not need to use mutex (otherwise, use mutex)
- // Deadlock occurs when an Executor is called recursively.
- std::lock_guard<std::mutex> lock(_mutex);
-
- assert(src_tensors.size() == _graph.getInputs().size());
- assert(src_tensors.size() == _input_tensors.size());
- for (uint32_t n = 0; n < _graph.getInputs().size(); ++n)
- {
- // when user changes input shape, the input tensor is dynamic and its memory is not allocated.
- // This code find the info to allocate dynamic tensor, and allocate memory based on the source
- // tensor's shape set by caller.
- const auto src_tensor = src_tensors[n];
- auto input_tensor = _input_tensors[n];
- // If src_tensor or input_tensor is nullptr, pre_fn does not copy the tensors
- if (src_tensor != nullptr && input_tensor != nullptr)
- {
- const auto orig_input_shape = input_tensor->getShape();
- const auto changed_input_shape =
- convertShape(src_tensor->getShape(), src_tensor->layout(), input_tensor->layout());
- if (orig_input_shape != changed_input_shape)
- {
- input_tensor->set_dynamic();
- }
- }
- }
-
- // TODO Move calling permute_fn.run() into executeImpl()
- assert(pre_fn);
- pre_fn->run();
-
- executeImpl();
-}
-
-void ExecutorBase::execute(const IODescription &desc)
-{
- // For thread-safe, use mutex
- // TODO: if all used backends on this executor are thread-safe,
- // do not need to use mutex (otherwise, use mutex)
- std::lock_guard<std::mutex> lock(_mutex);
-
- // Set input(s)
- assert(_input_tensors.size() == desc.inputs.size());
- for (uint32_t i = 0; i < _input_tensors.size(); ++i)
- {
- // TODO Remove dynamic_cast
- auto *tensor = dynamic_cast<backend::controlflow::UserTensor *>(_input_tensors[i]);
- assert(tensor);
- auto input_shape = desc.dynamic_input_shapes.find(ir::IOIndex{i});
- if (input_shape != desc.dynamic_input_shapes.end())
- {
- tensor->set_dynamic();
- tensor->setShape(input_shape->second);
- }
- // TODO Check if (desc.inputs[i] == nullptr)
- // TODO Better design for ITensor? (we need const_cast as ITensor is writable)
- tensor->setBuffer(static_cast<uint8_t *>(const_cast<void *>(desc.inputs[i]->buffer)),
- desc.inputs[i]->size);
-
- handleDynamicInputTensor(ir::IOIndex{i}, desc);
- }
-
- assert(_output_tensors.size() == desc.outputs.size());
- for (uint32_t i = 0; i < _output_tensors.size(); ++i)
- {
- // TODO Remove dynamic_cast
- auto *tensor = dynamic_cast<backend::controlflow::UserTensor *>(_output_tensors[i]);
- assert(tensor);
- tensor->set_dynamic(); // It can't be resized but shape could change
- if (desc.outputs[i] == nullptr)
- throw std::runtime_error{"Output " + std::to_string(i) + "'s buffer is not set."};
- tensor->setBuffer(static_cast<uint8_t *>(desc.outputs[i]->buffer), desc.outputs[i]->size);
- }
-
- executeImpl();
-
- // Update output(s) desc
- for (uint32_t n = 0; n < _graph.getOutputs().size(); ++n)
- {
- ir::IOIndex output_index{n};
- // Optional output
- if (desc.outputs.at(n) == nullptr)
- {
- continue;
- }
- auto &output = *desc.outputs.at(n);
-
- // set shape of outputDesc to tensor shape since tensor can be dynamic
- const auto output_tensor_shape = _output_tensors[n]->getShape();
- output.info.shape(
- convertShape(output_tensor_shape, _output_tensors[n]->layout(), output.layout));
- }
-}
-
-/**
- * @brief Changes tensor shape and allocate memory
- * if input shape was changed by nnfw_set_input_tensorinfo()
- *
- * @note Cases are:
- * 1) static operand -> nnfw_set_input_tensorinfo() -> execute() -> execute()
- * (a) (b)
- *
- * at (a), operand is static, tensor is static - memory dealloc is not needed
- * (DynamicTensorManager cannot dealloc memory allocated by StaticTensorManager)
- * at (b), operand is static, tensor is dynamic - memory dealloc is needed
- *
- * 2) dynamic operand -> nnfw_set_input_tensorinfo() -> execute() -> execute()
- * (a) (b)
- *
- * at (a), operand is dynamic, tensor is dynamic - memory dealloc is not needed
- * since it has not been allocated yet
- * at (b), operand is dynamic, tensor is dynamic - memory dealloc is needed
- */
-void ExecutorBase::handleDynamicInputTensor(ir::IOIndex io_ind, const IODescription &desc)
-{
- auto shape_sig_found = desc.dynamic_input_shapes.find(io_ind);
- if (shape_sig_found != desc.dynamic_input_shapes.end())
- {
- auto changed_input_shape = shape_sig_found->second;
- _input_tensors[io_ind.value()]->applyShape(changed_input_shape);
- }
-}
-
-bool ExecutorBase::hasDynamicInput()
-{
- for (auto &tensor : _input_tensors)
- {
- if (tensor->is_dynamic())
- return true;
- }
- return false;
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ExecutorBase.h b/runtime/onert/core/src/exec/ExecutorBase.h
deleted file mode 100644
index 8a6ec9174..000000000
--- a/runtime/onert/core/src/exec/ExecutorBase.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_EXECUTOR_BASE_H__
-#define __ONERT_EXEC_EXECUTOR_BASE_H__
-
-#include <mutex>
-
-#include "IPermuteFunction.h"
-#include "exec/ExecutionObservers.h"
-#include "ShapeConverter.h"
-#include "exec/IExecutor.h"
-#include "compiler/LoweredGraph.h"
-#include "ir/LowerInfoMap.h"
-#include "backend/IConfig.h"
-#include "backend/Backend.h"
-#include "exec/ExecTime.h"
-#include "exec/IFunction.h"
-#include "backend/IDynamicTensorManager.h"
-#include "backend/ITensorManager.h"
-#include "exec/ExecutionObservee.h"
-#include "compiler/TensorRegistries.h"
-#include <list>
-
-namespace onert
-{
-namespace exec
-{
-
-class ExecutorBase : public IExecutor
-{
-public:
- /**
- * @brief Construct a new ExecutorBase object
- * @param graph Graph object
- * @param tensor_builders Tensor builders that are currently used
- */
- ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_graph,
- const std::vector<backend::ITensor *> &input_tensors,
- const std::vector<backend::ITensor *> &output_tensors,
- const compiler::TensorRegistries &tensor_regs);
-
- virtual ~ExecutorBase() = default;
-
- const ir::Graph &graph() final { return _graph; }
-
- /**
- * @brief Execute without IODescription
- *
- * @param src_tensor Tensor list that will be copied to input tensors of this
- * @param pre_fn The permutation function that copy from src_tensor to input tensors of this
- */
- void execute(const std::vector<backend::ITensor *> &src_tensors,
- const std::shared_ptr<IPermuteFunction> &pre_fn);
-
- void execute(const IODescription &desc) final;
-
- // Used only in Dataflow and Parallel Executors
- void setIndexedRanks(std::shared_ptr<ir::OperationIndexMap<int64_t>> ranks) final
- {
- _indexed_ranks = std::move(ranks);
- };
-
- virtual void executeImpl(void) = 0;
-
- void addObserver(std::unique_ptr<IExecutionObserver> ref) { _subject.add(std::move(ref)); };
-
- const std::vector<backend::ITensor *> &getInputTensors() const { return _input_tensors; }
-
- const std::vector<backend::ITensor *> &getOutputTensors() const { return _output_tensors; }
-
-protected:
- /**
- * @brief Returns @c true if any input tensor is dynamic; @c false if all are static tensors
- */
- bool hasDynamicInput();
-
-protected:
- ExecutionObservee _subject;
- std::shared_ptr<ir::OperationIndexMap<int64_t>> _indexed_ranks;
- std::unique_ptr<compiler::LoweredGraph> _lowered_graph;
- const ir::Graph &_graph;
- std::vector<backend::ITensor *> _input_tensors;
- std::vector<backend::ITensor *> _output_tensors;
- std::mutex _mutex;
-
-private:
- void handleDynamicInputTensor(ir::IOIndex input_index, const IODescription &desc);
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_EXECUTOR_BASE_H__
diff --git a/runtime/onert/core/src/exec/FunctionSequence.cc b/runtime/onert/core/src/exec/FunctionSequence.cc
deleted file mode 100644
index 8aefa5eeb..000000000
--- a/runtime/onert/core/src/exec/FunctionSequence.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exec/FunctionSequence.h"
-
-#include "ir/Operation.h"
-#include "backend/IDynamicTensorManager.h"
-#include "backend/ITensorRegistry.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace exec
-{
-
-void FunctionSequence::run()
-{
- if (_enable_dynamic_shape_inferer && _dynamic_tensor_ctx)
- {
- // acl_cl and acl_neon backend don't support dynamic shape.
- // _dynamic_tensor_ctx is always nullptr for acl_cl and acl_neon
- // Thus, those two bakends cannot reach here.
- if (_dynamic_tensor_ctx->op_seq->size() != _functions.size())
- throw std::runtime_error("operation and functions should be mapped one by one");
-
- auto op_seq_iter = _dynamic_tensor_ctx->op_seq->begin();
- for (const auto &function : _functions)
- {
- // set shape of output and allocate memory when needed
- auto &op = _dynamic_tensor_ctx->operations->at(*op_seq_iter);
- op.accept(*_dynamic_tensor_ctx->dynamic_shape_inferer);
-
- auto *sub_func_seq = dynamic_cast<FunctionSequence *>(function.get());
- if (sub_func_seq != nullptr)
- {
- sub_func_seq->enableDynamicShapeInferer(true);
- sub_func_seq->dynamic_tensor_ctx(dynamic_tensor_ctx());
- }
-
- // run kernel
- function->run();
-
- // deallocate input tensors which is no longer used
- _dynamic_tensor_ctx->dynamic_tensor_manager->deallocInput(*op_seq_iter);
-
- op_seq_iter++;
- }
- }
- else
- {
- for (const auto &function : _functions)
- {
- function->run();
- }
- }
-}
-
-void FunctionSequence::prepare()
-{
- for (const auto &function : _functions)
- {
- function->prepare();
- }
-}
-
-void FunctionSequence::append(std::unique_ptr<IFunction> &&function)
-{
- _functions.push_back(std::move(function));
-}
-
-void FunctionSequence::iterate(const std::function<void(IFunction &)> &fn)
-{
- for (const auto &func : _functions)
- {
- fn(*func);
- }
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/IPermuteFunction.h b/runtime/onert/core/src/exec/IPermuteFunction.h
deleted file mode 100644
index 94bc2e436..000000000
--- a/runtime/onert/core/src/exec/IPermuteFunction.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_I_PERMUTE_FUNCTION_H__
-#define __ONERT_EXEC_I_PERMUTE_FUNCTION_H__
-
-#include "feature/IndexIterator.h"
-#include "feature/nchw/Reader.h"
-#include "feature/nchw/View.h"
-#include "feature/nhwc/Reader.h"
-#include "feature/nhwc/View.h"
-
-#include "backend/ITensor.h"
-#include "exec/IFunction.h"
-#include "ir/Index.h"
-#include "ir/Shape.h"
-#include <memory>
-#include <typeinfo>
-#include "util/Utils.h"
-#include <vector>
-
-namespace onert
-{
-namespace exec
-{
-
-class IPermuteFunction : public IFunction
-{
-private:
- enum class PermuteType
- {
- NHWC_TO_NCHW,
- NCHW_TO_NHWC,
- COPY
- };
-
-public:
- virtual void run() override
- {
- // TODO Optimization : Make control does not reach here? when (_src_tensors.size() == 0)
- assert(_src_tensors.size() == _dst_tensors.size());
- auto src_it = _src_tensors.begin();
- auto dst_it = _dst_tensors.begin();
- while (src_it != _src_tensors.end())
- {
- auto src_tensor = *src_it;
- auto dst_tensor = *dst_it;
- if (src_tensor != dst_tensor)
- {
- // TODO Change to permute in parallel
- assert(underlying_type(src_tensor->data_type()) ==
- underlying_type(dst_tensor->data_type()));
- const auto rank = src_tensor->num_dimensions();
- switch (src_tensor->data_type())
- {
- case ir::DataType::FLOAT32:
- permute<float>(src_tensor, dst_tensor, rank);
- break;
- case ir::DataType::INT32:
- permute<int32_t>(src_tensor, dst_tensor, rank);
- break;
- case ir::DataType::UINT32:
- permute<uint32_t>(src_tensor, dst_tensor, rank);
- break;
- case ir::DataType::BOOL8:
- case ir::DataType::QUANT_UINT8_ASYMM:
- case ir::DataType::UINT8:
- permute<uint8_t>(src_tensor, dst_tensor, rank);
- break;
- case ir::DataType::QUANT_INT8_SYMM:
- permute<int8_t>(src_tensor, dst_tensor, rank);
- break;
- case ir::DataType::INT64:
- permute<int64_t>(src_tensor, dst_tensor, rank);
- break;
- default:
- throw std::runtime_error("IPermuteFunction: Not supported data type");
- break;
- }
- }
- src_it++;
- dst_it++;
- }
- }
-
- virtual void prepare() override { optimize(); }
-
- virtual void optimize() = 0;
-
-private:
- // TODO make src const by proving const access()
- template <class T> void permute(backend::ITensor *src, backend::ITensor *dst, size_t rank)
- {
- const auto permute_type = [&]() -> PermuteType {
- if (src->layout() == ir::Layout::NHWC && dst->layout() == ir::Layout::NCHW)
- {
- return PermuteType::NHWC_TO_NCHW;
- }
- else if (src->layout() == ir::Layout::NCHW && dst->layout() == ir::Layout::NHWC)
- {
- return PermuteType::NCHW_TO_NHWC;
- }
- else
- {
- return PermuteType::COPY;
- }
- }();
- auto fn = [&](backend::ITensor &src_tensor) {
- dst->access([&](backend::ITensor &dst_tensor) {
- if (rank == 4 && permute_type != PermuteType::COPY)
- {
- switch (permute_type)
- {
- case PermuteType::NHWC_TO_NCHW:
- {
- ir::FeatureShape shape;
- shape.N = dst_tensor.dimension(0);
- shape.C = dst_tensor.dimension(1);
- shape.H = dst_tensor.dimension(2);
- shape.W = dst_tensor.dimension(3);
- const feature::nhwc::Reader<T> from(&src_tensor);
- feature::nchw::View<T> into(&dst_tensor);
- feature::iterate(shape)
- << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(batch, row, col, ch);
- into.at(batch, ch, row, col) = value;
- };
- break;
- }
- case PermuteType::NCHW_TO_NHWC:
- {
- ir::FeatureShape shape;
- shape.N = src_tensor.dimension(0);
- shape.C = src_tensor.dimension(1);
- shape.H = src_tensor.dimension(2);
- shape.W = src_tensor.dimension(3);
- const feature::nchw::Reader<T> from(&src_tensor);
- feature::nhwc::View<T> into(&dst_tensor);
- feature::iterate(shape)
- << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(batch, ch, row, col);
- into.at(batch, row, col, ch) = value;
- };
- break;
- }
- default:
- {
- throw std::runtime_error("Unsupported Permutation");
- break;
- }
- }
- }
- else if (!src_tensor.has_padding() && !dst_tensor.has_padding())
- {
- auto src_size = src_tensor.total_size();
- assert(src_size <= dst_tensor.total_size());
- memcpy(dst_tensor.buffer(), src_tensor.buffer(), src_size);
- }
- else
- {
- auto loop_shape = src_tensor.getShape();
- const auto copy_axis = loop_shape.rank() - 1;
- const auto copy_len = loop_shape.dim(copy_axis) * sizeof(T);
- loop_shape.dim(copy_axis) = 1;
- ShapeLoop(loop_shape, [&](const onert::ir::Coordinates &coords) {
- memcpy(dst_tensor.buffer() + dst_tensor.calcOffset(coords),
- src_tensor.buffer() + src_tensor.calcOffset(coords), copy_len);
- });
- }
- });
- };
- src->access(fn);
- }
-
- // NOTE The typeid expression is lvalue expression which refers to an object with static storage
- // duration, of the polymorphic type const std::type_info or of some type derived from it.
- // So std::type_info is non-copyable
- const std::type_info &underlying_type(ir::DataType type) const
- {
- switch (type)
- {
- case ir::DataType::FLOAT32:
- return typeid(float);
- case ir::DataType::INT32:
- return typeid(int32_t);
- case ir::DataType::UINT32:
- return typeid(uint32_t);
- case ir::DataType::INT64:
- return typeid(int64_t);
- case ir::DataType::BOOL8:
- case ir::DataType::QUANT_UINT8_ASYMM:
- case ir::DataType::UINT8:
- return typeid(uint8_t);
- case ir::DataType::QUANT_INT8_SYMM:
- return typeid(int8_t);
- default:
- throw std::runtime_error("IPermuteFunction: Not supported data type");
- }
- }
-
-protected:
- std::vector<backend::ITensor *> _src_tensors;
- std::vector<backend::ITensor *> _dst_tensors;
- // TODO Remove this member if it is possible
- std::vector<size_t> _ranks;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_I_PERMUTE_FUNCTION_H__
diff --git a/runtime/onert/core/src/exec/JSONExecTime.cc b/runtime/onert/core/src/exec/JSONExecTime.cc
deleted file mode 100644
index 72a18def1..000000000
--- a/runtime/onert/core/src/exec/JSONExecTime.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exec/JSONExecTime.h"
-#include "backend/IConfig.h"
-#include <fstream>
-
-namespace onert
-{
-namespace exec
-{
-/**
- * @brief Helper function for reading string from stream
- *
- * @param str Output string
- * @param stream File stream
- */
-void readString(std::string &str, std::ifstream &stream)
-{
- str.clear();
- char buf;
- while (stream.good())
- {
- stream.get(buf);
- if (buf == '"')
- break;
- str.push_back(buf);
- }
-}
-
-/**
- * @brief Helper function for reading bool from stream
- *
- * @param quant Output bool
- * @param stream File stream
- */
-void readBool(bool &quant, std::ifstream &stream)
-{
- char buf;
- stream.get(buf);
- quant = (buf == '1');
- stream.get(buf);
-}
-
-void printString(const std::string &str, std::ofstream &stream) { stream << "\"" << str << "\""; }
-
-void printBool(bool quant, std::ofstream &stream) { stream << "\"" << quant << "\""; }
-
-void JSON::readOperation(const std::string &backend, const std::string &operation, bool quant,
- std::ifstream &stream)
-{
- uint32_t size = 0;
- int64_t time = 0;
-
- std::string int_buf;
- char buf;
- int number_of_closed_braces = 0;
- int number_of_commas = 0;
-
- while (stream.good())
- {
- stream.get(buf);
-
- switch (buf)
- {
- case ']':
- {
- number_of_closed_braces++;
- break;
- }
- case '[':
- {
- number_of_closed_braces--;
- break;
- }
- default:
- {
- if (std::isdigit(buf))
- {
- int_buf.push_back(buf);
- }
- break;
- }
- }
-
- if (number_of_closed_braces == 1)
- break;
-
- if ((buf == ']' && number_of_closed_braces == 0) ||
- (buf == ',' && number_of_closed_braces == -1))
- {
- switch (number_of_commas % 2)
- {
- case 0:
- {
- size = static_cast<uint32_t>(std::atoi(int_buf.c_str()));
- break;
- }
- case 1:
- {
- time = static_cast<int64_t>(std::atol(int_buf.c_str()));
- auto bf = _backends.find(backend);
- if (bf != _backends.end())
- {
- _measurements[bf->second][operation][quant][size] = time;
- } // we ignore the records for unsupported backends
- break;
- }
- }
- number_of_commas++;
- int_buf.clear();
- }
- }
-}
-void JSON::printOperation(const std::map<uint32_t, int64_t> &operation_info,
- std::ofstream &stream) const
-{
- for (const auto &items : operation_info)
- {
- stream << "[" << items.first << ", " << items.second << "], ";
- }
- stream.seekp(-2, std::ofstream::end);
-}
-
-void JSON::uploadOperationsExecTime() const
-{
- std::ofstream stream(_measurement_file);
- if (!stream.is_open())
- {
- throw std::runtime_error("Failed to save backend config file");
- }
- else
- {
- stream << "{";
- for (const auto &backend : _measurements)
- {
- printString(backend.first->config()->id(), stream);
- stream << ": {";
- for (const auto &operation : backend.second)
- {
- printString(operation.first, stream);
- stream << ": {";
- for (const auto &type : operation.second)
- {
- printBool(type.first, stream);
- stream << ": [";
- printOperation(type.second, stream);
- stream << "], ";
- }
- stream.seekp(-2, std::ofstream::end);
- stream << "}, ";
- }
- stream.seekp(-2, std::ofstream::end);
- stream << "}, ";
- }
- stream.seekp(-2, std::ofstream::end);
- stream << "}";
- stream.close();
- }
-}
-
-void JSON::loadOperationsExecTime()
-{
- std::ifstream stream(_measurement_file);
- if (stream.is_open())
- {
- std::string backend;
- std::string operation;
- bool quant = false;
- char buf;
- int number_of_open_braces = 0;
-
- while (stream.good())
- {
- stream.get(buf);
- switch (buf)
- {
- case '{':
- number_of_open_braces++;
- break;
- case '}':
- number_of_open_braces--;
- break;
- case '"':
- {
- if (number_of_open_braces == 1)
- {
- // read backend string
- readString(backend, stream);
- }
- if (number_of_open_braces == 2)
- {
- // read operation string
- readString(operation, stream);
- }
- if (number_of_open_braces == 3)
- {
- // read operation string
- readBool(quant, stream);
- }
- break;
- }
- case '[':
- {
- // reading and creating all info for operation
- readOperation(backend, operation, quant, stream);
- break;
- }
- default:
- break;
- }
- }
- stream.close();
- }
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/JSONExecTime.h b/runtime/onert/core/src/exec/JSONExecTime.h
deleted file mode 100644
index a64cb3133..000000000
--- a/runtime/onert/core/src/exec/JSONExecTime.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_JSON_EXEC_TIME_H__
-#define __ONERT_EXEC_JSON_EXEC_TIME_H__
-
-#include <fstream>
-#include <unordered_map>
-#include <map>
-#include <vector>
-#include "backend/Backend.h"
-#include "backend/IConfig.h"
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief table, that contains execution time of an operation on some backend for different input
- * sizes and transfer time from one backend to another for various input sizes (permutation time)
- *
- * backend -> op -> quant-> size --> time
- * _measurements[Backend*]["string"][bool][uint32_t] = int64_t
- */
-using MeasurementData = std::unordered_map<
- const backend::Backend *,
- std::unordered_map<std::string, std::unordered_map<bool, std::map<uint32_t, int64_t>>>>;
-
-class JSON
-{
-public:
- explicit JSON(const std::vector<const backend::Backend *> &backends,
- MeasurementData &measurements)
- : _measurement_file("exec_time.json"), _backends(), _measurements(measurements)
- {
- for (const auto b : backends)
- {
- _backends.emplace(b->config()->id(), b);
- }
- loadOperationsExecTime();
- };
- /**
- * @brief Update _operations_exec_time_file with new data.
- */
- void uploadOperationsExecTime() const;
-
-private:
- ///@brief file containing measurements
- std::string _measurement_file;
- std::unordered_map<std::string, const backend::Backend *> _backends;
- std::unordered_map<
- const backend::Backend *,
- std::unordered_map<std::string, std::unordered_map<bool, std::map<uint32_t, int64_t>>>>
- &_measurements;
- /**
- * @brief Helper function for inserting data to OperationExecTimes
- *
- * @param backend String name of backend
- * @param operation String name of operation
- * @param quant if input type quantized
- * @param stream File stream
- */
- void readOperation(const std::string &backend, const std::string &operation, bool quant,
- std::ifstream &stream);
-
- /**
- * @brief Helper function for writing OperationExecTimes to stream
- *
- * @param operation_info Map of operations execution information
- * @param stream File stream
- */
- void printOperation(const std::map<uint32_t, int64_t> &operation_info,
- std::ofstream &stream) const;
- /**
- * @brief Parse and load operations_exec_time from _operations_exec_time_file.
- */
- void loadOperationsExecTime();
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_JSON_EXEC_TIME_H__
diff --git a/runtime/onert/core/src/exec/Job.cc b/runtime/onert/core/src/exec/Job.cc
deleted file mode 100644
index 27925a93c..000000000
--- a/runtime/onert/core/src/exec/Job.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Job.h"
-
-#include <cassert>
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace exec
-{
-
-Job::Job(uint32_t index, FunctionSequence *fn_seq) : _index{index}, _fn_seq{fn_seq} {}
-
-void Job::run() { _fn_seq->run(); }
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/Job.h b/runtime/onert/core/src/exec/Job.h
deleted file mode 100644
index 6de9c31a0..000000000
--- a/runtime/onert/core/src/exec/Job.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_JOB_H__
-#define __ONERT_EXEC_JOB_H__
-
-#include <unordered_set>
-
-#include "exec/FunctionSequence.h"
-#include "ir/Index.h"
-#include "ir/OperandIndexSequence.h"
-#include "backend/Backend.h"
-
-namespace onert
-{
-namespace exec
-{
-
-class Job
-{
-public:
- /**
- * @brief Constructs a Job object
- *
- * @param index Operation index for this job
- * @param fn_seq compiled code to run this job
- * @param inputs Input operand list
- * @param outputs Output operand list
- */
- Job(uint32_t index, FunctionSequence *fn_seq);
- /**
- * @brief Execute the compiled code
- */
- void run();
- /**
- * @brief Return job index
- *
- * @return Job index
- */
- uint32_t index() const { return _index; }
- /**
- * @brief Return the function to be executed
- *
- * @return Pointer of the function
- */
- FunctionSequence *fn_seq() { return _fn_seq; }
-
-private:
- uint32_t _index;
- FunctionSequence *_fn_seq;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_JOB_H__
diff --git a/runtime/onert/core/src/exec/LinearExecutor.cc b/runtime/onert/core/src/exec/LinearExecutor.cc
deleted file mode 100644
index 6e6ca110f..000000000
--- a/runtime/onert/core/src/exec/LinearExecutor.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LinearExecutor.h"
-#ifdef RUY_PROFILER
-#include "ruy/profiler/instrumentation.h"
-#endif
-
-namespace onert
-{
-namespace exec
-{
-
-#ifdef RUY_PROFILER
-namespace
-{
-char *seq_to_label(const onert::ir::OpSequence *op_seq, const onert::ir::Operations &operations)
-{
- auto node_name = operations.at(*op_seq->begin()).name();
- char *cstr = new char[node_name.length() + 1];
- std::strcpy(cstr, node_name.c_str());
- return cstr;
-}
-} // namespace
-#endif
-
-void LinearExecutor::executeImpl()
-{
- _subject.notifyModelBegin(this);
- for (auto &&code : _code)
- {
- const auto op_seq = code.op_seq;
- const auto backend = code.lower_info->backend();
-// TODO : Move ruy profiler into ExecutionObserver
-#ifdef RUY_PROFILER
- ruy::profiler::ScopeLabel label(seq_to_label(op_seq, _graph.operations()));
-#endif
- _subject.notifyJobBegin(this, op_seq, backend);
-
- auto &fn_seq = code.fn_seq;
-
- fn_seq->initRunning();
-
- bool handle_dynamic_tensor = op_seq->has_dynamic_tensor() || hasDynamicInput();
- fn_seq->enableDynamicShapeInferer(handle_dynamic_tensor);
- fn_seq->run();
-
- _subject.notifyJobEnd(this, op_seq, backend);
- }
- _subject.notifyModelEnd(this);
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/LinearExecutor.h b/runtime/onert/core/src/exec/LinearExecutor.h
deleted file mode 100644
index 22d00ec30..000000000
--- a/runtime/onert/core/src/exec/LinearExecutor.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file LinearExecutor.h
- * @brief This file contains LinearExecutor class to define and run execution phase
- */
-
-#ifndef __ONERT_EXEC_EXECUTOR_H_
-#define __ONERT_EXEC_EXECUTOR_H_
-
-#include "ir/Index.h"
-#include "ExecutorBase.h"
-#include "compiler/Linear.h"
-#include "exec/FunctionSequence.h"
-#include "compiler/CodeMap.h"
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief Class to handle execution phase. Simple run the sequence of operations that is sorted in
- * topological order
- */
-class LinearExecutor final : public ExecutorBase
-{
-public:
- /**
- * @brief Construct a new LinearExecutor object
- * @param lowered_graph LoweredGraph object
- * @param tensor_builders Tensor builders that are currently used
- * @param code_map OpSequence and its code map
- */
- LinearExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const std::vector<backend::ITensor *> &input_tensors,
- const std::vector<backend::ITensor *> &output_tensors,
- const compiler::TensorRegistries &tensor_regs, compiler::CodeMap &&code_map,
- const std::vector<ir::OpSequenceIndex> &order)
- : ExecutorBase{std::move(lowered_graph), input_tensors, output_tensors, tensor_regs}
- {
- for (auto index : order)
- {
- _code.emplace_back(std::move(code_map.at(index)));
- }
- }
-
-public:
- void executeImpl(void) override;
-
-private:
- std::vector<compiler::CodeAndInfo> _code;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_EXECUTOR_H_
diff --git a/runtime/onert/core/src/exec/ParallelExecutor.cc b/runtime/onert/core/src/exec/ParallelExecutor.cc
deleted file mode 100644
index 676bdb5fa..000000000
--- a/runtime/onert/core/src/exec/ParallelExecutor.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ParallelExecutor.h"
-
-#include <cassert>
-
-#include "util/logging.h"
-#include "exec/IFunction.h"
-
-namespace onert
-{
-namespace exec
-{
-
-class HookFunction : public IFunction
-{
-public:
- HookFunction(IFunction *fn, const std::function<void()> &setup,
- const std::function<void()> &teardown)
- : _fn{fn}, _setup{setup}, _teardown{teardown}
- {
- }
-
-public:
- void run() override
- {
- _setup();
- _fn->run();
- _teardown();
- }
-
-private:
- IFunction *_fn;
- std::function<void()> _setup;
- std::function<void()> _teardown;
-};
-
-void ParallelExecutor::notify(uint32_t finished_job_id)
-{
- std::unique_lock<std::mutex> lock{_mu_jobs};
-
- DataflowExecutor::notify(finished_job_id);
-
- lock.unlock();
- _cv_jobs.notify_all();
-}
-
-ParallelExecutor::ParallelExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const std::vector<backend::ITensor *> &input_tensors,
- const std::vector<backend::ITensor *> &output_tensors,
- const compiler::TensorRegistries &tensor_regs,
- compiler::CodeMap &&code_map)
- : DataflowExecutor{std::move(lowered_graph), input_tensors, output_tensors, tensor_regs,
- std::move(code_map)}
-{
- VERBOSE(ParallelExecutor) << "Constructing Parallel Executor" << std::endl;
-}
-
-void ParallelExecutor::executeImpl()
-{
- bool dynamic_input_exists = hasDynamicInput();
-
- // Init scheduler
- // TODO Consider to have distinct backend set in LowerInfoMap
- BackendSet backends;
- for (auto &itr : _lowered_graph->getLowerInfo()->op_seq)
- {
- backends.add(itr.second->backend());
- }
- _scheduler = std::make_unique<ParallelScheduler>(backends);
-
- assert(noWaitingJobs());
-
- // Execution setup
- _waiting_jobs.swap(_finished_jobs); // Move finished jobs to waiting jobs
-
- for (uint32_t i = 0; i < _waiting_jobs.size(); ++i)
- {
- VERBOSE(ParallelExecutor) << i << ": " << _input_info[i] << std::endl;
- if (_input_info[i] == 0)
- {
- emplaceToReadyJobs(i);
- }
- }
- assert(!_ready_jobs.empty()); // Cannot begin if there is no initial jobs
-
- VERBOSE(ParallelExecutor) << "INITIAL JOBS : " << _ready_jobs.size() << std::endl;
-
- _subject.notifyModelBegin(this);
- while (true)
- {
- std::unique_lock<std::mutex> lock{_mu_jobs};
-
- if (_ready_jobs.empty())
- {
- _cv_jobs.wait(lock, [this] { return !_ready_jobs.empty() || noWaitingJobs(); });
- // Check finish condition
- if (_ready_jobs.empty() && noWaitingJobs())
- {
- break;
- }
- }
-
- auto job = std::move(_ready_jobs.begin()->second);
- _ready_jobs.erase(_ready_jobs.begin());
-
- lock.unlock();
-
- VERBOSE(ParallelExecutor) << "Assigning fn #" << job->index() << std::endl;
-
- auto job_index = job->index();
- auto op_sequence_index = _job_to_op_seq[job_index];
- auto op_seq = &_lowered_graph->op_seqs().at(op_sequence_index);
- auto backend = _lowered_graph->getLowerInfo()->op_seq.at(op_sequence_index)->backend();
- auto setup = [&, op_seq, backend]() { _subject.notifyJobBegin(this, op_seq, backend); };
- auto teardown = [&, job_index, op_seq, backend]() {
- _subject.notifyJobEnd(this, op_seq, backend);
- notify(job_index);
- };
-
- job->fn_seq()->initRunning();
-
- // dynamic tensor setting
- bool handle_dynamic_tensor = op_seq->has_dynamic_tensor() || dynamic_input_exists;
- job->fn_seq()->enableDynamicShapeInferer(handle_dynamic_tensor);
-
- _scheduler->assign(std::make_unique<HookFunction>(job->fn_seq(), setup, teardown), backend);
- _finished_jobs[job_index] = std::move(job);
- }
-
- assert(noWaitingJobs());
-
- // Wait for all the jobs done
- _scheduler->finish();
- _subject.notifyModelEnd(this);
-
- // Reset input info for the next execution
- _input_info = _initial_input_info;
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ParallelExecutor.h b/runtime/onert/core/src/exec/ParallelExecutor.h
deleted file mode 100644
index 111c20c0c..000000000
--- a/runtime/onert/core/src/exec/ParallelExecutor.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_PARALLEL_EXECUTOR_H__
-#define __ONERT_EXEC_PARALLEL_EXECUTOR_H__
-
-#include <list>
-#include <queue>
-#include <unordered_map>
-
-#include "exec/FunctionSequence.h"
-#include "Job.h"
-#include "ir/OperandIndexSequence.h"
-#include "ir/Index.h"
-#include <memory>
-#include "exec/DataflowExecutor.h"
-#include "ParallelScheduler.h"
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief Class to execute Graph in parallel
- */
-class ParallelExecutor : public DataflowExecutor
-{
-protected:
- void notify(uint32_t finished_job_id) override;
-
-public:
- /**
- * @brief Constructs a ParallelExecutor object
- *
- * @param lowered_graph LoweredGraph object
- * @param tensor_builders Tensor builders that are currently used
- * @param code_map OpSequence and its code map
- */
- ParallelExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
- const std::vector<backend::ITensor *> &input_tensors,
- const std::vector<backend::ITensor *> &output_tensors,
- const compiler::TensorRegistries &tensor_regs, compiler::CodeMap &&code_map);
-
- void executeImpl() override;
-
-private:
- std::condition_variable _cv_jobs;
- std::mutex _mu_jobs;
- std::unique_ptr<ParallelScheduler> _scheduler;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_PARALLEL_EXECUTOR_H__
diff --git a/runtime/onert/core/src/exec/ParallelScheduler.cc b/runtime/onert/core/src/exec/ParallelScheduler.cc
deleted file mode 100644
index 70c9c3dd6..000000000
--- a/runtime/onert/core/src/exec/ParallelScheduler.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ParallelScheduler.h"
-
-#include <cassert>
-
-#include <memory>
-#include "util/logging.h"
-
-namespace onert
-{
-namespace exec
-{
-
-ParallelScheduler::ParallelScheduler(const BackendSet &backends)
-{
- assert(!backends.empty());
-
- for (auto backend : backends)
- {
- _thread_pools[backend] = std::make_unique<ThreadPool>();
- }
-}
-
-void ParallelScheduler::assign(std::unique_ptr<IFunction> &&fn, const backend::Backend *backend)
-{
- assert(!_thread_pools.empty());
-
- _thread_pools.at(backend)->enqueue(std::move(fn));
-}
-
-void ParallelScheduler::finish()
-{
- for (auto &itr : _thread_pools)
- {
- itr.second->finish();
- }
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ParallelScheduler.h b/runtime/onert/core/src/exec/ParallelScheduler.h
deleted file mode 100644
index 6802c9e43..000000000
--- a/runtime/onert/core/src/exec/ParallelScheduler.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_PARALLEL_SCHEDULER_H__
-#define __ONERT_EXEC_PARALLEL_SCHEDULER_H__
-
-#include <unordered_map>
-#include <memory>
-
-#include "exec/IFunction.h"
-#include "BackendSet.h"
-#include "ThreadPool.h"
-
-namespace onert
-{
-namespace exec
-{
-
-class ParallelScheduler
-{
-public:
- /**
- * @brief Constructs ParallelScheduler object
- *
- * @param backends Backend set
- */
- ParallelScheduler(const BackendSet &backends);
- /**
- * @brief Assign a task to the given backend
- *
- * @param[in] fn Function to be assigned
- * @param[in] fn Target backend
- */
- void assign(std::unique_ptr<IFunction> &&fn, const backend::Backend *backend);
- /**
- * @brief Block until all jobs are finished
- */
- void finish();
-
-private:
- std::unordered_map<const backend::Backend *, std::unique_ptr<ThreadPool>> _thread_pools;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_PARALLEL_SCHEDULER_H__
diff --git a/runtime/onert/core/src/exec/ShapeConverter.cc b/runtime/onert/core/src/exec/ShapeConverter.cc
deleted file mode 100644
index 707aef29b..000000000
--- a/runtime/onert/core/src/exec/ShapeConverter.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ShapeConverter.h"
-
-namespace onert
-{
-namespace exec
-{
-
-ir::Shape convertShape(const ir::Shape &shape, ir::Layout src_layout, ir::Layout dst_layout)
-{
- if (shape.rank() != 4)
- return shape;
-
- if (src_layout == dst_layout)
- return shape;
-
- if (src_layout == ir::Layout::NCHW && dst_layout == ir::Layout::NHWC)
- {
- const ir::Shape &src_NCHW = shape;
- ir::Shape dst_NHWC(4);
- dst_NHWC.dim(0) = src_NCHW.dim(0); // N
- dst_NHWC.dim(1) = src_NCHW.dim(2); // H
- dst_NHWC.dim(2) = src_NCHW.dim(3); // W
- dst_NHWC.dim(3) = src_NCHW.dim(1); // C
-
- return dst_NHWC;
- }
-
- if (src_layout == ir::Layout::NHWC && dst_layout == ir::Layout::NCHW)
- {
- const ir::Shape &src_NHWC = shape;
- ir::Shape dst_NCHW(4);
- dst_NCHW.dim(0) = src_NHWC.dim(0); // N
- dst_NCHW.dim(1) = src_NHWC.dim(3); // C
- dst_NCHW.dim(2) = src_NHWC.dim(1); // H
- dst_NCHW.dim(3) = src_NHWC.dim(2); // W
-
- return dst_NCHW;
- }
-
- throw std::runtime_error("Should not reach here");
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ShapeConverter.h b/runtime/onert/core/src/exec/ShapeConverter.h
deleted file mode 100644
index 7dc7e7536..000000000
--- a/runtime/onert/core/src/exec/ShapeConverter.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_SHAPE_CONVERTER_H__
-#define __ONERT_EXEC_SHAPE_CONVERTER_H__
-
-#include <ir/Layout.h>
-#include <ir/Shape.h>
-
-namespace onert
-{
-namespace exec
-{
-
-/**
- * @brief Converts shape when its rank is 4
- *
- * @return ir::Shape Return a shape based on dst_layout. If rank is not 4, input shape is
- * returned without conversion.
- */
-ir::Shape convertShape(const ir::Shape &shape, ir::Layout src_layout, ir::Layout dst_layout);
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_SHAPE_CONVERTER_H__
diff --git a/runtime/onert/core/src/exec/ThreadPool.cc b/runtime/onert/core/src/exec/ThreadPool.cc
deleted file mode 100644
index c8e0e3265..000000000
--- a/runtime/onert/core/src/exec/ThreadPool.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ThreadPool.h"
-
-#include <cassert>
-
-namespace onert
-{
-namespace exec
-{
-
-ThreadPool::ThreadPool(uint32_t num_threads)
-{
- assert(num_threads >= 1);
-
- for (uint32_t i = 0; i < num_threads; i++)
- {
- _threads.emplace_back(std::ref(_worker));
- }
-}
-
-ThreadPool::~ThreadPool()
-{
- if (!_threads.empty())
- {
- _worker.terminate();
- join();
- }
-}
-
-void ThreadPool::enqueue(std::unique_ptr<IFunction> &&fn) { _worker.enqueue(std::move(fn)); }
-
-uint32_t ThreadPool::numJobsInQueue() { return _worker.numJobsInQueue(); }
-
-void ThreadPool::join()
-{
- for (auto &thread : _threads)
- {
- thread.join();
- }
- _threads.clear();
-}
-
-void ThreadPool::finish()
-{
- _worker.finish();
- join();
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/ThreadPool.h b/runtime/onert/core/src/exec/ThreadPool.h
deleted file mode 100644
index b638bd94c..000000000
--- a/runtime/onert/core/src/exec/ThreadPool.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_THREAD_POOL_H__
-#define __ONERT_EXEC_THREAD_POOL_H__
-
-#include <thread>
-#include <memory>
-#include <vector>
-
-#include "WorkQueue.h"
-
-namespace onert
-{
-namespace exec
-{
-
-class ThreadPool
-{
-public:
- /**
- * @brief Coustruct ThreadPool object
- *
- * @param num_threads Number of threads
- */
- ThreadPool(uint32_t num_threads = 1);
- /**
- * @brief Destroy ThreadPool object
- */
- ~ThreadPool();
- /**
- * @brief Enqueue a function
- *
- * @param fn A function to be queued
- */
- void enqueue(std::unique_ptr<IFunction> &&fn);
- /**
- * @brief Get number of jobs in worker's queue
- *
- * @return Number of jobs
- */
- uint32_t numJobsInQueue();
-
- /**
- * @brief Block until all jobs are finished
- */
- void finish();
-
-private:
- void join();
-
-private:
- WorkQueue _worker;
- std::vector<std::thread> _threads;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_THREAD_POOL_H__
diff --git a/runtime/onert/core/src/exec/WorkQueue.cc b/runtime/onert/core/src/exec/WorkQueue.cc
deleted file mode 100644
index b37f6a387..000000000
--- a/runtime/onert/core/src/exec/WorkQueue.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "WorkQueue.h"
-
-#include <cassert>
-
-namespace onert
-{
-namespace exec
-{
-
-WorkQueue::~WorkQueue()
-{
- {
- std::unique_lock<std::mutex> lock(_mu);
- _state = State::FORCE_FINISHING;
- }
- _cv.notify_all();
-}
-
-void WorkQueue::operator()()
-{
- while (true)
- {
- std::unique_ptr<IFunction> fn = nullptr;
-
- {
- std::unique_lock<std::mutex> lock{_mu};
- _cv.wait(lock, [this] {
- return (_state == State::FORCE_FINISHING) || (_state == State::FINISHING) ||
- (_state == State::ONLINE && !_functions.empty());
- });
-
- if (_state == State::FORCE_FINISHING)
- {
- assert(_functions.empty() && "Terminating with unfinished jobs");
- return;
- }
- else if (_state == State::FINISHING && _functions.empty())
- {
- return;
- }
- else
- {
- assert(((_state == State::FINISHING) || (_state == State::ONLINE)) && !_functions.empty());
- fn = std::move(_functions.front());
- _functions.pop();
- }
- }
-
- assert(fn);
- fn->run();
- }
-}
-
-void WorkQueue::enqueue(std::unique_ptr<IFunction> &&fn)
-{
- {
- std::unique_lock<std::mutex> lock{_mu};
- _functions.emplace(std::move(fn));
- }
- _cv.notify_one();
-}
-
-void WorkQueue::terminate()
-{
- {
- std::unique_lock<std::mutex> lock{_mu};
- _state = State::FORCE_FINISHING;
- }
- _cv.notify_all();
-}
-
-void WorkQueue::finish()
-{
- {
- std::unique_lock<std::mutex> lock{_mu};
- _state = State::FINISHING;
- }
- _cv.notify_all();
-}
-
-uint32_t WorkQueue::numJobsInQueue()
-{
- std::unique_lock<std::mutex> lock{_mu};
- return _functions.size();
-}
-
-} // namespace exec
-} // namespace onert
diff --git a/runtime/onert/core/src/exec/WorkQueue.h b/runtime/onert/core/src/exec/WorkQueue.h
deleted file mode 100644
index 2e56d85e8..000000000
--- a/runtime/onert/core/src/exec/WorkQueue.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_WORK_QUEUE_H__
-#define __ONERT_EXEC_WORK_QUEUE_H__
-
-#include <condition_variable>
-#include <memory>
-#include <mutex>
-#include <queue>
-
-#include "exec/IFunction.h"
-
-namespace onert
-{
-namespace exec
-{
-
-class WorkQueue
-{
-public:
- enum class State
- {
- ONLINE,
- FINISHING,
- FORCE_FINISHING
- };
-
-public:
- /**
- * @brief Create WorkQueue object
- */
- WorkQueue() = default;
- /**
- * @brief Destroy WorkQueue object
- */
- ~WorkQueue();
- /**
- * @brief Thread entry function
- */
- void operator()();
- /**
- * @brief Push the given Task to the job queue
- *
- * @param fn Function to be executed(a job)
- */
- void enqueue(std::unique_ptr<IFunction> &&fn);
- /**
- * @brief Flag as terminating so all the worker threads can terminate
- */
- void terminate();
- /**
- * @brief Flag as terminating so all the worker threads can terminate
- */
- void finish();
- /**
- * @brief Check if it has pending jobs. Even if this returns fals, WorkQueue threads may be still
- * running
- *
- * @return true if the job queue not empty otherwise false
- */
- uint32_t numJobsInQueue();
-
-private:
- State _state{State::ONLINE};
- std::queue<std::unique_ptr<IFunction>> _functions;
- std::mutex _mu;
- std::condition_variable _cv;
-};
-
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_WORK_QUEUE_H__
diff --git a/runtime/onert/core/src/exec/feature/IndexIterator.h b/runtime/onert/core/src/exec/feature/IndexIterator.h
deleted file mode 100644
index 9613f5a30..000000000
--- a/runtime/onert/core/src/exec/feature/IndexIterator.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file IndexIterator.h
- * @brief This file contains IndexIterator class
- */
-
-#ifndef __ONERT_EXEC_FEATURE_INDEX_ITERATOR_H__
-#define __ONERT_EXEC_FEATURE_INDEX_ITERATOR_H__
-
-#include "ir/Shape.h"
-
-namespace onert
-{
-namespace exec
-{
-namespace feature
-{
-
-/**
- * @brief Class to iterate Callable with Index of feature
- */
-class IndexIterator
-{
-public:
- /**
- * @brief Construct IndexIterator object with Shape of feature
- * @param[in] shape Shape reference of feature
- */
- IndexIterator(const ir::FeatureShape &shape) : _shape{shape}
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Call a function iterated
- * @param[in] cb A callback function
- * @return Current IndexIterator object
- */
- template <typename Callable> IndexIterator &iter(Callable cb)
- {
- for (int32_t batch = 0; batch < _shape.N; ++batch)
- {
- for (int32_t ch = 0; ch < _shape.C; ++ch)
- {
- for (int32_t row = 0; row < _shape.H; ++row)
- {
- for (int32_t col = 0; col < _shape.W; ++col)
- {
- cb(batch, ch, row, col);
- }
- }
- }
- }
-
- return (*this);
- }
-
-private:
- /**
- * @brief Shape for feature
- */
- const ir::FeatureShape _shape;
-};
-
-/**
- * @brief Create an object of IndexIterator for feature
- * @param[in] Shape reference of feature
- * @return Created IndexIterator object
- */
-static inline IndexIterator iterate(const ir::FeatureShape &shape) { return IndexIterator{shape}; }
-
-/**
- * @brief Call a function iterated using IndexIterator of feature
- * Overloaded operator<<
- * @param[in] it An IndexIterator reference
- * @param[in] cb A callback function
- * @return created IndexIterator object
- */
-template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb)
-{
- return it.iter(cb);
-}
-
-} // namespace feature
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_FEATURE_INDEX_ITERATOR_H__
diff --git a/runtime/onert/core/src/exec/feature/Reader.h b/runtime/onert/core/src/exec/feature/Reader.h
deleted file mode 100644
index ed87bb990..000000000
--- a/runtime/onert/core/src/exec/feature/Reader.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Reader.h
- * @brief This file contains Reader class
- */
-
-#ifndef __ONERT_EXEC_FEATURE_READER_H__
-#define __ONERT_EXEC_FEATURE_READER_H__
-
-#include <cstdint>
-
-namespace onert
-{
-namespace exec
-{
-namespace feature
-{
-
-/**
- * @brief Class reads values of feature
- * The interface class
- */
-template <typename T> struct Reader
-{
- /**
- * @brief Destruct Reader object using default destructor
- */
- virtual ~Reader() = default;
-
- /**
- * @brief Get the value used by three indexes
- * @param[in] ch The depth index
- * @param[in] row The height index
- * @param[in] col The width index
- * @return The value at the offset
- */
- virtual T at(uint32_t ch, uint32_t row, uint32_t col) const = 0;
- /**
- * @brief Get the value used by four indexes
- * @param[in] batch The batch index
- * @param[in] ch The depth index
- * @param[in] row The height index
- * @param[in] col The width index
- * @return The value at the offset
- */
- virtual T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const = 0;
-};
-
-} // namespace feature
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_FEATURE_READER_H__
diff --git a/runtime/onert/core/src/exec/feature/nchw/Reader.h b/runtime/onert/core/src/exec/feature/nchw/Reader.h
deleted file mode 100644
index 7be9df4d5..000000000
--- a/runtime/onert/core/src/exec/feature/nchw/Reader.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_FEATURE_NCHW_READER_H__
-#define __ONERT_EXEC_FEATURE_NCHW_READER_H__
-
-#include "../Reader.h"
-
-#include <cassert>
-
-#include "backend/ITensor.h"
-#include "ir/Shape.h"
-
-namespace onert
-{
-namespace exec
-{
-namespace feature
-{
-namespace nchw
-{
-
-template <typename T> class Reader : public feature::Reader<T>
-{
-public:
- // Construct for buffer of model inputs
- Reader(const ir::FeatureShape &shape, const T *ptr, size_t len)
- : _shape{shape}, _ptr{reinterpret_cast<const uint8_t *>(ptr)}, _len{len}
- {
- assert(shape.N * shape.C * shape.H * shape.W * sizeof(T) == len);
-
- // No padding
- _strides.W = sizeof(T);
- _strides.H = shape.W * sizeof(T);
- _strides.C = shape.W * shape.H * sizeof(T);
- _strides.N = shape.W * shape.H * shape.C * sizeof(T);
- }
-
- // Construct for backend tensor
- Reader(backend::ITensor *tensor)
- : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
- {
- assert(tensor->layout() == ir::Layout::NCHW);
-
- const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
- _strides.W = tensor->dimension(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
- _strides.H = tensor->dimension(2) == 1 ? 0 : tensor->calcOffset({0, 0, 1, 0}) - start_offset;
- _strides.C = tensor->dimension(1) == 1 ? 0 : tensor->calcOffset({0, 1, 0, 0}) - start_offset;
- _strides.N = tensor->dimension(0) == 1 ? 0 : tensor->calcOffset({1, 0, 0, 0}) - start_offset;
-
- _shape.W = tensor->dimension(3);
- _shape.H = tensor->dimension(2);
- _shape.C = tensor->dimension(1);
- _shape.N = tensor->dimension(0);
- }
-
-public:
- T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const final
- {
- return getRef(batch, ch, row, col);
- }
- T at(uint32_t ch, uint32_t row, uint32_t col) const final { return getRef(0, ch, row, col); }
-
-protected:
- const T &getRef(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
- {
- const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
-
- const T *ptr = reinterpret_cast<const T *>(_ptr + offset);
-
- return *ptr;
- }
-
-private:
- size_t feature_index_to_byte_offset(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
- {
- assert(1u * _shape.N > batch); // shape.N > batch
- assert(1u * _shape.C > ch); // shape.C > ch
- assert(1u * _shape.H > row); // shape.H > row
- assert(1u * _shape.W > col); // shape.W > col
-
- uint32_t res = 0;
- res += batch * _strides.N;
- res += ch * _strides.C;
- res += row * _strides.H;
- res += col * _strides.W;
-
- return res;
- }
-
-private:
- // TODO Remove _shape
- ir::FeatureShape _shape;
- using Strides = ir::FeatureShape;
- Strides _strides;
- const uint8_t *_ptr;
- size_t _len;
-};
-
-} // namespace nchw
-} // namespace feature
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_FEATURE_NCHW_READER_H__
diff --git a/runtime/onert/core/src/exec/feature/nchw/View.h b/runtime/onert/core/src/exec/feature/nchw/View.h
deleted file mode 100644
index dbaf1a91e..000000000
--- a/runtime/onert/core/src/exec/feature/nchw/View.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_FEATURE_NCHW_VIEW_H__
-#define __ONERT_EXEC_FEATURE_NCHW_VIEW_H__
-
-#include "Reader.h"
-
-#include "backend/ITensor.h"
-#include "ir/Shape.h"
-#include "util/logging.h"
-
-#include <cassert>
-
-namespace onert
-{
-namespace exec
-{
-namespace feature
-{
-namespace nchw
-{
-
-template <typename T> class View final : public Reader<T>
-{
-public:
- // Construct for buffer of model inputs
- View(const ir::FeatureShape &shape, T *ptr, size_t len) : Reader<T>{shape, ptr, len}
- {
- // DO NOTHING
- }
-
- // Construct for backend tensor
- View(::onert::backend::ITensor *tensor) : Reader<T>{tensor}
- {
- // DO NOTHING
- }
-
-public:
- using Reader<T>::at;
- T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
- {
- return const_cast<T &>(Reader<T>::getRef(batch, ch, row, col));
- }
- T &at(uint32_t ch, uint32_t row, uint32_t col)
- {
- return const_cast<T &>(Reader<T>::getRef(0, ch, row, col));
- }
-};
-
-} // namespace nchw
-} // namespace feature
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_FEATURE_NCHW_VIEW_H__
diff --git a/runtime/onert/core/src/exec/feature/nhwc/Reader.h b/runtime/onert/core/src/exec/feature/nhwc/Reader.h
deleted file mode 100644
index 7730cee72..000000000
--- a/runtime/onert/core/src/exec/feature/nhwc/Reader.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_FEATURE_NHWC_READER_H__
-#define __ONERT_EXEC_FEATURE_NHWC_READER_H__
-
-#include "../Reader.h"
-
-#include <cassert>
-
-#include "backend/ITensor.h"
-#include "ir/Shape.h"
-#include "util/Utils.h"
-
-namespace onert
-{
-namespace exec
-{
-namespace feature
-{
-namespace nhwc
-{
-
-template <typename T> class Reader : public feature::Reader<T>
-{
-public:
- // Construct for buffer of model inputs
- Reader(const ir::FeatureShape &shape, const T *ptr, size_t len)
- : _shape{shape}, _ptr{reinterpret_cast<const uint8_t *>(ptr)}, _len{len}
- {
- UNUSED_RELEASE(len); // Workaround for unused variable in release mode
- assert(shape.N * shape.C * shape.H * shape.W * sizeof(T) == len);
-
- // No padding
- _strides.C = sizeof(T);
- _strides.W = shape.C * sizeof(T);
- _strides.H = shape.C * shape.W * sizeof(T);
- _strides.N = shape.C * shape.W * shape.H * sizeof(T);
- }
-
- // Construct for backend tensor
- Reader(const backend::ITensor *tensor)
- : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
- {
- assert(tensor->layout() == ir::Layout::NHWC);
-
- const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
- _strides.C = tensor->dimension(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
- _strides.W = tensor->dimension(2) == 1 ? 0 : tensor->calcOffset({0, 0, 1, 0}) - start_offset;
- _strides.H = tensor->dimension(1) == 1 ? 0 : tensor->calcOffset({0, 1, 0, 0}) - start_offset;
- _strides.N = tensor->dimension(0) == 1 ? 0 : tensor->calcOffset({1, 0, 0, 0}) - start_offset;
-
- _shape.C = tensor->dimension(3);
- _shape.W = tensor->dimension(2);
- _shape.H = tensor->dimension(1);
- _shape.N = tensor->dimension(0);
- }
-
-public:
- T at(uint32_t batch, uint32_t row, uint32_t col, uint32_t ch) const final
- {
- return getRef(batch, row, col, ch);
- }
- T at(uint32_t row, uint32_t col, uint32_t ch) const final { return getRef(0, row, col, ch); }
-
-protected:
- const T &getRef(uint32_t batch, uint32_t row, uint32_t col, uint32_t ch) const
- {
- const auto offset = feature_index_to_byte_offset(batch, row, col, ch);
-
- const T *ptr = reinterpret_cast<const T *>(_ptr + offset);
-
- return *ptr;
- }
-
-private:
- size_t feature_index_to_byte_offset(uint32_t batch, uint32_t row, uint32_t col, uint32_t ch) const
- {
- assert(1u * _shape.N > batch); // shape.N > batch
- assert(1u * _shape.H > row); // shape.H > row
- assert(1u * _shape.W > col); // shape.W > col
- assert(1u * _shape.C > ch); // shape.C > ch
-
- uint32_t res = 0;
- res += batch * _strides.N;
- res += row * _strides.H;
- res += col * _strides.W;
- res += ch * _strides.C;
-
- return res;
- }
-
-private:
- // TODO Remove _shape
- ir::FeatureShape _shape;
- using Strides = ir::FeatureShape;
- Strides _strides;
- const uint8_t *_ptr;
- size_t _len;
-};
-
-} // namespace nhwc
-} // namespace feature
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_FEATURE_NHWC_READER_H__
diff --git a/runtime/onert/core/src/exec/feature/nhwc/View.h b/runtime/onert/core/src/exec/feature/nhwc/View.h
deleted file mode 100644
index 72c8c3415..000000000
--- a/runtime/onert/core/src/exec/feature/nhwc/View.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_EXEC_FEATURE_NHWC_VIEW_H__
-#define __ONERT_EXEC_FEATURE_NHWC_VIEW_H__
-
-#include "../Reader.h"
-
-#include <cassert>
-#include <cstddef>
-
-#include "backend/ITensor.h"
-#include "ir/Shape.h"
-#include "util/Utils.h"
-
-namespace onert
-{
-namespace exec
-{
-namespace feature
-{
-namespace nhwc
-{
-
-template <typename T> class View final : public Reader<T>
-{
-public:
- // Construct for buffer of model inputs
- View(const ir::FeatureShape &shape, T *ptr, size_t len) : Reader<T>{shape, ptr, len}
- {
- // DO NOTHING
- }
-
- // Construct for backend tensor
- View(backend::ITensor *tensor) : Reader<T>{tensor}
- {
- // DO NOTHING
- }
-
-public:
- using Reader<T>::at;
- T &at(uint32_t batch, uint32_t row, uint32_t col, uint32_t ch)
- {
- return const_cast<T &>(Reader<T>::getRef(batch, row, col, ch));
- }
- T &at(uint32_t row, uint32_t col, uint32_t ch)
- {
- return const_cast<T &>(Reader<T>::getRef(0, row, col, ch));
- }
-};
-
-} // namespace nhwc
-} // namespace feature
-} // namespace exec
-} // namespace onert
-
-#endif // __ONERT_EXEC_FEATURE_NHWC_VIEW_H__
diff --git a/runtime/onert/core/src/interp/Buffer.h b/runtime/onert/core/src/interp/Buffer.h
deleted file mode 100644
index 24938f74f..000000000
--- a/runtime/onert/core/src/interp/Buffer.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Buffer.h
- * @brief This file contains Buffer interface and InternalBuffer, ExternalBuffer class
- */
-#ifndef __ONERT_INTERP_BUFFER_H__
-#define __ONERT_INTERP_BUFFER_H__
-
-#include <memory>
-
-#include "ir/Data.h"
-
-namespace onert
-{
-namespace interp
-{
-
-/**
- * @brief Interface for writable data area
- */
-class Buffer : public ir::Data
-{
-public:
- /**
- * @brief Return writable pointer for data area
- * @return Writable pointer
- */
- virtual uint8_t *baseWritable(void) const = 0;
-};
-
-/**
- * @brief Class for internally allocated data area
- */
-class InternalBuffer final : public Buffer
-{
-public:
- InternalBuffer(size_t size) : _base{std::make_unique<uint8_t[]>(size)}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- size_t size(void) const override { return _size; }
- const uint8_t *base(void) const override { return _base.get(); }
- uint8_t *baseWritable(void) const override { return _base.get(); }
-
-private:
- std::unique_ptr<uint8_t[]> _base;
- size_t _size;
-};
-
-/**
- * @brief Class for data area from outside
- */
-class ExternalBuffer final : public Buffer
-{
-public:
- ExternalBuffer(uint8_t *base, size_t size) : _base{base}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- size_t size(void) const override { return _size; }
- const uint8_t *base(void) const override { return _base; }
- uint8_t *baseWritable(void) const override { return _base; }
-
-private:
- uint8_t *_base;
- size_t _size;
-};
-
-} // namespace interp
-} // namespace onert
-
-#endif // __ONERT_INTERP_BUFFER_H__
diff --git a/runtime/onert/core/src/interp/ExecEnv.h b/runtime/onert/core/src/interp/ExecEnv.h
deleted file mode 100644
index 7f577ea6e..000000000
--- a/runtime/onert/core/src/interp/ExecEnv.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file ExecEnv.h
- * @brief This file contains ExecEnv to access interpreter tensor and execution status
- */
-#ifndef __ONERT_INTERP_EXEC_ENV_H_
-#define __ONERT_INTERP_EXEC_ENV_H_
-
-#include <unordered_set>
-
-#include "ir/Graph.h"
-#include "Tensor.h"
-
-namespace onert
-{
-namespace interp
-{
-
-/**
- * @brief Class to gather interpreter execution environment
- * Each interpreter instance own execution environment
- */
-class ExecEnv
-{
-public:
- /**
- * @brief Construct a new Exec Env object (deleted)
- */
- ExecEnv(void) = delete;
- /**
- * @brief Construct a new ExecEnv object
- * @param[in] graph Graph to execute by interpreter
- */
- explicit ExecEnv(const ir::Graph &graph) : _graph(graph)
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Return graph to execute
- * @return Graph
- */
- const ir::Graph &graph(void) const { return _graph; }
- /**
- * @brief Assign tensor to environment which have allocated or assigned buffer
- * @param[in] index Tensor index
- * @param[in] tensor Tensor
- */
- void assignTensor(const ir::OperandIndex index, std::shared_ptr<ITensor> tensor)
- {
- assert(tensor->bufferRO() != nullptr);
- _tensors.emplace(index, tensor);
- }
-
- /**
- * @brief Return tensor pointer in environment
- * @param[in] index Tensor index
- * can_optional @c True if tensor can be optional input, otherwise @c false
- * @return Tensor pointer
- */
- const ITensor *tensorAt(const ir::OperandIndex index, bool can_optional = false) const
- {
- if (_tensors.find(index) == _tensors.end())
- {
- // It may optional input,
- // otherwise input is not set by runtime user
- if (can_optional)
- {
- return nullptr;
- }
-
- throw std::runtime_error{"ExecEnv: Input is not set"};
- }
-
- return _tensors.at(index).get();
- }
-
- /**
- * @brief Check environment contains tensor
- * @param[in] index Tensor index
- * @return @c true if environment contain tensor, otherwise @c false
- */
- bool contains(const ir::OperandIndex index) const
- {
- return (_tensors.find(index) != _tensors.end());
- }
-
- /**
- * @brief Allocate tensor using operand info
- * @param[in] index Tensor index
- * @param[in] info Operand info
- * @note If already allocated, just return
- * @TODO More smart allocation policy
- */
- void allocateIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info)
- {
- // already allocated, or constant
- if (contains(index))
- {
- return;
- }
-
- // Buffer from external (ex. model output)
- auto tensor = std::make_shared<Tensor>(info);
- if (isExtBuffer(index))
- {
- tensor->setBuffer(_external_buffers.at(index));
- assignTensor(index, tensor);
-
- return;
- }
-
- tensor->setBuffer(std::make_shared<InternalBuffer>(tensor->total_size()));
- assignTensor(index, tensor);
- _buffers.insert(index);
- }
-
- /**
- * @brief Allocate read-only tensor and share data with other tensor
- * @param[in] index Tensor index
- * @param[in] info Operand info
- * @param[in] index_to_share Tensor index that have data to share
- */
- void allocateAndShareIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info,
- const ir::OperandIndex index_to_share)
- {
- if (!contains(index_to_share))
- {
- throw std::runtime_error{"Cannot find tensor to share data"};
- }
-
- // already allocated
- if (contains(index))
- {
- return;
- }
-
- if (isExtBuffer(index))
- {
- auto tensor = std::make_shared<Tensor>(info);
- tensor->setBuffer(_external_buffers.at(index));
- assignTensor(index, tensor);
- }
- else
- {
- auto tensor = std::make_shared<ROTensor>(info);
- tensor->setData(tensorAt(index_to_share)->shareData());
- assignTensor(index, tensor);
- _buffers.insert(index);
- }
- }
-
- /**
- * @brief Free buffer if allocated by allocateIfNeed
- * @param[in] index Tensor index
- * @note If allocated by outside, just return
- */
- void freeIfAllocated(const ir::OperandIndex index)
- {
- if (_buffers.find(index) != _buffers.end())
- {
- _tensors.at(index)->releaseData();
- }
- }
-
- /**
- * @brief Assign ExternalBuffer into external buffer map
- * @param[in] index Tensor index
- * @param[in] buffer External buffer
- */
- void assignExternalBuffer(const ir::OperandIndex index, std::shared_ptr<ExternalBuffer> buffer)
- {
- _external_buffers.emplace(index, buffer);
- }
-
-private:
- bool isExtBuffer(const ir::OperandIndex index)
- {
- return (_external_buffers.find(index) != _external_buffers.end());
- }
-
-private:
- const ir::Graph &_graph;
- // Tensor map to use in interpreter
- // It should map tensors that have allocated or assigned buffer pointer
- std::unordered_map<ir::OperandIndex, std::shared_ptr<ITensor>> _tensors;
- // Tensors allocated by allocateIfNeed (buffer)
- std::unordered_set<ir::OperandIndex> _buffers;
- // Tensor buffer from external
- std::unordered_map<ir::OperandIndex, std::shared_ptr<ExternalBuffer>> _external_buffers;
-};
-
-} // namespace interp
-} // namespace onert
-
-#endif // __ONERT_INTERP_EXEC_ENV_H_
diff --git a/runtime/onert/core/src/interp/InterpExecutor.cc b/runtime/onert/core/src/interp/InterpExecutor.cc
deleted file mode 100644
index cd31a4dca..000000000
--- a/runtime/onert/core/src/interp/InterpExecutor.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "interp/InterpExecutor.h"
-#include "interp/ExecEnv.h"
-#include "interp/Interpreter.h"
-
-#include "util/logging.h"
-
-#include <memory>
-
-namespace onert
-{
-namespace interp
-{
-
-void InterpExecutor::execute(const exec::IODescription &desc)
-{
- /************************************************************************
- * Prepare execution model (submodel)
- It may execute divided model
- but now consider model inference is done at interpreter
- ***********************************************************************/
- ir::OperandIndexMap<std::shared_ptr<ITensor>> tensor_map;
-
- for (uint32_t n = 0; n < _graph.getInputs().size(); n++)
- {
- ir::IOIndex index{n};
- const auto input_index = _graph.getInputs().at(index);
-
- const auto input = desc.inputs.at(n).get();
- if (input == nullptr)
- {
- // Optional input
- continue;
- }
-
- auto input_tensor = std::make_shared<ROTensor>(input->info);
- input_tensor->setData(std::make_shared<const ir::ExternalData>(
- reinterpret_cast<const uint8_t *>(input->buffer), input->size));
- tensor_map[input_index] = input_tensor;
- }
-
- /************************************************************************
- * Prepare execution environment
- Execution environment will be assigned to invoked interpreter instance
- ***********************************************************************/
-
- std::unique_ptr<ExecEnv> interp_env = std::make_unique<ExecEnv>(_graph);
-
- // Assign input/output tensor into interpreter execution environment
- for (auto index : _graph.getInputs())
- {
- if (tensor_map.find(index) != tensor_map.end())
- {
- VERBOSE(INTERPRETER) << "Assign input tensor. operand index:" << index.value() << std::endl;
- interp_env->assignTensor(index, tensor_map.at(index));
- }
- }
-
- for (uint32_t n = 0; n < _graph.getOutputs().size(); n++)
- {
- ir::IOIndex index{n};
- const auto output_index = _graph.getOutputs().at(index);
- const auto output = desc.outputs.at(n).get();
- if (output == nullptr)
- {
- // Optional output
- continue;
- }
-
- VERBOSE(INTERPRETER) << "Set out buffer to ExecEnv. operand index:" << output_index.value()
- << std::endl;
-
- interp_env->assignExternalBuffer(
- output_index, std::make_shared<ExternalBuffer>(reinterpret_cast<uint8_t *>(output->buffer),
- output->size));
- }
-
- // Allocate constant tensor
- _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
- if (obj.isConstant())
- {
- VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. operand index:" << ind.value()
- << std::endl;
-
- assert(obj.data());
- auto const_tensor = std::make_shared<ROTensor>(obj.info());
- // Assume that interpreter's tensor layout is same with model (NHWC)
- const_tensor->setData(
- std::make_shared<ir::ExternalData>(obj.data()->base(), obj.info().total_size()));
- interp_env->assignTensor(ind, const_tensor);
- }
- });
-
- /*****************************************************************************
- * Invoke interpreter
- ****************************************************************************/
-
- interp::Interpreter interp(std::move(interp_env));
- interp.run();
-
- /*****************************************************************************
- * Invoked interpreter run is finished
- ****************************************************************************/
-
- // If interpreter execute submodel
- // 1. Get tensor output of submodel into tensor_map to save result
- // 2. Generate new ExecEnv for next interpretation
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/InterpExecutor.h b/runtime/onert/core/src/interp/InterpExecutor.h
deleted file mode 100644
index 2e3f3ca54..000000000
--- a/runtime/onert/core/src/interp/InterpExecutor.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file InterpExecutor.h
- * @brief This file contains InterpExecutor class\n
- * to manage interpreter execution and environment
- */
-#ifndef __ONERT_INTERP_INTERP_EXECUTOR_H__
-#define __ONERT_INTERP_INTERP_EXECUTOR_H__
-
-#include "ir/OperandIndexMap.h"
-#include "ir/Graph.h"
-#include "exec/IExecutor.h"
-
-namespace onert
-{
-namespace interp
-{
-
-class ITensor;
-
-/**
- * @brief Class to execute model using interpreter
- */
-class InterpExecutor final : public exec::IExecutor
-{
-public:
- explicit InterpExecutor(const ir::Graph &graph) : _graph(graph)
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Return graph object
- * @return Graph object
- */
- const ir::Graph &graph() final { return _graph; }
- void setIndexedRanks(std::shared_ptr<ir::OperationIndexMap<int64_t>>) override{
- // Not implemented
- };
- /**
- * @brief Start execution
- * @note It should be called after setting input and output buffer
- */
- void execute(const exec::IODescription &desc) final;
-
-private:
- const ir::Graph &_graph;
- ir::OperandIndexMap<std::shared_ptr<ITensor>> _tensor_map;
-};
-
-} // namespace interp
-} // namespace onert
-
-#endif // __ONERT_INTERP_INTERP_EXECUTOR_H__
diff --git a/runtime/onert/core/src/interp/InterpOps.lst b/runtime/onert/core/src/interp/InterpOps.lst
deleted file mode 100644
index 0714df38a..000000000
--- a/runtime/onert/core/src/interp/InterpOps.lst
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef INTERP_OP
-#error Define INTERP_OP before including this file
-#endif
-
-// Supported operation name in interpreter
-//
-// Same list with Operations.lst
-// Make comment out if operation is not supported in interpreter
-INTERP_OP(BinaryArithmetic)
-//INTERP_OP(BatchToSpaceND)
-//INTERP_OP(Cast)
-INTERP_OP(Conv2D)
-INTERP_OP(DepthwiseConv2D)
-INTERP_OP(Pool2D)
-INTERP_OP(Concat)
-INTERP_OP(FullyConnected)
-//INTERP_OP(Reduce)
-INTERP_OP(Reshape)
-INTERP_OP(Softmax)
-//INTERP_OP(Squeeze)
-//INTERP_OP(Slice)
-//INTERP_OP(StridedSlice)
-INTERP_OP(ElementwiseActivation)
-//INTERP_OP(Transpose)
-//INTERP_OP(Exp)
-//INTERP_OP(Comparison)
-//INTERP_OP(LogicalNot)
-//INTERP_OP(LSTM)
-//INTERP_OP(RSQRT)
-//INTERP_OP(ResizeBilinear)
-//INTERP_OP(RNN)
-//INTERP_OP(Floor)
-//INTERP_OP(SpaceToBatchND)
-//INTERP_OP(SpaceToDepth)
-//INTERP_OP(EmbeddingLookup)
-//INTERP_OP(L2Normalization)
-//INTERP_OP(HashtableLookup)
-INTERP_OP(InstanceNorm)
-//INTERP_OP(PReLU)
-INTERP_OP(TransposeConv)
-//INTERP_OP(SQRT)
-//INTERP_OP(SquaredDifference)
-//INTERP_OP(TopKV2)
-INTERP_OP(Gather)
-//INTERP_OP(Neg)
-//INTERP_OP(Abs)
-//INTERP_OP(ArgMax)
-//INTERP_OP(Dequantize)
-//INTERP_OP(LocalResponseNormalization)
-//INTERP_OP(DepthToSpace)
-//INTERP_OP(Pack)
-//INTERP_OP(Split)
-//INTERP_OP(Unpack)
-INTERP_OP(Pad)
-//INTERP_OP(Custom)
-//INTERP_OP(Permute)
-//INTERP_OP(OneHot)
diff --git a/runtime/onert/core/src/interp/Interpreter.cc b/runtime/onert/core/src/interp/Interpreter.cc
deleted file mode 100644
index b92afbe73..000000000
--- a/runtime/onert/core/src/interp/Interpreter.cc
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Interpreter.h"
-
-#include <stack>
-#include <unordered_set>
-
-#include "Registration.h"
-
-#include "ir/OperandIndexMap.h"
-#include "util/logging.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace interp
-{
-
-// TODO more structured execution kernel implementation
-// TODO use cker for execution
-// TODO divide tensor prepare and execution
-// TODO introduce memory manager (buffer allocate and free)
-class OperationExecutor
-{
-public:
- OperationExecutor(ExecEnv *env) : _env{env}
- {
-#define INTERP_OP(InternalName) _kernels[ir::OpCode::InternalName] = get##InternalName();
-#include "InterpOps.lst"
-#undef INTERP_OP
- }
-
- void execute(const ir::OperationIndex &idx)
- {
- const ir::Operation &node = _env->graph().operations().at(idx);
- const auto nodeName = node.name();
- VERBOSE(INTERPRETER) << "Prepare output operands and execute " << nodeName
- << " operation (id: " << idx.value() << ")" << std::endl;
-
- const auto nodeOpCode = node.opcode();
- if (_kernels.find(nodeOpCode) == _kernels.end())
- {
- throw std::runtime_error{"Interpreter: Operation " + nodeName + " is not yet implemented"};
- }
-
- if (_kernels[nodeOpCode]->prepare != nullptr)
- {
- _kernels[nodeOpCode]->prepare(_env, node);
- }
- _kernels[nodeOpCode]->invoke(_env, node);
- }
-
-private:
- ExecEnv *_env;
- std::unordered_map<ir::OpCode, OpKernel *> _kernels;
-};
-
-void Interpreter::run()
-{
- VERBOSE(INTERPRETER) << "Interpreter is invoked " << std::endl;
-
- // operand_stack: save operands prepared to use
- std::stack<ir::OperandIndex> operand_stack;
-
- // Note: We should push input first, then constant.
- // We use use-def for find operators ready to execution,
- // but Use-Def cannot handle parameters (maybe constant, but not always)
- // Note: If all model inputs are constant, it may not work (depend on tensors' order).
- // But that scenario may not exist
- for (auto ind : _env->graph().getInputs())
- {
- VERBOSE(INTERPRETER) << "Input: Push to operand stack " << ind.value() << std::endl;
-
- operand_stack.push(ind);
- }
-
- _env->graph().operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
- if (obj.isConstant())
- {
- VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl;
-
- operand_stack.push(ind);
- }
- });
-
- // Execution
- std::unordered_set<ir::OperandIndex> ready_check;
- std::unordered_set<ir::OperationIndex> executed;
- OperationExecutor executor{_env.get()};
- while (!operand_stack.empty())
- {
- const auto current_operand_index = operand_stack.top();
- operand_stack.pop();
- VERBOSE(INTERPRETER) << "Poped operand " << current_operand_index.value()
- << " is checked ready to use" << std::endl;
-
- assert(ready_check.find(current_operand_index) == ready_check.end());
- ready_check.insert(current_operand_index);
-
- // Find prepared operations by scan use of current operand
- std::stack<ir::OperationIndex> operation_stack;
- const auto use_operators = _env->graph().operands().at(current_operand_index).getUses();
- for (const auto &use_operator : use_operators)
- {
- // Assumption: all parameters are ready to use
- bool operator_ready = true;
- for (auto input_index : _env->graph().operations().at(use_operator).getInputs())
- {
- if (ready_check.find(input_index) == ready_check.end())
- {
- operator_ready = false;
- break;
- }
- }
-
- if (operator_ready)
- {
- VERBOSE(INTERPRETER) << "Ready to execute operation " << use_operator.value() << std::endl;
- operation_stack.push(use_operator);
- }
- }
-
- while (!operation_stack.empty())
- {
- const auto current_operation_index = operation_stack.top();
- operation_stack.pop();
- VERBOSE(INTERPRETER) << "Poped operation: " << current_operation_index.value() << "("
- << _env->graph().operations().at(current_operation_index).name() << ")"
- << std::endl;
-
- // execution
- // 1. Prepare output tensor
- // 2. Call operation kernel
- executor.execute(current_operation_index);
- executed.insert(current_operation_index);
-
- // 3. Push each output into operand stack
- const auto def_operands = _env->graph().operations().at(current_operation_index).getOutputs();
- for (auto def_operand : def_operands)
- {
- VERBOSE(INTERPRETER) << "Buffer: Push to operand stack " << def_operand.value()
- << std::endl;
- operand_stack.push(def_operand);
- }
-
- // 4. Free if lifetime of buffer operands used by input is finished
- for (auto input_index : _env->graph().operations().at(current_operation_index).getInputs())
- {
- const auto use_operators = _env->graph().operands().at(input_index).getUses();
- bool dead_buffer = true;
- for (const auto &use_operator : use_operators)
- {
- if (executed.find(use_operator) == executed.end())
- {
- dead_buffer = false;
- break;
- }
- }
-
- if (dead_buffer)
- {
- _env->freeIfAllocated(input_index);
- }
- }
- }
- }
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/Interpreter.h b/runtime/onert/core/src/interp/Interpreter.h
deleted file mode 100644
index d2165f538..000000000
--- a/runtime/onert/core/src/interp/Interpreter.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Interpreter.h
- * @brief This file contains Interpreter class for interpretation
- */
-#ifndef __ONERT_INTERP_INTERPRETER_H__
-#define __ONERT_INTERP_INTERPRETER_H__
-
-#include "ExecEnv.h"
-
-namespace onert
-{
-namespace interp
-{
-
-/**
- * @brief Class for interpretation
- */
-class Interpreter
-{
-
-public:
- /**
- * @brief Construct a new Interpreter object (deleted)
- */
- Interpreter() = delete;
- /**
- * @brief Construct a new Interpreter object
- * @param[in] env Execution environment variable for interpreter object
- */
- Interpreter(std::unique_ptr<ExecEnv> env) : _env{std::move(env)}
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Run interpreter until there is no operation to execute
- */
- void run();
-
-private:
- std::unique_ptr<ExecEnv> _env;
-};
-
-} // namespace interp
-} // namespace onert
-
-#endif // __ONERT_INTERP_INTERPRETER_H__
diff --git a/runtime/onert/core/src/interp/Registration.h b/runtime/onert/core/src/interp/Registration.h
deleted file mode 100644
index 956b92a53..000000000
--- a/runtime/onert/core/src/interp/Registration.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_INTERP_REGISTRATION_H__
-#define __ONERT_INTERP_REGISTRATION_H__
-
-#include "ExecEnv.h"
-
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace interp
-{
-
-struct OpKernel
-{
- std::function<void(ExecEnv *, const ir::Operation &)> prepare;
- std::function<void(const ExecEnv *, const ir::Operation &)> invoke;
-};
-
-// Defined in operations/ directory
-#define INTERP_OP(InternalName) OpKernel *get##InternalName();
-#include "InterpOps.lst"
-#undef INTERP_OP
-
-} // namespace interp
-} // namespace onert
-
-#endif // __ONERT_INTERP_REGISTRATION_H__
diff --git a/runtime/onert/core/src/interp/Tensor.cc b/runtime/onert/core/src/interp/Tensor.cc
deleted file mode 100644
index 07f8b75dc..000000000
--- a/runtime/onert/core/src/interp/Tensor.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Tensor.h"
-
-#define NO_USE(a) (void)(a)
-
-namespace onert
-{
-namespace interp
-{
-
-void ITensor::access(const std::function<void(backend::ITensor &tensor)> &fn) { fn(*this); }
-
-size_t ROTensor::calcOffset(const ir::Coordinates &coords) const
-{
- NO_USE(coords);
- throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now.");
-}
-
-size_t Tensor::calcOffset(const ir::Coordinates &coords) const
-{
- NO_USE(coords);
- throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now.");
-}
-
-ir::Layout ROTensor::layout() const
-{
- // TODO Changes to return frontend layout
- return ir::Layout::NHWC;
-}
-
-ir::Layout Tensor::layout() const
-{
- // TODO Changes to return frontend layout
- return ir::Layout::NHWC;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/Tensor.h b/runtime/onert/core/src/interp/Tensor.h
deleted file mode 100644
index 8b72d537d..000000000
--- a/runtime/onert/core/src/interp/Tensor.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Tensor.h
- * @brief This file contains ITensor interface, ROTensor class, and Tensor class
- */
-#ifndef __ONERT_INTERP_TENSOR_H__
-#define __ONERT_INTERP_TENSOR_H__
-
-#include "Buffer.h"
-
-#include "ir/OperandInfo.h"
-#include "backend/ITensor.h"
-#include "ir/Layout.h"
-
-namespace onert
-{
-namespace interp
-{
-
-/**
- * @brief Interface to handle Tensor in interpreter
- */
-class ITensor : public backend::ITensor
-{
-public:
- virtual ~ITensor() = default;
-
-public:
- virtual uint8_t *buffer() const = 0;
- /**
- * @brief Return shared pointer for buffer
- * @return Buffer shared pointer
- */
- virtual std::shared_ptr<const Buffer> shareBuffer() const = 0;
- /**
- * @brief Return read-only buffer pointer
- * @return Read-only buffer pointer
- */
- virtual const uint8_t *bufferRO() const = 0;
- /**
- * @brief Return shared pointer for data
- * @return Data shared pointer
- */
- virtual std::shared_ptr<const ir::Data> shareData() const = 0;
- /**
- * @brief Set internal/external buffer
- * @param[in] buffer Buffer pointer
- */
- virtual void setBuffer(std::shared_ptr<const Buffer> buffer) = 0;
- /**
- * @brief Set data reference (including constant, input)
- * @param[in] data Data pointer
- */
- virtual void setData(std::shared_ptr<const ir::Data> data) = 0;
- virtual void releaseData() = 0;
-
- virtual size_t total_size() const = 0;
- virtual size_t dimension(size_t index) const = 0;
- virtual size_t num_dimensions() const = 0;
- virtual size_t calcOffset(const ir::Coordinates &coords) const = 0;
-
- virtual bool has_padding() const = 0;
- /**
- * @brief Return data type of tensor
- * @return Data type of tensor
- */
- virtual ir::DataType data_type() const = 0;
- /**
- * @brief Return TensorInfo
- * @return TensorInfo
- */
- virtual const ir::OperandInfo &tensorInfo() const = 0;
- /**
- * @brief Return number of elements
- * @return Number of elements
- */
- virtual uint64_t num_elements() const = 0;
- void access(const std::function<void(backend::ITensor &tensor)> &fn) final;
-};
-
-/**
- * @brief Class to handle tensor in interpreter as read-only
- */
-class ROTensor final : public ITensor
-{
-public:
- ROTensor() = delete;
- ROTensor(const ir::OperandInfo &info) : _info(info)
- {
- // DO NOTHING
- }
-
-public:
- uint8_t *buffer() const override { throw std::runtime_error{"Read only tensor"}; }
- std::shared_ptr<const Buffer> shareBuffer() const override
- {
- throw std::runtime_error{"Read only tensor"};
- }
- const uint8_t *bufferRO() const override { return _data->base(); }
- std::shared_ptr<const ir::Data> shareData() const override { return _data; }
- void setBuffer(std::shared_ptr<const Buffer> buffer) override { _data = buffer; }
- void setData(std::shared_ptr<const ir::Data> data) override { _data = data; }
- void releaseData() override { _data = nullptr; }
-
- size_t total_size() const override { return _info.total_size(); }
- size_t dimension(size_t index) const override { return _info.shape().dim(index); }
- size_t num_dimensions() const override { return _info.shape().rank(); }
- size_t calcOffset(const ir::Coordinates &coords) const override;
- ir::Layout layout() const override;
- bool is_dynamic() const override { return false; }
- bool has_padding() const override { return false; }
- ir::DataType data_type() const override { return _info.typeInfo().type(); }
- float data_scale() const override { return _info.typeInfo().scale(); }
- int32_t data_offset() const override { return _info.typeInfo().offset(); }
- const ir::OperandInfo &tensorInfo() const override { return _info; }
- uint64_t num_elements() const override { return _info.shape().num_elements(); };
-
-private:
- const ir::OperandInfo _info;
- std::shared_ptr<const ir::Data> _data{nullptr};
-};
-
-/**
- * @brief Class to handle tensor in interpreter as writable
- */
-class Tensor final : public ITensor
-{
-public:
- Tensor() = delete;
- Tensor(const ir::OperandInfo &info) : _info(info)
- {
- // DO NOTHING
- }
-
-public:
- uint8_t *buffer() const override { return _buffer->baseWritable(); }
- std::shared_ptr<const Buffer> shareBuffer() const override { return _buffer; };
- const uint8_t *bufferRO() const override { return _buffer->base(); }
- std::shared_ptr<const ir::Data> shareData() const override { return _buffer; }
- void setBuffer(std::shared_ptr<const Buffer> buffer) override { _buffer = buffer; }
- void setData(std::shared_ptr<const ir::Data>) override
- {
- throw std::runtime_error{"Passed data may read-only"};
- }
- void releaseData() override { _buffer = nullptr; }
-
- size_t total_size() const override { return _info.total_size(); }
- size_t dimension(size_t index) const override { return _info.shape().dim(index); }
- size_t num_dimensions() const override { return _info.shape().rank(); }
- size_t calcOffset(const ir::Coordinates &coords) const override;
- ir::Layout layout() const override;
- bool is_dynamic() const override { return false; }
- bool has_padding() const override { return false; }
- ir::DataType data_type() const override { return _info.typeInfo().type(); }
- float data_scale() const override { return _info.typeInfo().scale(); }
- int32_t data_offset() const override { return _info.typeInfo().offset(); }
- const ir::OperandInfo &tensorInfo() const override { return _info; }
- uint64_t num_elements() const override { return _info.shape().num_elements(); };
-
-private:
- const ir::OperandInfo _info;
- std::shared_ptr<const Buffer> _buffer{nullptr};
-};
-
-} // namespace interp
-} // namespace onert
-
-#endif // __ONERT_INTERP_TENSOR_H__
diff --git a/runtime/onert/core/src/interp/operations/BinaryArithmeticOps.cc b/runtime/onert/core/src/interp/operations/BinaryArithmeticOps.cc
deleted file mode 100644
index 86e883524..000000000
--- a/runtime/onert/core/src/interp/operations/BinaryArithmeticOps.cc
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/BinaryArithmeticOps.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/BinaryArithmetic.h"
-#include "misc/polymorphic_downcast.h"
-#include "cker/Types.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace
-{
-
-enum class OpType
-{
- ADD,
- SUB,
- MUL
-};
-
-void prepare(ExecEnv *env, const ir::Operation &node)
-{
- const auto &arithmetic_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::BinaryArithmetic &>(node);
-
- const auto lhs_index = node.getInputs().at(arithmetic_node.LHS);
- const auto rhs_index = node.getInputs().at(arithmetic_node.RHS);
- const auto out_index = node.getOutputs().at(0);
-
- const auto lhs_tensor = env->tensorAt(lhs_index);
- const auto rhs_tensor = env->tensorAt(rhs_index);
-
- // Check shape and type lhs is same with rhs
- // TODO Util function to compare TensorInfo
- if (lhs_tensor->data_type() != rhs_tensor->data_type())
- {
- throw std::runtime_error{"Interp(" + arithmetic_node.name() + "): Different input types"};
- }
-
- bool try_broadcast = (lhs_tensor->tensorInfo().shape() != rhs_tensor->tensorInfo().shape());
- if (try_broadcast)
- {
- bool success = true;
- auto out_shape = calcBroadcastShape(lhs_tensor->tensorInfo().shape(),
- rhs_tensor->tensorInfo().shape(), success);
- if (!success)
- {
- throw std::runtime_error{"Interp(" + arithmetic_node.name() + "): Fail to brodcasting"};
- }
-
- auto output_info =
- ir::OperandInfo::createStaticInfo(out_shape, lhs_tensor->tensorInfo().typeInfo());
- // We can handle already allocated (ex. model output)
- env->allocateIfNeeded(out_index, output_info);
- }
- else
- {
- // Output's shape and type is same with input
- auto output_info = lhs_tensor->tensorInfo();
- // We can handle already allocated (ex. model output)
- env->allocateIfNeeded(out_index, output_info);
- }
-
- auto out_tensor = env->tensorAt(out_index);
- // Check shape and type lhs is same with output
- // TODO Util function to compare TensorInfo
- if (lhs_tensor->data_type() != out_tensor->data_type())
- {
- throw std::runtime_error{"Interp(" + arithmetic_node.name() + "): Invalid output type"};
- }
-}
-
-inline void setActivationParams(float min, float max, nnfw::cker::BinaryArithmeticOpParam *params)
-{
- params->float_activation_min = min;
- params->float_activation_max = max;
-}
-
-inline void setActivationParams(int32_t min, int32_t max,
- nnfw::cker::BinaryArithmeticOpParam *params)
-{
- params->quantized_activation_min = min;
- params->quantized_activation_max = max;
-}
-
-template <typename raw_type, OpType op_type>
-void invoke(const ITensor *lhs_tensor, const ITensor *rhs_tensor, const ITensor *out_tensor,
- const ir::operation::BinaryArithmetic::Param &param)
-{
- const auto lhs_buffer = lhs_tensor->bufferRO();
- const auto rhs_buffer = rhs_tensor->bufferRO();
- auto out_buffer = out_tensor->buffer();
-
- nnfw::cker::BinaryArithmeticOpParam cker_param;
- raw_type activation_min, activation_max;
- calculateActivationRange(param.activation, &activation_min, &activation_max);
- setActivationParams(activation_min, activation_max, &cker_param);
- const raw_type *lhs_ptr = reinterpret_cast<const raw_type *>(lhs_buffer);
- const raw_type *rhs_ptr = reinterpret_cast<const raw_type *>(rhs_buffer);
- raw_type *out_ptr = reinterpret_cast<raw_type *>(out_buffer);
-
- const auto cker_op_type =
- (op_type == OpType::ADD)
- ? nnfw::cker::BinaryArithmeticOpType::ADD
- : ((op_type == OpType::SUB) ? nnfw::cker::BinaryArithmeticOpType::SUB
- : nnfw::cker::BinaryArithmeticOpType::MUL);
-
- const bool need_broadcast = nnfw::cker::ProcessBroadcastShapes(
- convertShape(lhs_tensor->tensorInfo().shape()),
- convertShape(rhs_tensor->tensorInfo().shape()), &cker_param);
-
- if (need_broadcast)
- {
- const auto lhs_shape = convertShape(lhs_tensor->tensorInfo().shape());
- const auto rhs_shape = convertShape(rhs_tensor->tensorInfo().shape());
- const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
- nnfw::cker::BroadcastBinaryArithmeticOp<cker_op_type>(cker_param, lhs_shape, lhs_ptr, rhs_shape,
- rhs_ptr, out_shape, out_ptr);
- return;
- }
-
- const auto lhs_shape = convertShape(lhs_tensor->tensorInfo().shape());
- const auto rhs_shape = convertShape(rhs_tensor->tensorInfo().shape());
- const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
- nnfw::cker::BinaryArithmeticOp<cker_op_type>(cker_param, lhs_shape, lhs_ptr, rhs_shape, rhs_ptr,
- out_shape, out_ptr);
-}
-
-template <OpType op_type>
-void invokeBinaryArithmetic(const ExecEnv *env, const ir::operation::BinaryArithmetic &node)
-{
- const auto lhs_index = node.getInputs().at(node.LHS);
- const auto rhs_index = node.getInputs().at(node.RHS);
- const auto out_index = node.getOutputs().at(0);
- const auto lhs_tensor = env->tensorAt(lhs_index);
- const auto rhs_tensor = env->tensorAt(rhs_index);
- const auto out_tensor = env->tensorAt(out_index);
- const auto data_type = lhs_tensor->data_type();
-
- if (data_type == ir::DataType::INT32)
- {
- invoke<int32_t, op_type>(lhs_tensor, rhs_tensor, out_tensor, node.param());
- }
- else if (data_type == ir::DataType::FLOAT32)
- {
- invoke<float, op_type>(lhs_tensor, rhs_tensor, out_tensor, node.param());
- }
- else
- {
- throw std::runtime_error{"NYI: Unsupported data type"};
- }
-}
-
-void invokeBinaryArithmeticOps(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &arithmetic_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::BinaryArithmetic &>(node);
-
- switch (arithmetic_node.param().arithmetic_type)
- {
- case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
- invokeBinaryArithmetic<OpType::ADD>(env, arithmetic_node);
- break;
- case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
- invokeBinaryArithmetic<OpType::SUB>(env, arithmetic_node);
- break;
- case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
- invokeBinaryArithmetic<OpType::MUL>(env, arithmetic_node);
- break;
- default:
- throw std::runtime_error{"Interp(BinaryArithmetic): NYI unsupported operation " +
- arithmetic_node.name()};
- break;
- }
-}
-
-} // namespace
-
-OpKernel *getBinaryArithmetic()
-{
- static OpKernel kernel = {prepare, invokeBinaryArithmeticOps};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/Concat.cc b/runtime/onert/core/src/interp/operations/Concat.cc
deleted file mode 100644
index efc46c66b..000000000
--- a/runtime/onert/core/src/interp/operations/Concat.cc
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/Concatenation.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/Concat.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace concat
-{
-
-void prepareConcat(ExecEnv *env, const ir::Operation &node)
-{
- const auto &concat_node = nnfw::misc::polymorphic_downcast<const ir::operation::Concat &>(node);
-
- const auto first_index = node.getInputs().at(0);
- const auto out_index = node.getOutputs().at(0);
-
- const auto first_tensor = env->tensorAt(first_index);
- uint32_t out_axis_dimension = 0;
- const int32_t axis_raw = concat_node.param().axis;
- const uint32_t axis = (axis_raw < 0) ? (axis_raw + first_tensor->num_dimensions()) : axis_raw;
-
- // All inputs shape should be same except axis dimension
- // All inputs type should be same
- for (auto input : node.getInputs())
- {
- assert(first_tensor->num_dimensions() == env->tensorAt(input)->num_dimensions());
- assert(first_tensor->data_type() == env->tensorAt(input)->data_type());
- for (uint32_t i = 0; i < first_tensor->num_dimensions(); i++)
- {
- if (i == axis)
- {
- out_axis_dimension += env->tensorAt(input)->dimension(i);
- continue;
- }
- assert(first_tensor->dimension(i) == env->tensorAt(input)->dimension(i));
- }
- }
-
- // Make output tensor info using first input tensor info, and accumulated axis dimension value
- auto out_shape = first_tensor->tensorInfo().shape();
- out_shape.dim(axis) = out_axis_dimension;
- env->allocateIfNeeded(out_index, ir::OperandInfo::createStaticInfo(
- out_shape, first_tensor->tensorInfo().typeInfo()));
-
- auto out_tensor = env->tensorAt(out_index);
- UNUSED_RELEASE(out_tensor);
-
- // Output shape should be same with input except axis dimension
- // Output type should be same with input
- assert(first_tensor->data_type() == out_tensor->data_type());
- for (uint32_t i = 0; i < first_tensor->num_dimensions(); i++)
- {
- if (i == axis)
- {
- continue;
- }
- assert(first_tensor->dimension(i) == out_tensor->dimension(i));
- }
-}
-
-void invoke(const std::vector<const ITensor *> in_tensors, const ITensor *out_tensor, uint32_t axis)
-{
- const uint32_t count = in_tensors.size();
-
- // Calculate
- nnfw::cker::ConcatenationParams cker_param;
- cker_param.axis = (int8_t)axis;
- cker_param.inputs_count = count;
-
- const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
-
- std::vector<nnfw::cker::Shape> in_shapes;
- std::vector<const nnfw::cker::Shape *> in_shape_ptrs;
- in_shapes.reserve(count);
- in_shape_ptrs.reserve(count);
- std::vector<const float *> in_ptrs;
- for (uint32_t i = 0; i < count; i++)
- {
- in_shapes.push_back(convertShape(in_tensors[i]->tensorInfo().shape()));
- in_shape_ptrs.push_back(&in_shapes[i]);
- in_ptrs.push_back(reinterpret_cast<const float *>(in_tensors[i]->bufferRO()));
- }
-
- auto out_buffer = out_tensor->buffer();
- float *out_ptr = reinterpret_cast<float *>(out_buffer);
-
- nnfw::cker::Concatenation<float>(cker_param, in_shape_ptrs.data(), in_ptrs.data(), out_shape,
- out_ptr);
-}
-
-void invokeConcat(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &concat_node = nnfw::misc::polymorphic_downcast<const ir::operation::Concat &>(node);
- const int32_t axis_raw = concat_node.param().axis;
-
- std::vector<const ITensor *> in_tensors;
- for (const auto &e : concat_node.getInputs())
- {
- in_tensors.emplace_back(env->tensorAt(e));
- }
-
- const auto out_index = node.getOutputs().at(0);
- const auto out_tensor = env->tensorAt(out_index);
- const uint32_t axis = (axis_raw < 0) ? (axis_raw + out_tensor->num_dimensions()) : axis_raw;
-
- const auto data_type = in_tensors[0]->data_type();
- if (data_type == ir::DataType::FLOAT32)
- {
- invoke(in_tensors, out_tensor, axis);
- }
- else
- {
- throw std::runtime_error{"NYI: Support float32 only"};
- }
-}
-} // namespace concat
-
-OpKernel *getConcat()
-{
- static OpKernel kernel = {concat::prepareConcat, concat::invokeConcat};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/Conv2D.cc b/runtime/onert/core/src/interp/operations/Conv2D.cc
deleted file mode 100644
index bb00b828c..000000000
--- a/runtime/onert/core/src/interp/operations/Conv2D.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/Conv.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/Conv2D.h"
-#include "util/Utils.h"
-#include "util/ShapeInference.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace conv2d
-{
-
-void prepareConv2D(ExecEnv *env, const ir::Operation &node)
-{
- const auto in_index = node.getInputs().at(ir::operation::Conv2D::INPUT);
- const auto kernel_index = node.getInputs().at(ir::operation::Conv2D::KERNEL);
- const auto bias_index = node.getInputs().at(ir::operation::Conv2D::BIAS);
- const auto out_index = node.getOutputs().at(0);
-
- const auto in_tensor = env->tensorAt(in_index);
- const auto kernel_tensor = env->tensorAt(kernel_index);
- const auto bias_tensor = env->tensorAt(bias_index);
-
- assert(in_tensor->num_dimensions() == 4);
- assert(kernel_tensor->num_dimensions() == 4);
- assert(bias_tensor->num_dimensions() == 1);
-
- UNUSED_RELEASE(in_tensor);
- UNUSED_RELEASE(kernel_tensor);
- UNUSED_RELEASE(bias_tensor);
-
- const auto output_info = env->graph().operands().at(out_index).info();
- if (output_info.total_size() == 0)
- {
- // Handle unspecified output shape
- const auto &conv_node = nnfw::misc::polymorphic_downcast<const ir::operation::Conv2D &>(node);
- const auto infered_output_shape = shape_inference::inferConv2DShape(
- in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(), conv_node.param());
- env->allocateIfNeeded(
- out_index, ir::OperandInfo::createStaticInfo(infered_output_shape, output_info.typeInfo()));
- }
- else
- {
- env->allocateIfNeeded(out_index, output_info);
- }
-
- auto out_tensor = env->tensorAt(out_index);
- UNUSED_RELEASE(out_tensor);
-
- // Handle same ifm & ofm data type only
- assert(in_tensor->data_type() == out_tensor->data_type());
- assert(out_tensor->num_dimensions() == 4);
-}
-
-void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *bias_tensor,
- const ITensor *ofm_tensor, const ir::operation::Conv2D::Param &param)
-{
- // TODO Support NCHW frontned
- const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
- const auto &ker_shape = ker_tensor->tensorInfo().shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
- const auto padding = ir::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride,
- ker_width, ker_height);
-
- // Calculate
- float activation_min, activation_max;
- calculateActivationRange(param.activation, &activation_min, &activation_max);
-
- nnfw::cker::ConvParams cker_param;
- cker_param.padding_type = convertPaddingType(param.padding.type);
- cker_param.padding_values.width = padding.left;
- cker_param.padding_values.height = padding.top;
- cker_param.stride_width = param.stride.horizontal;
- cker_param.stride_height = param.stride.vertical;
- cker_param.dilation_width_factor = 1;
- cker_param.dilation_height_factor = 1;
- cker_param.float_activation_min = activation_min;
- cker_param.float_activation_max = activation_max;
-
- const auto cker_ifm_shape = convertShape(ifm_tensor->tensorInfo().shape());
- const auto cker_ker_shape = convertShape(ker_tensor->tensorInfo().shape());
- const auto cker_bias_shape = convertShape(bias_tensor->tensorInfo().shape());
- const auto cker_ofm_shape = convertShape(ofm_tensor->tensorInfo().shape());
- const float *ifm_ptr = reinterpret_cast<const float *>(ifm_tensor->bufferRO());
- const float *ker_ptr = reinterpret_cast<const float *>(ker_tensor->bufferRO());
- const float *bias_ptr = reinterpret_cast<const float *>(bias_tensor->bufferRO());
- float *ofm_ptr = reinterpret_cast<float *>(ofm_tensor->buffer());
-
- nnfw::cker::Conv conv_kernel;
- conv_kernel(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr, cker_bias_shape,
- bias_ptr, cker_ofm_shape, ofm_ptr);
-}
-
-void invokeConv2D(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &conv_node = nnfw::misc::polymorphic_downcast<const ir::operation::Conv2D &>(node);
-
- const auto ifm_index = node.getInputs().at(ir::operation::Conv2D::INPUT);
- const auto ker_index = node.getInputs().at(ir::operation::Conv2D::KERNEL);
- const auto bias_index = node.getInputs().at(ir::operation::Conv2D::BIAS);
- const auto ofm_index = node.getOutputs().at(0);
-
- const auto ifm_tensor = env->tensorAt(ifm_index);
- const auto ker_tensor = env->tensorAt(ker_index);
- const auto bias_tensor = env->tensorAt(bias_index);
- const auto ofm_tensor = env->tensorAt(ofm_index);
-
- const auto data_type = ifm_tensor->data_type();
- if (data_type == ir::DataType::FLOAT32)
- {
- invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
- }
- else
- {
- throw std::runtime_error{"NYI: Support float32 only"};
- }
-}
-} // namespace conv2d
-
-OpKernel *getConv2D()
-{
- static OpKernel kernel = {conv2d::prepareConv2D, conv2d::invokeConv2D};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/DepthwiseConv2D.cc b/runtime/onert/core/src/interp/operations/DepthwiseConv2D.cc
deleted file mode 100644
index 0473855d9..000000000
--- a/runtime/onert/core/src/interp/operations/DepthwiseConv2D.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/DepthwiseConv.h>
-#include <misc/polymorphic_downcast.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/DepthwiseConv2D.h"
-#include "util/Utils.h"
-#include "util/ShapeInference.h"
-
-namespace onert
-{
-namespace interp
-{
-
-namespace
-{
-
-void prepareDepthwiseConv(ExecEnv *env, const ir::Operation &node)
-{
- const auto in_index = node.getInputs().at(ir::operation::DepthwiseConv2D::INPUT);
- const auto kernel_index = node.getInputs().at(ir::operation::DepthwiseConv2D::KERNEL);
- const auto bias_index = node.getInputs().at(ir::operation::DepthwiseConv2D::BIAS);
- const auto out_index = node.getOutputs().at(0);
-
- const auto in_tensor = env->tensorAt(in_index);
- const auto kernel_tensor = env->tensorAt(kernel_index);
- const auto bias_tensor = env->tensorAt(bias_index);
-
- assert(in_tensor->num_dimensions() == 4);
- assert(kernel_tensor->num_dimensions() == 4);
- assert(bias_tensor->num_dimensions() == 1);
-
- UNUSED_RELEASE(in_tensor);
- UNUSED_RELEASE(kernel_tensor);
- UNUSED_RELEASE(bias_tensor);
-
- // TODO handle unspecified output shape:
- // calculate output shape using ifm shape, kernel shape, padding, stride
- const auto output_info = env->graph().operands().at(out_index).info();
- if (output_info.total_size() == 0)
- {
- // Handle unspecified output shape
- const auto &depth_conv_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::DepthwiseConv2D &>(node);
- const auto infered_output_shape = shape_inference::inferDepthwiseConv2DShape(
- in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(),
- depth_conv_node.param());
- env->allocateIfNeeded(
- out_index, ir::OperandInfo::createStaticInfo(infered_output_shape, output_info.typeInfo()));
- }
- else
- {
- env->allocateIfNeeded(out_index, output_info);
- }
-
- auto out_tensor = env->tensorAt(out_index);
- UNUSED_RELEASE(out_tensor);
-
- // Handle same ifm & ofm data type only
- assert(in_tensor->data_type() == out_tensor->data_type());
- assert(out_tensor->num_dimensions() == 4);
-}
-
-void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *bias_tensor,
- const ITensor *ofm_tensor, const ir::operation::DepthwiseConv2D::Param &param)
-{
- // TODO Support NCHW frontend
- const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- // Kernel format is [1, kernel_height, kernel_width, depth_out].
- const auto &ker_shape = ker_tensor->tensorInfo().shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
- const auto padding = ir::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride,
- ker_width, ker_height);
-
- // Calculate
- float activation_min, activation_max;
- calculateActivationRange(param.activation, &activation_min, &activation_max);
-
- nnfw::cker::DepthwiseConvParams cker_param;
- cker_param.padding_values.width = padding.left;
- cker_param.padding_values.height = padding.top;
- cker_param.depth_multiplier = param.multiplier;
- cker_param.stride_width = param.stride.horizontal;
- cker_param.stride_height = param.stride.vertical;
- cker_param.dilation_width_factor = 1;
- cker_param.dilation_height_factor = 1;
- cker_param.float_activation_min = activation_min;
- cker_param.float_activation_max = activation_max;
-
- const auto cker_ifm_shape = convertShape(ifm_tensor->tensorInfo().shape());
- const auto cker_ker_shape = convertShape(ker_tensor->tensorInfo().shape());
- const auto cker_bias_shape = convertShape(bias_tensor->tensorInfo().shape());
- const auto cker_ofm_shape = convertShape(ofm_tensor->tensorInfo().shape());
- const float *ifm_ptr = reinterpret_cast<const float *>(ifm_tensor->bufferRO());
- const float *ker_ptr = reinterpret_cast<const float *>(ker_tensor->bufferRO());
- const float *bias_ptr = reinterpret_cast<const float *>(bias_tensor->bufferRO());
- float *ofm_ptr = reinterpret_cast<float *>(ofm_tensor->buffer());
-
- nnfw::cker::DepthwiseConv(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr,
- cker_bias_shape, bias_ptr, cker_ofm_shape, ofm_ptr);
-}
-
-void invokeDepthwiseConv(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &conv_node = static_cast<const ir::operation::DepthwiseConv2D &>(node);
-
- const auto ifm_index = node.getInputs().at(ir::operation::DepthwiseConv2D::INPUT);
- const auto ker_index = node.getInputs().at(ir::operation::DepthwiseConv2D::KERNEL);
- const auto bias_index = node.getInputs().at(ir::operation::DepthwiseConv2D::BIAS);
- const auto ofm_index = node.getOutputs().at(0);
-
- const auto ifm_tensor = env->tensorAt(ifm_index);
- const auto ker_tensor = env->tensorAt(ker_index);
- const auto bias_tensor = env->tensorAt(bias_index);
- const auto ofm_tensor = env->tensorAt(ofm_index);
-
- const auto data_type = ifm_tensor->data_type();
- if (data_type == ir::DataType::FLOAT32)
- {
- invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
- }
- else
- {
- throw std::runtime_error{"NYI: Support float32 only"};
- }
-}
-
-} // namespace
-
-OpKernel *getDepthwiseConv2D()
-{
- static OpKernel kernel = {prepareDepthwiseConv, invokeDepthwiseConv};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/ElementwiseActivations.cc b/runtime/onert/core/src/interp/operations/ElementwiseActivations.cc
deleted file mode 100644
index c8773bef4..000000000
--- a/runtime/onert/core/src/interp/operations/ElementwiseActivations.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cmath>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-
-#include "ir/operation/ElementwiseActivation.h"
-
-#include <misc/polymorphic_downcast.h>
-#include <cker/operation/Logistic.h>
-#include <cker/operation/Tanh.h>
-
-namespace onert
-{
-namespace interp
-{
-namespace
-{
-
-enum class ActivationType
-{
- Logistic,
- ReLU,
- Tanh
-};
-
-void prepare(ExecEnv *env, const ir::Operation &node)
-{
- const auto input_index = node.getInputs().at(0);
- const auto output_index = node.getOutputs().at(0);
-
- const auto input_tensor = env->tensorAt(input_index);
-
- const auto output_info = env->graph().operands().at(output_index).info();
- if (output_info.total_size() == 0)
- {
- // Output's shape and type is same with input
- auto input_info = input_tensor->tensorInfo();
- // We can handle already allocated (ex. model output)
- env->allocateIfNeeded(output_index, input_info);
- }
- else
- {
- env->allocateIfNeeded(output_index, output_info);
- }
-
- const auto output_tensor = env->tensorAt(output_index);
- // Check shape and type lhs is same with output
- // TODO Util function to compare TensorInfo
- if (input_tensor->data_type() != output_tensor->data_type())
- {
- throw std::runtime_error{"Interp(ElementwiseActivation): Invalid output type"};
- }
-}
-
-template <ActivationType act_type>
-void evalFloat(const float *input_ptr, float *output_ptr, uint64_t num_elements, float alpha,
- float beta)
-{
- std::function<float(const float &)> fn = [](const float &) { return std::nanf(""); };
- switch (act_type)
- {
- case ActivationType::ReLU:
- fn = [alpha, beta](const float &in) { return std::min(std::max(beta, in), alpha); };
- break;
- case ActivationType::Tanh:
- fn = [](const float &in) { return std::tanh(in); };
- break;
- default:
- throw std::runtime_error{"Interp(ElementwiseActivation): NYI - Unsupported activation"};
- break;
- }
-
- const float *input_end = input_ptr + num_elements;
- for (; input_ptr < input_end; input_ptr++, output_ptr++)
- {
- *output_ptr = fn(*input_ptr);
- }
-}
-
-template <ActivationType act_type> void invoke(const ExecEnv *env, const ir::Operation &node)
-{
- const auto input_index = node.getInputs().at(0);
- const auto output_index = node.getOutputs().at(0);
-
- // Check lhs shape is same with rhs (with broadcast)
- const auto input_tensor = env->tensorAt(input_index);
- const auto output_tensor = env->tensorAt(output_index);
-
- const auto data_type = input_tensor->data_type();
- if (data_type == ir::DataType::FLOAT32)
- {
- uint64_t elements = input_tensor->num_elements();
- const float *input_start = reinterpret_cast<const float *>(input_tensor->bufferRO());
- float *out = reinterpret_cast<float *>(output_tensor->buffer());
- if (act_type == ActivationType::Logistic)
- {
- const auto cker_input_shape = convertShape(input_tensor->tensorInfo().shape());
- const auto cker_output_shape = convertShape(output_tensor->tensorInfo().shape());
- nnfw::cker::Logistic(cker_input_shape, input_start, cker_output_shape, out);
- }
- else
- {
- const auto &act_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::ElementwiseActivation &>(node);
- evalFloat<act_type>(input_start, out, elements, act_node.param().alpha,
- act_node.param().beta);
- }
- }
- else
- {
- throw std::runtime_error{"Interp(" + node.name() + "): NYI - Support float only"};
- }
-}
-
-void invokeElementwiseActivation(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &act_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::ElementwiseActivation &>(node);
- switch (act_node.param().op_type)
- {
- case ir::operation::ElementwiseActivation::Type::LOGISTIC:
- invoke<ActivationType::Logistic>(env, node);
- break;
- case ir::operation::ElementwiseActivation::Type::RELU:
- invoke<ActivationType::ReLU>(env, node);
- break;
- case ir::operation::ElementwiseActivation::Type::TANH:
- invoke<ActivationType::Tanh>(env, node);
- break;
- default:
- throw std::runtime_error("Interp(" + node.name() + "): NYI - Unsupported activation");
- }
-}
-
-} // namespace
-
-OpKernel *getElementwiseActivation()
-{
- static OpKernel kernel = {prepare, invokeElementwiseActivation};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/FullyConnected.cc b/runtime/onert/core/src/interp/operations/FullyConnected.cc
deleted file mode 100644
index 12f529dab..000000000
--- a/runtime/onert/core/src/interp/operations/FullyConnected.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/FullyConnected.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/FullyConnected.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace fc
-{
-
-void prepareFC(ExecEnv *env, const ir::Operation &node)
-{
- const auto in_index = node.getInputs().at(ir::operation::FullyConnected::INPUT);
- const auto kernel_index = node.getInputs().at(ir::operation::FullyConnected::WEIGHT);
- const auto bias_index = node.getInputs().at(ir::operation::FullyConnected::BIAS);
- const auto out_index = node.getOutputs().at(0);
-
- const auto in_tensor = env->tensorAt(in_index);
- const auto kernel_tensor = env->tensorAt(kernel_index);
- const auto bias_tensor = env->tensorAt(bias_index);
-
- UNUSED_RELEASE(in_tensor);
- UNUSED_RELEASE(kernel_tensor);
- UNUSED_RELEASE(bias_tensor);
-
- assert(in_tensor->num_dimensions() >= 2);
- assert(kernel_tensor->num_dimensions() == 2);
- assert(bias_tensor->num_dimensions() == 1);
-
- const auto input_size_with_batch = in_tensor->num_elements();
- const auto num_units = kernel_tensor->dimension(0);
- const auto input_size = kernel_tensor->dimension(1);
- const auto batch_size = input_size_with_batch / input_size;
- assert(input_size_with_batch % input_size == 0);
- assert(num_units == bias_tensor->dimension(0));
-
- // Make output tensor info
- ir::Shape output_shape(2);
- output_shape.dim(0) = batch_size;
- output_shape.dim(1) = num_units;
- const auto out_info =
- ir::OperandInfo::createStaticInfo(output_shape, in_tensor->tensorInfo().typeInfo());
- env->allocateIfNeeded(out_index, out_info);
-
- auto out_tensor = env->tensorAt(out_index);
- UNUSED_RELEASE(out_tensor);
-
- // Handle same ifm & ofm data type only
- assert(in_tensor->data_type() == out_tensor->data_type());
- assert(out_tensor->num_dimensions() == 2);
- assert(out_tensor->dimension(0) == batch_size);
- assert(out_tensor->dimension(1) == num_units);
-}
-
-void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *bias_tensor,
- const ITensor *ofm_tensor, const ir::operation::FullyConnected::Param &param)
-{
- const auto ifm_buffer = ifm_tensor->bufferRO();
- const auto ker_buffer = ker_tensor->bufferRO();
- const auto bias_buffer = bias_tensor->bufferRO();
- auto ofm_buffer = ofm_tensor->buffer();
-
- // Calculate
- nnfw::cker::FullyConnectedParams cker_param;
- cker_param.activation = convertActivationType(param.activation);
- calculateActivationRange(param.activation, &cker_param.float_activation_min,
- &cker_param.float_activation_max);
- const auto cker_ifm_shape = convertShape(ifm_tensor->tensorInfo().shape());
- const auto cker_ker_shape = convertShape(ker_tensor->tensorInfo().shape());
- const auto cker_bias_shape = convertShape(bias_tensor->tensorInfo().shape());
- const auto cker_ofm_shape = convertShape(ofm_tensor->tensorInfo().shape());
- const float *ifm_ptr = reinterpret_cast<const float *>(ifm_buffer);
- const float *ker_ptr = reinterpret_cast<const float *>(ker_buffer);
- const float *bias_ptr = reinterpret_cast<const float *>(bias_buffer);
- float *ofm_ptr = reinterpret_cast<float *>(ofm_buffer);
-
- nnfw::cker::FullyConnected(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr,
- cker_bias_shape, bias_ptr, cker_ofm_shape, ofm_ptr);
-}
-
-void invokeFC(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &conv_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::FullyConnected &>(node);
-
- const auto ifm_index = node.getInputs().at(ir::operation::FullyConnected::INPUT);
- const auto ker_index = node.getInputs().at(ir::operation::FullyConnected::WEIGHT);
- const auto bias_index = node.getInputs().at(ir::operation::FullyConnected::BIAS);
- const auto ofm_index = node.getOutputs().at(0);
-
- const auto ifm_tensor = env->tensorAt(ifm_index);
- const auto ker_tensor = env->tensorAt(ker_index);
- const auto bias_tensor = env->tensorAt(bias_index);
- const auto ofm_tensor = env->tensorAt(ofm_index);
-
- const auto data_type = ifm_tensor->data_type();
- if (data_type == ir::DataType::FLOAT32)
- {
- invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
- }
- else
- {
- throw std::runtime_error{"NYI: Support float only"};
- }
-}
-} // namespace fc
-
-OpKernel *getFullyConnected()
-{
- static OpKernel kernel = {fc::prepareFC, fc::invokeFC};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/Gather.cc b/runtime/onert/core/src/interp/operations/Gather.cc
deleted file mode 100644
index 9e82def5f..000000000
--- a/runtime/onert/core/src/interp/operations/Gather.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/Gather.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/Gather.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace
-{
-
-void prepareGather(ExecEnv *env, const ir::Operation &node)
-{
- const auto input_index = node.getInputs().at(ir::operation::Gather::INPUT);
- const auto indices_index = node.getInputs().at(ir::operation::Gather::INDICES);
- const auto output_index = node.getOutputs().at(0);
-
- const auto input_tensor = env->tensorAt(input_index);
- const auto indices_tensor = env->tensorAt(indices_index);
-
- // TODO handle unspecified output shape:
- // calculate output shape using ifm shape, kernel shape, padding, stride
- const auto output_info = env->graph().operands().at(output_index).info();
- if (output_info.total_size() == 0)
- {
- throw std::runtime_error{"Interp(Gather): NYI for unspecified output shape"};
- }
- else
- {
- env->allocateIfNeeded(output_index, output_info);
- }
-
- if (indices_tensor->data_type() != ir::DataType::INT32)
- {
- throw std::runtime_error{"Interp(Gather): Invalid indices data type"};
- }
-
- auto output_tensor = env->tensorAt(output_index);
- auto output_rank = input_tensor->num_dimensions() + indices_tensor->num_dimensions() - 1;
-
- if (output_rank != output_tensor->num_dimensions())
- {
- throw std::runtime_error{"Interp(Gather): Invalid output rank"};
- }
- if (output_tensor->data_type() != input_tensor->data_type())
- {
- throw std::runtime_error{"Interp(Gather): Invalid output data type"};
- }
-
- if (input_tensor->data_type() == ir::DataType::QUANT_UINT8_ASYMM &&
- input_tensor->tensorInfo().typeInfo() != output_tensor->tensorInfo().typeInfo())
- {
- throw std::runtime_error{
- "Interp(Gather): Cannot handle different I/O QUANT_UINT8_ASYMM scale/offset"};
- }
-}
-
-template <typename raw_type>
-void invoke(const ITensor *input_tensors, const ITensor *indices_tensors,
- const ITensor *output_tensor, uint32_t axis)
-{
- // Calculate
- nnfw::cker::GatherParams cker_param;
- cker_param.axis = (int8_t)axis;
-
- const auto cker_input_shapes = convertShape(input_tensors->tensorInfo().shape());
- const auto cker_indices_shape = convertShape(indices_tensors->tensorInfo().shape());
- const auto cker_output_shape = convertShape(output_tensor->tensorInfo().shape());
- const raw_type *input_ptr = reinterpret_cast<const raw_type *>(input_tensors->bufferRO());
- const int32_t *indices_ptr = reinterpret_cast<const int32_t *>(indices_tensors->bufferRO());
- raw_type *output_ptr = reinterpret_cast<raw_type *>(output_tensor->buffer());
-
- nnfw::cker::Gather<raw_type>(cker_param, cker_input_shapes, input_ptr, cker_indices_shape,
- indices_ptr, cker_output_shape, output_ptr);
-}
-
-void invokeGather(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &gather_node = nnfw::misc::polymorphic_downcast<const ir::operation::Gather &>(node);
- const int32_t axis_raw = gather_node.param().axis;
-
- const auto input_index = node.getInputs().at(ir::operation::Gather::INPUT);
- const auto indices_index = node.getInputs().at(ir::operation::Gather::INDICES);
- const auto output_index = node.getOutputs().at(0);
-
- const auto input_tensor = env->tensorAt(input_index);
- const auto indices_tensor = env->tensorAt(indices_index);
- const auto output_tensor = env->tensorAt(output_index);
- const uint32_t axis = (axis_raw < 0) ? (axis_raw + input_tensor->num_dimensions()) : axis_raw;
-
- const auto data_type = input_tensor->data_type();
-
- switch (data_type)
- {
- case ir::DataType::FLOAT32:
- invoke<float>(input_tensor, indices_tensor, output_tensor, axis);
- break;
- case ir::DataType::INT32:
- invoke<int32_t>(input_tensor, indices_tensor, output_tensor, axis);
- break;
- case ir::DataType::QUANT_UINT8_ASYMM:
- invoke<uint8_t>(input_tensor, indices_tensor, output_tensor, axis);
- break;
- default:
- throw std::runtime_error{"Interp(Gather): NYI - Not supported type"};
- }
-}
-
-} // namespace
-
-OpKernel *getGather()
-{
- static OpKernel kernel = {prepareGather, invokeGather};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/InstanceNorm.cc b/runtime/onert/core/src/interp/operations/InstanceNorm.cc
deleted file mode 100644
index 2538bcc39..000000000
--- a/runtime/onert/core/src/interp/operations/InstanceNorm.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/InstanceNorm.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/InstanceNorm.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace instancenorm
-{
-
-void prepareInstanceNorm(ExecEnv *env, const ir::Operation &node)
-{
- const auto &instancenorm_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::InstanceNorm &>(node);
-
- const auto input_index = node.getInputs().at(instancenorm_node.INPUT);
- const auto output_index = node.getOutputs().at(0);
- const auto input_tensor = env->tensorAt(input_index);
-
- if (input_tensor->num_dimensions() != 4)
- {
- throw std::runtime_error{"Interp(InstanceNorm): Input should be 4D-tensor"};
- }
-
- // Output shape should be same with input
- env->allocateIfNeeded(output_index, input_tensor->tensorInfo());
-
- auto output_tensor = env->tensorAt(output_index);
- UNUSED_RELEASE(output_tensor);
-
- // Handle same ifm & ofm data type only
- assert(input_tensor->data_type() == output_tensor->data_type());
- assert(input_tensor->tensorInfo().shape() == output_tensor->tensorInfo().shape());
-}
-
-inline void setActivationParams(float min, float max, nnfw::cker::InstanceNormParams *params)
-{
- params->float_activation_min = min;
- params->float_activation_max = max;
-}
-
-void invoke(const ITensor *input_tensor, const ITensor *gamma_tensor, const ITensor *beta_tensor,
- const ITensor *output_tensor, const ir::operation::InstanceNorm::Param &param)
-{
- // Calculate
- float activation_min, activation_max;
- calculateActivationRange(param.activation, &activation_min, &activation_max);
-
- nnfw::cker::InstanceNormParams cker_param;
- cker_param.epsilon = param.epsilon;
- cker_param.float_activation_min = activation_min;
- cker_param.float_activation_max = activation_max;
-
- const auto cker_input_shape = convertShape(input_tensor->tensorInfo().shape());
- const auto cker_gamma_shape = convertShape(gamma_tensor->tensorInfo().shape());
- const auto cker_beta_shape = convertShape(beta_tensor->tensorInfo().shape());
- const auto cker_output_shape = convertShape(output_tensor->tensorInfo().shape());
- const float *input_ptr = reinterpret_cast<const float *>(input_tensor->bufferRO());
- const float *gamma_ptr = reinterpret_cast<const float *>(gamma_tensor->bufferRO());
- const float *beta_ptr = reinterpret_cast<const float *>(beta_tensor->bufferRO());
- float *output_ptr = reinterpret_cast<float *>(output_tensor->buffer());
-
- nnfw::cker::InstanceNorm(cker_param, cker_input_shape, input_ptr, cker_gamma_shape, gamma_ptr,
- cker_beta_shape, beta_ptr, cker_output_shape, output_ptr);
-}
-
-void invokeInstanceNorm(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &instancenorm_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::InstanceNorm &>(node);
-
- const auto input_index = node.getInputs().at(instancenorm_node.INPUT);
- const auto gamma_index = node.getInputs().at(instancenorm_node.GAMMA);
- const auto beta_index = node.getInputs().at(instancenorm_node.BETA);
- const auto out_index = node.getOutputs().at(0);
- const auto input_tensor = env->tensorAt(input_index);
- const auto gamma_tensor = env->tensorAt(gamma_index);
- const auto beta_tensor = env->tensorAt(beta_index);
- const auto out_tensor = env->tensorAt(out_index);
- const auto data_type = input_tensor->data_type();
-
- if (data_type == ir::DataType::FLOAT32)
- {
- invoke(input_tensor, gamma_tensor, beta_tensor, out_tensor, instancenorm_node.param());
- }
- else
- {
- throw std::runtime_error{"NYI: Unsupported data type"};
- }
-}
-} // namespace instancenorm
-
-OpKernel *getInstanceNorm()
-{
- static OpKernel kernel = {instancenorm::prepareInstanceNorm, instancenorm::invokeInstanceNorm};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/OperationUtil.h b/runtime/onert/core/src/interp/operations/OperationUtil.h
deleted file mode 100644
index 2fdf098f0..000000000
--- a/runtime/onert/core/src/interp/operations/OperationUtil.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_INTERP_OPERATIONS_OPERATION_UTILS_H_
-#define __ONERT_INTERP_OPERATIONS_OPERATION_UTILS_H_
-
-#include "ir/Shape.h"
-#include "ir/InternalType.h"
-#include "ir/Padding.h"
-
-#include <cker/Shape.h>
-#include <cker/Types.h>
-
-namespace onert
-{
-namespace interp
-{
-
-inline nnfw::cker::Shape convertShape(const ir::Shape &shape)
-{
- auto dimensions = std::vector<uint32_t>(shape.dims().begin(), shape.dims().end());
-
- std::vector<int32_t> raw_shape;
- raw_shape.resize(dimensions.size());
-
- for (uint32_t i = 0; i < dimensions.size(); ++i)
- {
- raw_shape[i] = dimensions[i];
- }
-
- return nnfw::cker::GetShape(raw_shape);
-}
-
-inline nnfw::cker::Shape convertExtendShape(const ir::Shape &shape)
-{
- auto dimensions = std::vector<uint32_t>(shape.dims().begin(), shape.dims().end());
-
- const int32_t extended_rank = 4;
- int32_t raw_shape[extended_rank];
- uint32_t start = extended_rank - dimensions.size();
-
- for (uint32_t i = 0; i < extended_rank; ++i)
- {
- if (i < start)
- {
- raw_shape[i] = 1;
- }
- else
- {
- raw_shape[i] = dimensions[i - start];
- }
- }
-
- return nnfw::cker::Shape(extended_rank, raw_shape);
-}
-
-inline nnfw::cker::FusedActivationFunctionType
-convertActivationType(const ir::Activation activation)
-{
- switch (activation)
- {
- case ir::Activation::NONE:
- return nnfw::cker::FusedActivationFunctionType::kNone;
- case ir::Activation::RELU:
- return nnfw::cker::FusedActivationFunctionType::kRelu;
- case ir::Activation::RELU1:
- return nnfw::cker::FusedActivationFunctionType::kRelu1;
- case ir::Activation::RELU6:
- return nnfw::cker::FusedActivationFunctionType::kRelu6;
- default:
- throw std::runtime_error{"CPU backend: Cannot convert activation type"};
- }
-}
-
-template <typename T>
-void calculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
-{
- if (activation == ir::Activation::RELU)
- {
- *activation_min = 0;
- *activation_max = std::numeric_limits<T>::max();
- }
- else if (activation == ir::Activation::RELU6)
- {
- *activation_min = 0;
- *activation_max = 6;
- }
- else if (activation == ir::Activation::RELU1)
- {
- *activation_min = -1;
- *activation_max = 1;
- }
- else if (activation == ir::Activation::NONE)
- {
- *activation_min = std::numeric_limits<T>::lowest();
- *activation_max = std::numeric_limits<T>::max();
- }
- else
- {
- throw std::runtime_error{"Unsupported activation type"};
- }
-}
-
-inline ir::Shape calcBroadcastShape(const ir::Shape &lhs, const ir::Shape &rhs, bool &success)
-{
- int lhs_rank = lhs.rank();
- int rhs_rank = rhs.rank();
-
- int out_rank = (lhs_rank > rhs_rank ? lhs_rank : rhs_rank);
- ir::Shape out_shape(out_rank);
-
- int lhs_idim = lhs_rank - 1;
- int rhs_idim = rhs_rank - 1;
- success = true;
- for (int out_idim = out_rank - 1; out_idim >= 0; out_idim--)
- {
- if (lhs_idim == -1 && rhs_idim == -1)
- {
- // invalid result
- success = false;
- break;
- }
-
- if (lhs_idim == -1)
- {
- out_shape.dim(out_idim) = rhs.dim(rhs_idim);
- rhs_idim--;
- }
- else if (rhs_idim == -1)
- {
- out_shape.dim(out_idim) = lhs.dim(lhs_idim);
- lhs_idim--;
- }
- else
- {
- if (lhs.dim(lhs_idim) == rhs.dim(rhs_idim))
- {
- out_shape.dim(out_idim) = lhs.dim(lhs_idim);
- lhs_idim--;
- rhs_idim--;
- }
- else if (lhs.dim(lhs_idim) == 1)
- {
- out_shape.dim(out_idim) = rhs.dim(rhs_idim);
- lhs_idim--;
- rhs_idim--;
- }
- else if (rhs.dim(rhs_idim) == 1)
- {
- out_shape.dim(out_idim) = lhs.dim(lhs_idim);
- lhs_idim--;
- rhs_idim--;
- }
- else
- {
- // invalid result
- success = false;
- break;
- }
- }
- }
-
- if (lhs_idim != -1 || rhs_idim != -1)
- {
- // invalid result
- success = false;
- }
- return out_shape;
-}
-
-inline nnfw::cker::PaddingType convertPaddingType(ir::PaddingType ir_padding_type)
-{
- switch (ir_padding_type)
- {
- case ir::PaddingType::EXPLICIT:
- return nnfw::cker::PaddingType::kNone;
- case ir::PaddingType::SAME:
- return nnfw::cker::PaddingType::kSame;
- case ir::PaddingType::VALID:
- return nnfw::cker::PaddingType::kValid;
- default:
- throw std::runtime_error("Wrong padding type.");
- break;
- }
-}
-
-} // namespace interp
-} // namespace onert
-
-#endif // __ONERT_INTERP_OPERATIONS_OPERATION_UTILS_H_
diff --git a/runtime/onert/core/src/interp/operations/Pad.cc b/runtime/onert/core/src/interp/operations/Pad.cc
deleted file mode 100644
index c8dce698d..000000000
--- a/runtime/onert/core/src/interp/operations/Pad.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/Pad.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/Pad.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace
-{
-
-void preparePad(ExecEnv *env, const ir::Operation &node)
-{
- const auto input_index = node.getInputs().at(ir::operation::Pad::INPUT);
- const auto output_index = node.getOutputs().at(0);
-
- const auto input_tensor = env->tensorAt(input_index);
-
- const auto output_info = env->graph().operands().at(output_index).info();
-
- // Check shape and type lhs is same with rhs
- // TODO Util function to compare TensorInfo
- if (output_info.total_size() == 0)
- {
- throw std::runtime_error{"Interp(Pad): NYI unspecified output shape"};
- }
- else
- {
- env->allocateIfNeeded(output_index, output_info);
- }
-
- const auto output_tensor = env->tensorAt(output_index);
- if (input_tensor->data_type() != output_tensor->data_type())
- {
- throw std::runtime_error{"Interp(Pad): Invalid output type"};
- }
-}
-
-void invoke(const ITensor *input_tensor, const ITensor *pad_tensor, const ITensor *output_tensor)
-{
- const auto input_buffer = input_tensor->bufferRO();
- const auto pad_buffer = pad_tensor->bufferRO();
- auto output_buffer = output_tensor->buffer();
-
- int32_t pad_rank = pad_tensor->dimension(0);
-
- const auto cker_input_shape = convertShape(input_tensor->tensorInfo().shape());
- const auto cker_output_shape = convertShape(output_tensor->tensorInfo().shape());
- const float *input_ptr = reinterpret_cast<const float *>(input_buffer);
- const int32_t *pad_ptr = reinterpret_cast<const int32_t *>(pad_buffer);
- float *output_ptr = reinterpret_cast<float *>(output_buffer);
-
- nnfw::cker::Pad<float>(pad_ptr, pad_rank, cker_input_shape, input_ptr, cker_output_shape,
- output_ptr, nullptr);
-}
-
-void invokePad(const ExecEnv *env, const ir::Operation &node)
-{
- const auto input_index = node.getInputs().at(ir::operation::Pad::INPUT);
- const auto pad_index = node.getInputs().at(ir::operation::Pad::PAD);
- const auto output_index = node.getOutputs().at(0);
-
- const auto input_tensor = env->tensorAt(input_index);
- const auto pad_tensor = env->tensorAt(pad_index);
- const auto output_tensor = env->tensorAt(output_index);
-
- const auto data_type = input_tensor->data_type();
-
- if (data_type == ir::DataType::FLOAT32)
- {
- invoke(input_tensor, pad_tensor, output_tensor);
- }
- else
- {
- throw std::runtime_error{"Interp(Pad): NYI - Unsupported data type"};
- }
-}
-} // namespace
-
-OpKernel *getPad()
-{
- static OpKernel kernel = {preparePad, invokePad};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/Pool2D.cc b/runtime/onert/core/src/interp/operations/Pool2D.cc
deleted file mode 100644
index 92f9d70b2..000000000
--- a/runtime/onert/core/src/interp/operations/Pool2D.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/AveragePool.h>
-#include <cker/operation/MaxPool.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/Pool2D.h"
-#include "util/Utils.h"
-#include "util/ShapeInference.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace pool2d
-{
-
-void preparePool2D(ExecEnv *env, const ir::Operation &node)
-{
- const auto &pool_node = nnfw::misc::polymorphic_downcast<const ir::operation::Pool2D &>(node);
- const auto in_index = node.getInputs().at(pool_node.INPUT);
- const auto out_index = node.getOutputs().at(0);
-
- const auto in_tensor = env->tensorAt(in_index);
- UNUSED_RELEASE(in_tensor);
-
- assert(in_tensor->num_dimensions() == 4);
-
- const auto output_info = env->graph().operands().at(out_index).info();
- if (output_info.total_size() == 0)
- {
- // Handle unspecified output shape
- const auto infered_output_shape =
- shape_inference::inferPoolShape(in_tensor->tensorInfo().shape(), pool_node.param());
- env->allocateIfNeeded(
- out_index, ir::OperandInfo::createStaticInfo(infered_output_shape, output_info.typeInfo()));
- }
- else
- {
- env->allocateIfNeeded(out_index, output_info);
- }
-
- auto out_tensor = env->tensorAt(out_index);
- UNUSED_RELEASE(out_tensor);
-
- // Handle same ifm & ofm data type only
- assert(in_tensor->data_type() == out_tensor->data_type());
- assert(out_tensor->num_dimensions() == 4);
-}
-
-template <typename T>
-void invoke(const nnfw::cker::PoolParams &params, const nnfw::cker::Shape &in_shape,
- const T *in_ptr, const nnfw::cker::Shape &out_shape, T *out_ptr,
- ir::operation::Pool2D::PoolType op_type)
-{
- switch (op_type)
- {
- case ir::operation::Pool2D::PoolType::AVG:
- nnfw::cker::AveragePool<T>(params, in_shape, in_ptr, out_shape, out_ptr);
- break;
- case ir::operation::Pool2D::PoolType::MAX:
- nnfw::cker::MaxPool<T>(params, in_shape, in_ptr, out_shape, out_ptr);
- break;
- default:
- throw std::runtime_error{"Interp(Pool2D): NYI unsupported operation"};
- break;
- }
-}
-
-void invokePool2DOps(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &pool_node = nnfw::misc::polymorphic_downcast<const ir::operation::Pool2D &>(node);
-
- const auto in_index = node.getInputs().at(0);
- const auto out_index = node.getOutputs().at(0);
-
- // Check lhs shape is same with rhs (with broadcast)
- const auto in_tensor = env->tensorAt(in_index);
- const auto out_tensor = env->tensorAt(out_index);
-
- // TODO support NCHW frontend
- const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- const auto param = pool_node.param();
- const auto padding =
- ir::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride, param.kw, param.kh);
- // Calculate
- nnfw::cker::PoolParams cker_param;
- cker_param.filter_width = param.kw;
- cker_param.filter_height = param.kh;
- cker_param.padding_values.width = padding.left;
- cker_param.padding_values.height = padding.top;
- cker_param.stride_width = param.stride.horizontal;
- cker_param.stride_height = param.stride.vertical;
-
- const auto data_type = in_tensor->data_type();
- if (data_type == ir::DataType::FLOAT32)
- {
- calculateActivationRange(param.activation, &cker_param.float_activation_min,
- &cker_param.float_activation_max);
-
- const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
- const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
- const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
- float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
- // Now, invoke() supports only Pool2D in float
- invoke<float>(cker_param, in_shape, in_ptr, out_shape, out_ptr, param.op_type);
- }
- else
- {
- throw std::runtime_error{"NYI: Support float only"};
- }
-}
-} // namespace pool2d
-
-OpKernel *getPool2D()
-{
- static OpKernel kernel = {pool2d::preparePool2D, pool2d::invokePool2DOps};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/Reshape.cc b/runtime/onert/core/src/interp/operations/Reshape.cc
deleted file mode 100644
index 3a118456b..000000000
--- a/runtime/onert/core/src/interp/operations/Reshape.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "interp/Registration.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace
-{
-
-void prepare(ExecEnv *env, const ir::Operation &node)
-{
- const auto in_index = node.getInputs().at(0);
- const auto out_index = node.getOutputs().at(0);
-
- // Unspecified shape is not supported in operation node spec now
- const auto output_info = env->graph().operands().at(out_index).info();
- env->allocateAndShareIfNeeded(out_index, output_info, in_index);
-
- assert(output_info.total_size() == env->graph().operands().at(in_index).info().total_size());
-}
-
-void invoke(const ExecEnv *env, const ir::Operation &node)
-{
- const auto in_index = node.getInputs().at(0);
- const auto out_index = node.getOutputs().at(0);
-
- if (env->tensorAt(in_index)->bufferRO() == env->tensorAt(out_index)->bufferRO())
- {
- // Same data
- return;
- }
-
- const auto output_info = env->graph().operands().at(out_index).info();
- memcpy(env->tensorAt(out_index)->buffer(), env->tensorAt(in_index)->bufferRO(),
- output_info.total_size());
-}
-
-} // namespace
-
-OpKernel *getReshape()
-{
- static OpKernel kernel = {prepare, invoke};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/Softmax.cc b/runtime/onert/core/src/interp/operations/Softmax.cc
deleted file mode 100644
index d30f78deb..000000000
--- a/runtime/onert/core/src/interp/operations/Softmax.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/SoftMax.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/Softmax.h"
-#include "misc/polymorphic_downcast.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace
-{
-
-void prepareSoftMax(ExecEnv *env, const ir::Operation &node)
-{
- const auto in_index = node.getInputs().at(0);
- const auto out_index = node.getOutputs().at(0);
-
- const auto in_tensor = env->tensorAt(in_index);
- UNUSED_RELEASE(in_tensor);
-
- assert((in_tensor->num_dimensions() == 4) || (in_tensor->num_dimensions() == 2));
-
- // Output shape should be same with input
- // Output type is pre-defined in model
- const auto output_shape = env->graph().operands().at(in_index).info().shape();
- const auto output_type = env->graph().operands().at(out_index).info().typeInfo();
-
- const auto output_info = ir::OperandInfo::createStaticInfo(output_shape, output_type);
- env->allocateIfNeeded(out_index, output_info);
-
- auto out_tensor = env->tensorAt(out_index);
- UNUSED_RELEASE(out_tensor);
-
- // Check output shape is same with input
- assert(out_tensor->num_dimensions() == out_tensor->num_dimensions());
- for (uint32_t i = 0; i < in_tensor->num_dimensions(); i++)
- {
- assert(in_tensor->dimension(i) == out_tensor->dimension(i));
- }
-}
-
-void invoke(const ITensor *in_tensor, const ITensor *out_tensor,
- const ir::operation::Softmax::Param &param)
-{
- const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
- float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
-
- float beta = param.beta;
-
- if (in_tensor->num_dimensions() == 2)
- {
- uint32_t batch_size = in_tensor->dimension(0);
- uint32_t input_size = in_tensor->dimension(1);
-
- nnfw::cker::Softmax(in_ptr, input_size, batch_size, beta, out_ptr);
- }
- else if (in_tensor->num_dimensions() == 4)
- {
- const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
- const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
-
- nnfw::cker::SoftmaxParams cker_param;
- cker_param.beta = beta;
-
- nnfw::cker::Softmax(cker_param, in_shape, in_ptr, out_shape, out_ptr);
- }
- else
- {
- throw std::runtime_error{"Unsuported input dimension: support 2D or 4D"};
- }
-}
-
-void invokeSoftMax(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &softmax_node = nnfw::misc::polymorphic_downcast<const ir::operation::Softmax &>(node);
-
- const auto in_index = node.getInputs().at(0);
- const auto out_index = node.getOutputs().at(0);
-
- const auto in_tensor = env->tensorAt(in_index);
- const auto out_tensor = env->tensorAt(out_index);
-
- const auto in_data_type = in_tensor->data_type();
- const auto out_data_type = out_tensor->data_type();
- if ((in_data_type == ir::DataType::FLOAT32) && (out_data_type == ir::DataType::FLOAT32))
- {
- invoke(in_tensor, out_tensor, softmax_node.param());
- }
- else
- {
- throw std::runtime_error{"NYI: Support float32 only"};
- }
-}
-
-} // namespace
-
-OpKernel *getSoftmax()
-{
- static OpKernel kernel = {prepareSoftMax, invokeSoftMax};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/interp/operations/TransposeConv.cc b/runtime/onert/core/src/interp/operations/TransposeConv.cc
deleted file mode 100644
index cc2ced26b..000000000
--- a/runtime/onert/core/src/interp/operations/TransposeConv.cc
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cker/operation/TransposeConv.h>
-#include <misc/polymorphic_downcast.h>
-
-#include "OperationUtil.h"
-
-#include "interp/Registration.h"
-#include "ir/operation/TransposeConv.h"
-
-namespace onert
-{
-namespace interp
-{
-namespace
-{
-
-void prepareTransposeConv(ExecEnv *env, const ir::Operation &node)
-{
- const auto ifm_index = node.getInputs().at(ir::operation::TransposeConv::INPUT);
- const auto ker_index = node.getInputs().at(ir::operation::TransposeConv::KERNEL);
- const auto ofm_shape_index = node.getInputs().at(ir::operation::TransposeConv::OUTPUT_SHAPE);
- const auto ofm_index = node.getOutputs().at(0);
-
- const auto ifm_tensor = env->tensorAt(ifm_index);
- const auto ker_tensor = env->tensorAt(ker_index);
- const auto ofm_shape_tensor = env->tensorAt(ofm_shape_index);
-
- assert(ifm_tensor->num_dimensions() == 4);
- assert(ker_tensor->num_dimensions() == 4);
- assert(ofm_shape_tensor->num_dimensions() == 1);
-
- UNUSED_RELEASE(ifm_tensor);
- UNUSED_RELEASE(ker_tensor);
- UNUSED_RELEASE(ofm_shape_tensor);
-
- const auto output_info = env->graph().operands().at(ofm_index).info();
- if (output_info.total_size() == 0)
- {
- // TODO: Handle unspecified output shape
- throw std::runtime_error{"Interp(TConv): NYI unspecified output shape"};
- }
- else
- {
- env->allocateIfNeeded(ofm_index, output_info);
- }
-
- auto ofm_tensor = env->tensorAt(ofm_index);
- UNUSED_RELEASE(ofm_tensor);
-
- // Handle same ifm & ofm data type only
- if (ifm_tensor->data_type() != ofm_tensor->data_type())
- {
- throw std::runtime_error{"Interp(TConv): Different I/O data dype"};
- }
-
- if (ofm_tensor->num_dimensions() != 4)
- {
- throw std::runtime_error{"Interp(TConv): Invalid output rank"};
- }
-}
-
-void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *ofm_tensor,
- const ir::operation::TransposeConv::Param &param)
-{
- const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
- const auto ker_shape = ker_tensor->tensorInfo().shape();
- const auto ker_height = ker_shape.dim(1);
- const auto ker_width = ker_shape.dim(2);
- const auto padding = ir::calculatePadding(param.padding, ofm_shape, ifm_shape, param.stride,
- ker_width, ker_height);
-
- nnfw::cker::TransposeConvParams cker_param;
- cker_param.padding_values.width = padding.left;
- cker_param.padding_values.height = padding.top;
- cker_param.stride_width = param.stride.horizontal;
- cker_param.stride_height = param.stride.vertical;
- cker_param.dilation_width_factor = 1;
- cker_param.dilation_height_factor = 1;
-
- const auto cker_ifm_shape = convertShape(ifm_tensor->tensorInfo().shape());
- const auto cker_ker_shape = convertShape(ker_tensor->tensorInfo().shape());
- const auto cker_ofm_shape = convertShape(ofm_tensor->tensorInfo().shape());
- const float *ifm_ptr = reinterpret_cast<const float *>(ifm_tensor->bufferRO());
- const float *ker_ptr = reinterpret_cast<const float *>(ker_tensor->bufferRO());
- float *ofm_ptr = reinterpret_cast<float *>(ofm_tensor->buffer());
-
- nnfw::cker::TransposeConv(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr,
- cker_ofm_shape, ofm_ptr);
-}
-
-void invokeTransposeConv(const ExecEnv *env, const ir::Operation &node)
-{
- const auto &tconv_node =
- nnfw::misc::polymorphic_downcast<const ir::operation::TransposeConv &>(node);
-
- const auto ifm_index = node.getInputs().at(ir::operation::TransposeConv::INPUT);
- const auto ker_index = node.getInputs().at(ir::operation::TransposeConv::KERNEL);
- const auto ofm_index = node.getOutputs().at(0);
-
- const auto ifm_tensor = env->tensorAt(ifm_index);
- const auto ker_tensor = env->tensorAt(ker_index);
- const auto ofm_tensor = env->tensorAt(ofm_index);
-
- const auto data_type = ifm_tensor->data_type();
- if (data_type == ir::DataType::FLOAT32)
- {
- invoke(ifm_tensor, ker_tensor, ofm_tensor, tconv_node.param());
- }
- else
- {
- throw std::runtime_error{"Interp(TConv): Support float32 only"};
- }
-}
-
-} // namespace
-
-OpKernel *getTransposeConv()
-{
- static OpKernel kernel = {prepareTransposeConv, invokeTransposeConv};
- return &kernel;
-}
-
-} // namespace interp
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Coordinates.cc b/runtime/onert/core/src/ir/Coordinates.cc
deleted file mode 100644
index a02a56567..000000000
--- a/runtime/onert/core/src/ir/Coordinates.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Coordinates.h"
-
-#include <cassert>
-
-namespace onert
-{
-namespace ir
-{
-
-Coordinates convertCoordinates(const Coordinates &from_coordinates, Layout from_layout,
- Layout to_layout)
-{
- assert(from_coordinates.size() == 4);
- Coordinates to{from_coordinates};
- if (from_layout == Layout::NHWC && to_layout == Layout::NCHW)
- {
- to.set(0, from_coordinates[0]);
- to.set(1, from_coordinates[3]);
- to.set(2, from_coordinates[1]);
- to.set(3, from_coordinates[2]);
- }
- else if (from_layout == Layout::NCHW && to_layout == Layout::NHWC)
- {
- to.set(0, from_coordinates[0]);
- to.set(1, from_coordinates[2]);
- to.set(2, from_coordinates[3]);
- to.set(3, from_coordinates[1]);
- }
-
- return to;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/DataType.cc b/runtime/onert/core/src/ir/DataType.cc
deleted file mode 100644
index 80c659b3a..000000000
--- a/runtime/onert/core/src/ir/DataType.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/DataType.h"
-
-#include <stdexcept>
-#include <Half.h>
-
-using float16 = Half;
-
-namespace onert
-{
-namespace ir
-{
-
-size_t sizeOfDataType(DataType data_type)
-{
- switch (data_type)
- {
- case DataType::FLOAT32:
- return sizeof(float);
- case DataType::INT32:
- return sizeof(int32_t);
- case DataType::UINT32:
- return sizeof(uint32_t);
- case DataType::BOOL8:
- case DataType::QUANT_UINT8_ASYMM:
- case DataType::UINT8:
- return sizeof(uint8_t);
- case DataType::QUANT_INT8_SYMM:
- return sizeof(int8_t);
- case DataType::FLOAT16:
- return sizeof(float16);
- case DataType::INT64:
- return sizeof(int64_t);
- default:
- throw std::runtime_error{"Unsupported type size"};
- }
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Graph.cc b/runtime/onert/core/src/ir/Graph.cc
deleted file mode 100644
index 605562ebc..000000000
--- a/runtime/onert/core/src/ir/Graph.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Graph.h"
-
-#include <algorithm>
-#include <bitset>
-#include <sstream>
-
-#include "util/logging.h"
-#include "verifier/Verifier.h"
-#include "ir/operation/LowerInfo.h"
-#include "ir/operand/LowerInfo.h"
-#include "ir/operand/PermuteFactor.h"
-#include "ir/OperandIndexMap.h"
-#include "ir/GraphIterator.h"
-#include "backend/IConfig.h"
-
-namespace onert
-{
-namespace ir
-{
-
-Graph::Graph() = default;
-
-Graph::~Graph(void) = default;
-
-OperandIndex Graph::addOperand(const Shape &shape, const TypeInfo &type)
-{
- return _operands.emplace(shape, type);
-}
-
-OperationIndex Graph::addOperation(std::unique_ptr<Operation> &&node)
-{
- assert(isBuildingPhase());
- return _operations.push(std::move(node));
-}
-
-void Graph::setOperandValue(const OperandIndex &ind, std::shared_ptr<Data> data)
-{
- assert(isBuildingPhase());
- assert(_operands.exist(ind));
- _operands.at(ind).data(std::move(data));
-}
-
-void Graph::addInput(const OperandIndex &ind, const std::string &name)
-{
- assert(isBuildingPhase());
- if (!name.empty())
- _name_to_input.emplace(name, IOIndex{_inputs.size()});
- _inputs.append(ind);
-}
-
-void Graph::addOutput(const OperandIndex &ind, const std::string &name)
-{
- assert(isBuildingPhase());
- if (!name.empty())
- _name_to_output.emplace(name, IOIndex{_outputs.size()});
- _outputs.append(ind);
-}
-
-IOIndex Graph::getInputIndex(const std::string &name) const
-{
- auto itr = _name_to_input.find(name);
- return (itr == _name_to_input.end()) ? IOIndex{} : itr->second;
-}
-
-IOIndex Graph::getOutputIndex(const std::string &name) const
-{
- auto itr = _name_to_output.find(name);
- return (itr == _name_to_output.end()) ? IOIndex{} : itr->second;
-}
-
-void Graph::finishBuilding(void)
-{
- assert(isBuildingPhase());
- _phase = Phase::MODEL;
-
- initializeUseDef();
- sweepGarbageOperands();
-
- // Call graph verifications for the MODEL phase
- {
- assert(verifier::DAGChecker().verify(*this));
- assert(verifier::EdgeConsistencyChecker().verify(*this));
- }
-}
-
-void Graph::initializeUseDef()
-{
- operations().iterate([&](const OperationIndex &index, const Operation &node) -> void {
- auto outputs = node.getOutputs();
- for (auto output : outputs | ir::Remove::UNDEFINED)
- {
- operands().at(output).setDef(index);
- }
-
- for (auto input : node.getInputs() | ir::Remove::UNDEFINED)
- {
- operands().at(input).insertUse(index);
- }
- });
-}
-
-void Graph::sweepGarbageOperands()
-{
- // Remove operands that are not used by any operations, except Graph inputs/outputs
- ir::OperandIndexMap<bool> visited;
-
- operations().iterate([&](const OperationIndex &, const Operation &node) {
- for (auto ind : node.getInputs() + node.getOutputs())
- {
- visited[ind] = true;
- }
- });
-
- // Graph's inputs/outputs are always reachable
- for (auto ind : getInputs() + getOutputs())
- {
- visited[ind] = true;
- }
-
- operands().iterate([&](const OperandIndex &ind, const Operand &) {
- if (!visited[ind])
- {
- VERBOSE(Graph::sweepGarbageOperands) << "Sweep garbage operand " << ind.value() << std::endl;
- operands().remove(ind);
- }
- });
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/GraphIterator.cc b/runtime/onert/core/src/ir/GraphIterator.cc
deleted file mode 100644
index ac67771c4..000000000
--- a/runtime/onert/core/src/ir/GraphIterator.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "GraphIterator.h"
-
-#include "ir/OperationIndexMap.h"
-#include "compiler/LoweredGraph.h"
-
-namespace onert
-{
-namespace ir
-{
-
-//
-// Graph::DefaultIterator
-//
-
-template <bool is_const>
-void DefaultIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
-{
- graph.operations().iterate(
- [&](const OperationIndex &index, NodeRef node) -> void { fn(index, node); });
-}
-
-//
-// Graph::PostDfsIterator
-//
-
-template <bool is_const>
-void PostDfsIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
-{
- assert(!graph.isBuildingPhase()); // Restrict iteration condition
-
- OperationIndexMap<bool> visited;
- graph.operations().iterate([&](const OperationIndex &index, NodeRef) { visited[index] = false; });
-
- std::function<void(const OperationIndex &, NodeRef)> dfs_recursive =
- [&](const OperationIndex &index, NodeRef node) -> void {
- if (visited[index])
- return;
- visited[index] = true;
-
- for (const auto output : node.getOutputs() | Remove::DUPLICATED | Remove::UNDEFINED)
- {
- const auto &operand = graph.operands().at(output);
- for (const auto &use : operand.getUses())
- {
- dfs_recursive(use, graph.operations().at(use));
- }
- }
-
- fn(index, node);
- };
-
- graph.operations().iterate(dfs_recursive);
-
- // All of the operations(nodes) must have been visited.
- assert(std::all_of(visited.begin(), visited.end(),
- [](const std::pair<const OperationIndex, bool> &v) { return v.second; }));
-}
-
-template <bool is_const>
-void PostDfsIterator<is_const>::iterateOpSeqs(LoweredGraphRef lowered_graph,
- const OpSeqIterFn &fn) const
-{
- std::unordered_map<OpSequenceIndex, bool> visited;
- lowered_graph.op_seqs().iterate(
- [&](const OpSequenceIndex &index, OpSequenceRef) { visited[index] = false; });
-
- std::function<void(const OpSequenceIndex &, OpSequenceRef)> dfs_recursive =
- [&](const OpSequenceIndex &index, OpSequenceRef op_seq) -> void {
- if (visited[index])
- return;
- visited[index] = true;
-
- for (const auto output : op_seq.getOutputs() | Remove::DUPLICATED | Remove::UNDEFINED)
- {
- const auto &operand = lowered_graph.graph().operands().at(output);
- for (const auto &use : operand.getUses())
- {
- const auto use_op_seq_index = lowered_graph.op_seqs().getOperation(use);
- dfs_recursive(use_op_seq_index, lowered_graph.op_seqs().at(use_op_seq_index));
- }
- }
-
- fn(index, op_seq);
- };
-
- lowered_graph.op_seqs().iterate(dfs_recursive);
-
- // All of the operations(nodes) must have been visited.
- assert(std::all_of(visited.begin(), visited.end(),
- [](const std::pair<const OpSequenceIndex, bool> &v) { return v.second; }));
-}
-
-// Explicit instantiations to have implementation in the source file.
-// NOTE If these instatiations were in the top of this file, `iterate` is compiled and saved in
-// `GraphIterator.cc.o` but `iterateOpSeqs`. This happens only when cross-building for Android.
-// (Maybe a bug of NDK toolchain(clang)?)
-
-template class DefaultIterator<true>;
-template class DefaultIterator<false>;
-
-template class PostDfsIterator<true>;
-template class PostDfsIterator<false>;
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/GraphIterator.h b/runtime/onert/core/src/ir/GraphIterator.h
deleted file mode 100644
index b54314e0e..000000000
--- a/runtime/onert/core/src/ir/GraphIterator.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_GRAPH_ITERATOR_H__
-#define __ONERT_IR_GRAPH_ITERATOR_H__
-
-#include <type_traits>
-
-#include "ir/Index.h"
-
-namespace onert
-{
-namespace compiler
-{
-class LoweredGraph;
-} // namespace compiler
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-
-class Graph;
-class Operation;
-class OpSequence;
-
-template <bool is_const> class Iterator
-{
-public:
- using GraphRef = typename std::conditional<is_const, const Graph &, Graph &>::type;
- using IndexRef = const OperationIndex &;
- using NodeRef = typename std::conditional<is_const, const Operation &, Operation &>::type;
- using IterFn = std::function<void(IndexRef, NodeRef)>;
-
-public:
- virtual ~Iterator() = default;
- virtual void iterate(GraphRef graph, const IterFn &fn) const = 0;
-};
-
-template <bool is_const = false> class DefaultIterator final : public Iterator<is_const>
-{
-public:
- using GraphRef = typename Iterator<is_const>::GraphRef;
- using IndexRef = typename Iterator<is_const>::IndexRef;
- using NodeRef = typename Iterator<is_const>::NodeRef;
- using IterFn = typename Iterator<is_const>::IterFn;
-
-public:
- void iterate(GraphRef graph, const IterFn &fn) const;
-};
-using DefaultConstIterator = DefaultIterator<true>;
-
-template <bool is_const = false> class PostDfsIterator final : public Iterator<is_const>
-{
-public:
- using GraphRef = typename Iterator<is_const>::GraphRef;
- using IndexRef = typename Iterator<is_const>::IndexRef;
- using NodeRef = typename Iterator<is_const>::NodeRef;
- using IterFn = typename Iterator<is_const>::IterFn;
- using LoweredGraphRef =
- typename std::conditional<is_const, const typename compiler::LoweredGraph &,
- typename compiler::LoweredGraph &>::type;
- using OpSequenceRef = typename std::conditional<is_const, const OpSequence &, OpSequence &>::type;
- using OpSeqIndexRef = const OpSequenceIndex &;
- using OpSeqIterFn = std::function<void(OpSeqIndexRef, OpSequenceRef)>;
-
-public:
- void iterate(GraphRef graph, const IterFn &fn) const;
- void iterateOpSeqs(LoweredGraphRef lowered_graph, const OpSeqIterFn &f) const;
-};
-using PostDfsConstIterator = PostDfsIterator<true>;
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_GRAPH_ITERATOR_H__
diff --git a/runtime/onert/core/src/ir/LayoutSet.cc b/runtime/onert/core/src/ir/LayoutSet.cc
deleted file mode 100644
index bd3f438ad..000000000
--- a/runtime/onert/core/src/ir/LayoutSet.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LayoutSet.h"
-
-namespace onert
-{
-namespace ir
-{
-
-LayoutSet::LayoutSet(std::initializer_list<Layout> layouts)
-{
- for (auto layout : layouts)
- {
- _set.insert(layout);
- }
-}
-
-LayoutSet LayoutSet::operator|(const LayoutSet &other) const
-{
- auto ret = *this;
- for (auto layout : other)
- {
- ret.add(layout);
- }
- return ret;
-}
-
-LayoutSet LayoutSet::operator&(const LayoutSet &other) const
-{
- LayoutSet ret;
- for (auto layout : other)
- {
- if (contains(layout))
- {
- ret.add(layout);
- }
- }
- return ret;
-}
-
-LayoutSet LayoutSet::operator-(const LayoutSet &other) const
-{
- auto ret = *this;
- for (auto layout : other)
- {
- ret.remove(layout);
- }
- return ret;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/LayoutSet.h b/runtime/onert/core/src/ir/LayoutSet.h
deleted file mode 100644
index 6ce4e38c6..000000000
--- a/runtime/onert/core/src/ir/LayoutSet.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_LAYOUT_SET_H__
-#define __ONERT_IR_LAYOUT_SET_H__
-
-#include <initializer_list>
-#include <unordered_set>
-
-#include "ir/Layout.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class LayoutSet
-{
-public:
- LayoutSet() = default;
- LayoutSet(std::initializer_list<Layout> layouts);
-
-public:
- void add(const Layout &layout) { _set.insert(layout); }
- void remove(const Layout &layout) { _set.erase(layout); }
- uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
- bool contains(const Layout &layout) const { return _set.find(layout) != _set.end(); }
-
-public:
- LayoutSet operator|(const LayoutSet &other) const; // Union
- LayoutSet operator&(const LayoutSet &other) const; // Intersect
- LayoutSet operator-(const LayoutSet &other) const; // Minus
-
-public:
- std::unordered_set<Layout>::const_iterator begin() const { return _set.begin(); }
- std::unordered_set<Layout>::const_iterator end() const { return _set.end(); }
-
-private:
- std::unordered_set<Layout> _set;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_LAYOUT_SET_H__
diff --git a/runtime/onert/core/src/ir/OpCode.cc b/runtime/onert/core/src/ir/OpCode.cc
deleted file mode 100644
index ef3411f6d..000000000
--- a/runtime/onert/core/src/ir/OpCode.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/OpCode.h"
-
-#include <unordered_map>
-
-namespace onert
-{
-namespace ir
-{
-
-const char *toString(OpCode opcode)
-{
- static const std::unordered_map<OpCode, const char *> map{{OpCode::Invalid, "Invalid"},
-#define OP(Name) {OpCode::Name, #Name},
-#include "ir/Operations.lst"
-#undef OP
- {OpCode::COUNT, "COUNT"}};
- return map.at(opcode);
-}
-
-OpCode toOpCode(const std::string str)
-{
- static const std::unordered_map<std::string, OpCode> map{
-#define OP(Name) {#Name, OpCode::Name},
-#include "ir/Operations.lst"
-#undef OP
- };
- return map.at(str);
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/OpSequence.cc b/runtime/onert/core/src/ir/OpSequence.cc
deleted file mode 100644
index e2b989d8c..000000000
--- a/runtime/onert/core/src/ir/OpSequence.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/OpSequence.h"
-
-#include "ir/Operations.h"
-#include "ir/OperationVisitor.h"
-#include <sstream>
-
-namespace
-{
-
-std::string getStrFromIndice(const onert::ir::OperandIndexSequence &indice)
-{
- std::string str;
- for (const auto &ind : indice)
- {
- str += std::to_string(ind.value());
- str.push_back(',');
- }
- if (str.back() == ',')
- str.pop_back();
-
- return str;
-}
-}
-
-namespace onert
-{
-namespace ir
-{
-
-OpSequence::OpSequence(Layout layout) : _layout{layout}, _has_dynamic_tensor{false}
-{
- // DO NOTHING
-}
-
-void OpSequence::accept(OperationVisitor &v) const { v.visit(*this); }
-
-// TODO: Impl Dumper instead of this method
-std::string getStrFromOpSeq(const OpSequence &op_seq, const Operations &operations)
-{
- // " OpSequence IN(0,1,2) -> { op0(0,1,2:3), op1(3:4), op2(4:5) } -> OUT(5)"
- std::stringstream ss;
- ss << " OpSequence IN(" << getStrFromIndice(op_seq.getInputs()) << ") -> {";
- for (const auto &op_idx : op_seq)
- {
- ss << " " << op_idx.value() << "(" << operations.at(op_idx).name() << ":"
- << getStrFromIndice(operations.at(op_idx).getInputs()) << ":"
- << getStrFromIndice(operations.at(op_idx).getOutputs()) << ")";
- }
- ss << " } -> OUT(" << getStrFromIndice(op_seq.getOutputs()) << ")";
- return ss.str();
-}
-
-void OpSequence::remove(const OperationIndex &index)
-{
- assert(exist(index));
- for (auto it = _operations.cbegin(); it != _operations.cend(); ++it)
- {
- if (*it == index)
- {
- _operations.erase(it);
- break;
- }
- }
-}
-
-bool OpSequence::exist(const OperationIndex &index) const
-{
- for (const auto &inner_op_idx : _operations)
- {
- if (inner_op_idx == index)
- {
- return true;
- }
- }
- return false;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/OpSequences.cc b/runtime/onert/core/src/ir/OpSequences.cc
deleted file mode 100644
index 68884783e..000000000
--- a/runtime/onert/core/src/ir/OpSequences.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/OpSequences.h"
-#include "util/logging.h"
-#include <memory>
-
-#include <cassert>
-#include <string>
-
-namespace onert
-{
-namespace ir
-{
-
-OpSequenceIndex OpSequences::emplace(const OperationIndex &index, Layout layout)
-{
- std::unique_ptr<OpSequence> op_seq = std::make_unique<OpSequence>(layout);
- op_seq->appendOperation(index);
- const OpSequenceIndex &seq_index = push(std::move(op_seq));
- cacheSequenceIndex(seq_index, index);
- return seq_index;
-}
-
-OpSequenceIndex OpSequences::emplace(std::unique_ptr<OpSequence> &&op_seq)
-{
- auto &operations = op_seq->operations();
- const OpSequenceIndex &seq_index = push(std::move(op_seq));
- for (const auto &op_idx : operations)
- {
- cacheSequenceIndex(seq_index, op_idx);
- }
- return seq_index;
-}
-
-void OpSequences::cacheSequenceIndex(const OpSequenceIndex &seq_index,
- const OperationIndex &op_index) const
-{
- _seq_indexes.emplace(op_index, seq_index);
-}
-
-OpSequenceIndex *OpSequences::findSequenceIndex(const OperationIndex &operation_index) const
-{
- // If opration_index is cached, return sequence_index from cache
- if (_seq_indexes.count(operation_index))
- {
- auto &op_seq_index = _seq_indexes.at(operation_index);
- if (_objects.count(op_seq_index) && _objects.at(op_seq_index)->exist(operation_index))
- {
- return &op_seq_index;
- }
- else
- {
- _seq_indexes.erase(operation_index);
- return nullptr;
- }
- }
- return nullptr;
-}
-
-bool OpSequences::containsOperation(const OperationIndex &operation_index) const
-{
- return findOperation(operation_index).valid();
-}
-
-OpSequenceIndex OpSequences::getOperation(const OperationIndex &operation_index) const
-{
- OpSequenceIndex ret = findOperation(operation_index);
- assert(ret.valid());
- return ret;
-}
-
-void OpSequences::removeFromOpSequence(const OperationIndex &operation_index)
-{
- const auto op_seq_index = findOperation(operation_index);
- auto &op_seq = at(op_seq_index);
- _seq_indexes.erase(operation_index);
- op_seq.remove(operation_index);
- if (op_seq.size() == 0)
- {
- remove(op_seq_index);
- }
-}
-
-OpSequenceIndex OpSequences::findOperation(const OperationIndex &operation_index) const
-{
- if (OpSequenceIndex *op_seq_index = findSequenceIndex(operation_index))
- return *op_seq_index;
-
- for (auto &e : _objects)
- {
- OpSequence &object = *e.second;
- auto it = find(object.operations().begin(), object.operations().end(), operation_index);
- if (it != object.operations().end())
- {
- cacheSequenceIndex(e.first, operation_index);
- return e.first;
- }
- }
- throw std::runtime_error("Operation not found");
-}
-
-void dumpOpSequences(const OpSequences &op_seqs, const Operations &operations)
-{
- op_seqs.iterate([&](const OpSequenceIndex &idx, const OpSequence &op_seq) {
- VERBOSE(OpSequences) << idx.value() << "] " << getStrFromOpSeq(op_seq, operations) << std::endl;
- });
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Operand.cc b/runtime/onert/core/src/ir/Operand.cc
deleted file mode 100644
index e29c7a6ec..000000000
--- a/runtime/onert/core/src/ir/Operand.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Operand.h"
-
-namespace onert
-{
-namespace ir
-{
-
-size_t Operand::operandSize(void) const
-{
- const uint32_t ranks = shape().rank();
- int32_t elements = 1;
-
- for (uint32_t rank = 0; rank < ranks; rank++)
- {
- elements *= shape().dim(rank);
- }
-
- DataType type = typeInfo().type();
- size_t element_size = sizeOfDataType(type);
-
- // Value of type is matched with OperandCode enum in NeuralNetworks.h
- return element_size * elements;
-}
-
-void Operand::insertUse(const OperationIndex &idx) { _uses.insert(idx); }
-
-void Operand::removeUse(const OperationIndex &idx) { _uses.remove(idx); }
-
-void Operand::setDef(const OperationIndex &idx) { _def = idx; }
-
-void Operand::unsetDef() { _def = OperationIndex{}; }
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/OperandIndexSequence.cc b/runtime/onert/core/src/ir/OperandIndexSequence.cc
deleted file mode 100644
index 73f928280..000000000
--- a/runtime/onert/core/src/ir/OperandIndexSequence.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/OperandIndexSequence.h"
-
-#include <algorithm>
-#include <sstream>
-
-namespace onert
-{
-namespace ir
-{
-
-OperandIndexSequence::OperandIndexSequence(std::initializer_list<OperandIndex> list) : _vec(list)
-{
- // DO NOTHING
-}
-
-OperandIndexSequence::OperandIndexSequence(std::initializer_list<int32_t> list)
-{
- for (auto val : list)
- {
- _vec.emplace_back(static_cast<uint32_t>(val));
- }
-}
-
-OperandIndexSequence::OperandIndexSequence(std::initializer_list<uint32_t> list)
-{
- for (auto val : list)
- {
- _vec.emplace_back(val);
- }
-}
-
-bool OperandIndexSequence::contains(const OperandIndex &index) const
-{
- return std::find(_vec.begin(), _vec.end(), index) != _vec.end();
-}
-
-void OperandIndexSequence::replace(const OperandIndex &from, const OperandIndex &to)
-{
- std::replace(_vec.begin(), _vec.end(), from, to);
-}
-
-OperandIndexSequence OperandIndexSequence::operator+(const OperandIndexSequence &other) const
-{
- OperandIndexSequence ret = *this;
- ret.append(other);
- return ret;
-}
-
-std::ostream &operator<<(std::ostream &o, const OperandIndexSequence &op_seq)
-{
- std::string delimeter;
- for (const auto &ind : op_seq._vec)
- {
- o << delimeter << ind;
- delimeter = ',';
- }
- return o;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Operands.cc b/runtime/onert/core/src/ir/Operands.cc
deleted file mode 100644
index ab32e478a..000000000
--- a/runtime/onert/core/src/ir/Operands.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Operands.h"
-
-#include <memory>
-#include "util/logging.h"
-
-namespace onert
-{
-namespace ir
-{
-
-Operands::Operands(const Operands &obj)
-{
- obj.iterate([&](const OperandIndex &index, const Operand &operand) {
- _objects.emplace(index, std::make_unique<Operand>(operand));
- });
- _index_count = obj._index_count;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Operation.cc b/runtime/onert/core/src/ir/Operation.cc
deleted file mode 100644
index 4af878541..000000000
--- a/runtime/onert/core/src/ir/Operation.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Operation.h"
-
-#include <cassert>
-
-namespace onert
-{
-namespace ir
-{
-
-Operation::Operation(OperandConstraint input_constr, const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, OperandConstraint output_constr)
- : _input_constr{input_constr}, _output_constr{output_constr}
-{
- setInputs(inputs);
- setOutputs(outputs);
-}
-
-Operation::Operation(OperandConstraint input_constr, OperandConstraint output_constr)
- : _input_constr{input_constr}, _output_constr{output_constr}
-{
-}
-
-Operation::~Operation() = default;
-
-void Operation::setInputs(const OperandIndexSequence &indexes)
-{
- if (!_input_constr.check(indexes.size()))
- throw std::runtime_error{"Invalid number of input tensors for this operation."};
- _inputs = indexes;
-}
-
-void Operation::setOutputs(const OperandIndexSequence &indexes)
-{
- if (!_output_constr.check(indexes.size()))
- throw std::runtime_error{"Invalid number of output tensors for this operation."};
- _outputs = indexes;
-}
-
-void Operation::replaceInputs(const OperandIndex &from, const OperandIndex &to)
-{
- _inputs.replace(from, to);
-}
-
-void Operation::replaceOutputs(const OperandIndex &from, const OperandIndex &to)
-{
- _outputs.replace(from, to);
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/OperationCloner.cc b/runtime/onert/core/src/ir/OperationCloner.cc
deleted file mode 100644
index b4e60f0bc..000000000
--- a/runtime/onert/core/src/ir/OperationCloner.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationCloner.h"
-
-#include <assert.h>
-
-namespace onert
-{
-namespace ir
-{
-
-#define OP(Name) \
- void OperationCloner::visit(const operation::Name &o) \
- { \
- assert(!_return_op); \
- _return_op = std::make_unique<operation::Name>(o); \
- }
-#include "ir/Operations.lst"
-#undef OP
-
-std::unique_ptr<Operation> OperationCloner::releaseClone()
-{
- assert(_return_op);
- return std::move(_return_op);
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/OperationCloner.h b/runtime/onert/core/src/ir/OperationCloner.h
deleted file mode 100644
index 0e8cda2a0..000000000
--- a/runtime/onert/core/src/ir/OperationCloner.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_IR_OPERATION_CLONER_H__
-#define __ONERT_IR_OPERATION_CLONER_H__
-
-#include <memory>
-#include "ir/OperationVisitor.h"
-#include "ir/Operation.h"
-
-namespace onert
-{
-namespace ir
-{
-
-class OperationCloner : public OperationVisitor
-{
-public:
-#define OP(Name) void visit(const operation::Name &o) override;
-#include "ir/Operations.lst"
-#undef OP
-
-public:
- std::unique_ptr<Operation> releaseClone();
-
-private:
- std::unique_ptr<Operation> _return_op;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_IR_OPERATION_CLONER_H__
diff --git a/runtime/onert/core/src/ir/OperationDumper.cc b/runtime/onert/core/src/ir/OperationDumper.cc
deleted file mode 100644
index eecfe81cc..000000000
--- a/runtime/onert/core/src/ir/OperationDumper.cc
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationDumper.h"
-
-#include <string>
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace ir
-{
-
-using namespace operation;
-
-namespace
-{
-void dumpUnaryInputOp(const Operation &node, const std::string &adding_input = "")
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(0) << ") " << adding_input
- << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void dumpBinaryInputOp(const Operation &node, const std::string &adding_input = "")
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(0) << ", " << node.getInputs().at(1)
- << ") " << adding_input << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void dumpConvOp(const Operation &node, const std::string &padding_type)
-{
- VERBOSE(LIR) << "* " << node.name() << "(" << padding_type << ")" << std::endl;
- VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(Conv2D::Input::INPUT) << ") Kernel("
- << node.getInputs().at(Conv2D::Input::KERNEL) << ") Bias("
- << node.getInputs().at(Conv2D::Input::BIAS) << ")" << std::endl;
- VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void dumpPackingOp(const Operation &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- std::string inputs;
- for (auto i : node.getInputs())
- {
- inputs += std::to_string(i.value()) + ",";
- }
- VERBOSE(LIR) << " - Inputs : Inputs(" << inputs << ")" << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-} // namespace
-
-OperationDumper::OperationDumper(const std::string &start_msg)
-{
- VERBOSE(LIR) << start_msg << std::endl;
-}
-
-void OperationDumper::visit(const ArgMax &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const BatchToSpaceND &node)
-{
- std::string block_size =
- "BlockSize(" +
- std::to_string(node.getInputs().at(BatchToSpaceND::Input::BLOCK_SIZE).value()) + ")";
- dumpUnaryInputOp(node, block_size);
-}
-
-void OperationDumper::visit(const BCQFullyConnected &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(BCQFullyConnected::Input::INPUT)
- << ") WeightsBinary("
- << node.getInputs().at(BCQFullyConnected::Input::WEIGHTS_BINARY)
- << ") WeightsScales("
- << node.getInputs().at(BCQFullyConnected::Input::WEIGHTS_SCALES)
- << ") WeightsClusters("
- << node.getInputs().at(BCQFullyConnected::Input::WEIGHTS_CLUSTERS) << ") Bias("
- << node.getInputs().at(BCQFullyConnected::Input::BIAS) << ")" << std::endl;
- VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const BinaryArithmetic &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const operation::BroadcastTo &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const Comparison &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const Concat &node) { dumpPackingOp(node); }
-
-void OperationDumper::visit(const Conv2D &node)
-{
- std::string padding_type =
- node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
- dumpConvOp(node, padding_type);
-}
-
-void OperationDumper::visit(const ConvertFp16ToFp32 &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const ConvertFp32ToFp16 &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const DepthToSpace &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const DepthwiseConv2D &node)
-{
- std::string padding_type =
- node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
- dumpConvOp(node, padding_type);
-}
-
-void OperationDumper::visit(const ElementwiseActivation &node)
-{
- std::string params;
- if (node.param().op_type == ElementwiseActivation::Type::RELU)
- {
- params = " lower value(" + std::to_string(node.param().alpha) + ") upper value(" +
- std::to_string(node.param().beta) + ")";
- }
- else if (node.param().op_type == ElementwiseActivation::Type::LEAKY_RELU)
- {
- params = " alpha value(" + std::to_string(node.param().alpha) + ")";
- }
- dumpUnaryInputOp(node, params);
-}
-
-void OperationDumper::visit(const ElementwiseBinary &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const ElementwiseUnary &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const EmbeddingLookup &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- VERBOSE(LIR) << " - Inputs : Lookups(" << node.getInputs().at(EmbeddingLookup::Input::LOOKUPS)
- << ") VALUES(" << node.getInputs().at(EmbeddingLookup::Input::VALUES) << ")"
- << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const ExpandDims &node)
-{
- std::string axis =
- "AXIS(" + std::to_string(node.getInputs().at(ExpandDims::Input::AXIS).value()) + ")";
- dumpUnaryInputOp(node, axis);
-}
-
-void OperationDumper::visit(const FullyConnected &node)
-{
- std::string inputs =
- "Weight(" + std::to_string(node.getInputs().at(FullyConnected::Input::WEIGHT).value()) +
- ") Bias(" + std::to_string(node.getInputs().at(FullyConnected::Input::BIAS).value()) + ")";
- dumpUnaryInputOp(node, inputs);
-}
-
-void OperationDumper::visit(const Gather &node)
-{
- std::string indices =
- "Indices(" + std::to_string(node.getInputs().at(Gather::Input::INDICES).value()) + ")";
- dumpUnaryInputOp(node, indices);
-}
-
-void OperationDumper::visit(const HashtableLookup &node)
-{
- VERBOSE(LIR) << "* HashTableLookup" << std::endl;
- VERBOSE(LIR) << " - Inputs : Lookups(" << node.getInputs().at(HashtableLookup::Input::LOOKUPS)
- << ") Keys(" << node.getInputs().at(HashtableLookup::Input::KEYS) << ") Values("
- << node.getInputs().at(HashtableLookup::Input::VALUES) << ")" << std::endl;
- VERBOSE(LIR) << " - Outputs : Output(" << node.getInputs().at(HashtableLookup::Output::OUTPUT)
- << ") Hits(" << node.getInputs().at(HashtableLookup::Output::HITS) << ")"
- << std::endl;
-}
-
-void OperationDumper::visit(const InstanceNorm &node)
-{
- std::string inputs =
- "Gamma(" + std::to_string(node.getInputs().at(InstanceNorm::Input::GAMMA).value()) +
- ") Beta(" + std::to_string(node.getInputs().at(InstanceNorm::Input::BETA).value()) + ")";
- dumpUnaryInputOp(node, inputs);
-}
-
-void OperationDumper::visit(const L2Normalization &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const LocalResponseNormalization &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const LSTM &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- VERBOSE(LIR)
- << " - Inputs : Input(" << node.getInputs().at(LSTM::Input::INPUT)
- << ") Input To Input Weights(" << node.getInputs().at(LSTM::Input::INPUT_TO_INPUT_WEIGHTS)
- << ") Input To Forget Weights(" << node.getInputs().at(LSTM::Input::INPUT_TO_FORGET_WEIGHTS)
- << ") Input To Cell Weights(" << node.getInputs().at(LSTM::Input::INPUT_TO_CELL_WEIGHTS)
- << ") Input To Output Weights(" << node.getInputs().at(LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)
- << ") Recurrent To Input Weights("
- << node.getInputs().at(LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)
- << ") Recurrent To Forget Weights("
- << node.getInputs().at(LSTM::Input::RECURRENT_TO_FORGET_WEIGHTS)
- << ") Recurrent To Cell Weights("
- << node.getInputs().at(LSTM::Input::RECURRENT_TO_CELL_WEIGHTS)
- << ") Recurrent To Output Weights("
- << node.getInputs().at(LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS) << ") Cell To Input Weights("
- << node.getInputs().at(LSTM::Input::CELL_TO_INPUT_WEIGHTS) << ") Cell To Forget Weights("
- << node.getInputs().at(LSTM::Input::CELL_TO_FORGET_WEIGHTS) << ") Cell To OUTPUT Weights("
- << node.getInputs().at(LSTM::Input::CELL_TO_OUTPUT_WEIGHTS) << ") Input Gate Bias("
- << node.getInputs().at(LSTM::Input::INPUT_GATE_BIAS) << ") Forget Gate Bias("
- << node.getInputs().at(LSTM::Input::FORGET_GATE_BIAS) << ") Cell Bias("
- << node.getInputs().at(LSTM::Input::CELL_BIAS) << ") Output Gate Bias("
- << node.getInputs().at(LSTM::Input::OUTPUT_GATE_BIAS) << ") Projection Weights("
- << node.getInputs().at(LSTM::Input::PROJECTION_WEIGHTS) << ") Projection Bias("
- << node.getInputs().at(LSTM::Input::PROJECTION_BIAS) << ") Output State In("
- << node.getInputs().at(LSTM::Input::OUTPUT_STATE_IN) << ") Cell State In("
- << node.getInputs().at(LSTM::Input::CELL_STATE_IN);
- if (node.getInputs().size() == 24)
- {
- VERBOSE(LIR) << ") Input Layer Normalization Weights("
- << node.getInputs().at(LSTM::Input::INPUT_LAYER_NORMALIZATION_WEIGHTS)
- << ") Forget Layer Normalization Weights("
- << node.getInputs().at(LSTM::Input::FORGET_LAYER_NORMALIZATION_WEIGHTS)
- << ") Cell Layer Normalization Weights("
- << node.getInputs().at(LSTM::Input::CELL_LAYER_NORMALIZATION_WEIGHTS)
- << ") Ouput Layer Normalization Weights("
- << node.getInputs().at(LSTM::Input::OUTPUT_LAYER_NORMALIZATION_WEIGHTS);
- }
- VERBOSE(LIR) << ")" << std::endl;
- VERBOSE(LIR) << " - Output : Scratch Buffer("
- << node.getOutputs().at(LSTM::Output::SCRATCH_BUFFER) << ") Output State Out("
- << node.getOutputs().at(LSTM::Output::OUTPUT_STATE_OUT) << ") Cell State Out("
- << node.getOutputs().at(LSTM::Output::CELL_STATE_OUT) << ") Output("
- << node.getOutputs().at(LSTM::Output::OUTPUT) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const Pack &node) { dumpPackingOp(node); }
-
-void OperationDumper::visit(const Pad &node)
-{
- std::string pad = "Pad(" + std::to_string(node.getInputs().at(Pad::Input::PAD).value()) + ")";
- dumpUnaryInputOp(node, pad);
-}
-
-void OperationDumper::visit(const Permute &node)
-{
- std::string permute_type = "Unknown";
- switch (node.getPermuteType())
- {
- case Permute::Type::COPY:
- permute_type = "Copy";
- break;
- case Permute::Type::NHWC_TO_NCHW:
- permute_type = "NHWC to NCHW";
- break;
- case Permute::Type::NCHW_TO_NHWC:
- permute_type = "NCHW to NHWC";
- break;
- }
-
- VERBOSE(LIR) << "* Permute(" + permute_type + ")" << std::endl;
- VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(0) << ")" << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const Pool2D &node)
-{
- std::string padding_type =
- node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
- VERBOSE(LIR) << "* " << node.name() << "(" << padding_type << ")" << std::endl;
- VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(Pool2D::Input::INPUT) << ")"
- << std::endl;
- VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const Pow &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const PReLU &node)
-{
- std::string alpha =
- "Alpha(" + std::to_string(node.getInputs().at(PReLU::Input::ALPHA).value()) + ")";
- dumpUnaryInputOp(node, alpha);
-}
-
-void OperationDumper::visit(const Rank &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const Reduce &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const Reshape &node)
-{
- // optional param
- std::string shape =
- node.getInputs().size() == 2
- ? "Shape(" + std::to_string(node.getInputs().at(Reshape::Input::SHAPE).value()) + ")"
- : "Shape(not provided)";
- dumpUnaryInputOp(node, shape);
-}
-
-void OperationDumper::visit(const ResizeBilinear &node)
-{
- if (node.getInputs().size() == 1)
- {
- dumpUnaryInputOp(node);
- }
- else if (node.getInputs().size() == 2)
- {
- dumpBinaryInputOp(node);
- }
- else
- {
- VERBOSE(LIR) << "* " << node.name() << " is set wrong" << std::endl;
- }
-}
-
-void OperationDumper::visit(const ResizeNearestNeighbor &node)
-{
- if (node.getInputs().size() == 1)
- {
- dumpUnaryInputOp(node);
- }
- else if (node.getInputs().size() == 2)
- {
- dumpBinaryInputOp(node);
- }
- else
- {
- VERBOSE(LIR) << "* " << node.name() << " is set wrong" << std::endl;
- }
-}
-
-void OperationDumper::visit(const Reverse &node)
-{
- std::string axis =
- "Axis(" + std::to_string(node.getInputs().at(Reverse::Input::AXIS).value()) + ")";
- dumpUnaryInputOp(node, axis);
-}
-
-void OperationDumper::visit(const RNN &node)
-{
- VERBOSE(LIR) << "* RNN" << std::endl;
- VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(RNN::Input::INPUT) << ") Weights("
- << node.getInputs().at(RNN::Input::WEIGHTS) << ") Recurrent Weights("
- << node.getInputs().at(RNN::Input::RECURRENT_WEIGHTS) << ") Bias("
- << node.getInputs().at(RNN::Input::BIAS) << ") Hidden State("
- << node.getInputs().at(RNN::Input::HIDDEN_STATE_IN) << ")" << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(RNN::Output::OUTPUT)
- << ") Hidden State(" << node.getInputs().at(RNN::Output::HIDDEN_STATE_OUT) << ")"
- << std::endl;
-}
-
-void OperationDumper::visit(const Range &node)
-{
- VERBOSE(LIR) << "* Range" << std::endl;
- VERBOSE(LIR) << " - Inputs : Start(" << node.getInputs().at(Range::Input::START) << ")"
- << " Limit(" << node.getInputs().at(Range::Input::LIMIT) << ")"
- << " Delta(" << node.getInputs().at(Range::Input::DELTA) << ")" << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const Select &node)
-{
- VERBOSE(LIR) << "* Select" << std::endl;
- VERBOSE(LIR) << " - Inputs : Condition(" << node.getInputs().at(Select::Input::CONDITION) << ")"
- << " Input_X(" << node.getInputs().at(Select::Input::INPUT_TRUE) << ")"
- << " Input_Y(" << node.getInputs().at(Select::Input::INPUT_FALSE) << ")"
- << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const ir::operation::Shape &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const Softmax &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const SpaceToBatchND &node)
-{
- std::string inputs =
- "BlockSize(" +
- std::to_string(node.getInputs().at(SpaceToBatchND::Input::BLOCK_SIZE).value()) +
- ") Paddings(" + std::to_string(node.getInputs().at(SpaceToBatchND::Input::PADDINGS).value()) +
- ")";
- dumpUnaryInputOp(node, inputs);
-}
-
-void OperationDumper::visit(const SpaceToDepth &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const Split &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const SquaredDifference &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const StatelessRandomUniform &node)
-{
- VERBOSE(LIR) << "* StatelessRandomUniform" << std::endl;
- VERBOSE(LIR) << " - Inputs : Shape(" << node.getInputs().at(StatelessRandomUniform::Input::SHAPE)
- << " Seed(" << node.getInputs().at(StatelessRandomUniform::Input::SEED) << ")"
- << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const Squeeze &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const Slice &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const StridedSlice &node) { dumpUnaryInputOp(node); }
-
-void OperationDumper::visit(const Tile &node)
-{
- std::string multiples =
- "Multiples(" + std::to_string(node.getInputs().at(Tile::Input::MULTIPLES).value()) + ")";
- dumpUnaryInputOp(node, multiples);
-}
-
-void OperationDumper::visit(const TopKV2 &node)
-{
- VERBOSE(LIR) << "* TopKV2" << std::endl;
- VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(TopKV2::Input::INPUT) << ")"
- << std::endl;
- VERBOSE(LIR) << " - Outputs : Values(" << node.getOutputs().at(TopKV2::Output::OUTPUT_VALUES)
- << ") Indices(" << node.getOutputs().at(TopKV2::Output::OUTPUT_INDICES) << ")"
- << std::endl;
-}
-
-void OperationDumper::visit(const TransposeConv &node)
-{
- std::string padding_type =
- node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
- VERBOSE(LIR) << "* TransposeConv(" << padding_type << ")" << std::endl;
- VERBOSE(LIR) << " - Inputs : Output Shape("
- << node.getInputs().at(TransposeConv::Input::OUTPUT_SHAPE) << ") KERNEL("
- << node.getInputs().at(TransposeConv::Input::KERNEL) << ") IFM("
- << node.getInputs().at(TransposeConv::Input::INPUT) << ")" << std::endl;
- VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const Transpose &node) { dumpBinaryInputOp(node); }
-
-void OperationDumper::visit(const Unpack &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Unpack::Input::INPUT) << ")"
- << std::endl;
- std::string outputs;
- const auto &output_indices = node.getOutputs();
- for (auto it = std::begin(output_indices); it != std::end(output_indices); ++it)
- {
- outputs += std::to_string(it->value());
- if (std::next(it) != std::end(output_indices))
- outputs += ", ";
- }
- VERBOSE(LIR) << " - Outputs : Outputs(" << outputs << ")" << std::endl;
-}
-
-void OperationDumper::visit(const OneHot &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- VERBOSE(LIR) << " - Inputs : "
- << "Indices(" << node.getInputs().at(OneHot::Input::INDICES) << ") " << std::endl;
- VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl;
-}
-
-void OperationDumper::visit(const If &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- std::string inputs;
- const auto &input_indices = node.getInputs();
- for (auto it = std::begin(input_indices); it != std::end(input_indices); ++it)
- {
- inputs += std::to_string(it->value());
- if (std::next(it) != std::end(input_indices))
- inputs += ", ";
- }
- VERBOSE(LIR) << " - Inputs : "
- << "Then subgraph (" << node.param().then_subg_index << ") Else subgraph ("
- << node.param().else_subg_index << ") Inputs(" << inputs << ")" << std::endl;
- std::string outputs;
- const auto &output_indices = node.getOutputs();
- for (auto it = std::begin(output_indices); it != std::end(output_indices); ++it)
- {
- outputs += std::to_string(it->value());
- if (std::next(it) != std::end(output_indices))
- outputs += ", ";
- }
- VERBOSE(LIR) << " - Output : Outputs(" << outputs << ")" << std::endl;
-}
-
-void OperationDumper::visit(const While &node)
-{
- VERBOSE(LIR) << "* " << node.name() << std::endl;
- std::string inputs;
- const auto &input_indices = node.getInputs();
- for (auto it = std::begin(input_indices); it != std::end(input_indices); ++it)
- {
- inputs += std::to_string(it->value());
- if (std::next(it) != std::end(input_indices))
- inputs += ", ";
- }
- VERBOSE(LIR) << " - Inputs : "
- << "Cond subgraph (" << node.param().cond_subg_index << ") Body subgraph ("
- << node.param().cond_subg_index << ") Inputs(" << inputs << ")" << std::endl;
- std::string outputs;
- const auto &output_indices = node.getOutputs();
- for (auto it = std::begin(output_indices); it != std::end(output_indices); ++it)
- {
- outputs += std::to_string(it->value());
- if (std::next(it) != std::end(output_indices))
- outputs += ", ";
- }
- VERBOSE(LIR) << " - Output : Outputs(" << outputs << ")" << std::endl;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/OperationDumper.h b/runtime/onert/core/src/ir/OperationDumper.h
deleted file mode 100644
index 91642ab13..000000000
--- a/runtime/onert/core/src/ir/OperationDumper.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_OPERATION_DUMPER_H__
-#define __ONERT_OPERATION_DUMPER_H__
-
-#include "ir/OperationVisitor.h"
-#include <string>
-
-namespace onert
-{
-namespace ir
-{
-
-class OperationDumper : public OperationVisitor
-{
-public:
- OperationDumper(const std::string &start_msg);
-
-public:
- void visit(const operation::ArgMax &) override;
- void visit(const operation::BatchToSpaceND &node) override;
- void visit(const operation::BCQFullyConnected &node) override;
- void visit(const operation::BinaryArithmetic &node) override;
- void visit(const operation::BroadcastTo &) override;
- void visit(const operation::Comparison &) override;
- void visit(const operation::Concat &node) override;
- void visit(const operation::Conv2D &node) override;
- void visit(const operation::ConvertFp16ToFp32 &node) override;
- void visit(const operation::ConvertFp32ToFp16 &node) override;
- void visit(const operation::DepthToSpace &) override;
- void visit(const operation::DepthwiseConv2D &node) override;
- void visit(const operation::ElementwiseActivation &) override;
- void visit(const operation::ElementwiseBinary &) override;
- void visit(const operation::ElementwiseUnary &) override;
- void visit(const operation::EmbeddingLookup &) override;
- void visit(const operation::ExpandDims &) override;
- void visit(const operation::FullyConnected &node) override;
- void visit(const operation::Gather &) override;
- void visit(const operation::HashtableLookup &) override;
- void visit(const operation::InstanceNorm &) override;
- void visit(const operation::L2Normalization &) override;
- void visit(const operation::LocalResponseNormalization &) override;
- void visit(const operation::LSTM &) override;
- void visit(const operation::Pack &) override;
- void visit(const operation::Pad &) override;
- void visit(const operation::Permute &node) override;
- void visit(const operation::Pool2D &node) override;
- void visit(const operation::Pow &node) override;
- void visit(const operation::PReLU &) override;
- void visit(const operation::Range &) override;
- void visit(const operation::Rank &) override;
- void visit(const operation::Reduce &) override;
- void visit(const operation::Reshape &node) override;
- void visit(const operation::ResizeBilinear &) override;
- void visit(const operation::ResizeNearestNeighbor &) override;
- void visit(const operation::Reverse &) override;
- void visit(const operation::RNN &) override;
- void visit(const operation::Select &node) override;
- void visit(const operation::Shape &node) override;
- void visit(const operation::Softmax &node) override;
- void visit(const operation::SpaceToBatchND &) override;
- void visit(const operation::SpaceToDepth &) override;
- void visit(const operation::Split &) override;
- void visit(const operation::SquaredDifference &) override;
- void visit(const operation::Squeeze &) override;
- void visit(const operation::Slice &) override;
- void visit(const operation::StridedSlice &) override;
- void visit(const operation::StatelessRandomUniform &) override;
- void visit(const operation::Tile &) override;
- void visit(const operation::TopKV2 &) override;
- void visit(const operation::TransposeConv &) override;
- void visit(const operation::Transpose &) override;
- void visit(const operation::Unpack &) override;
- void visit(const operation::OneHot &) override;
- void visit(const operation::If &) override;
- void visit(const operation::While &) override;
-};
-
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_OPERATION_DUMPER_H__
diff --git a/runtime/onert/core/src/ir/OperationIndexSet.cc b/runtime/onert/core/src/ir/OperationIndexSet.cc
deleted file mode 100644
index 750ffffa6..000000000
--- a/runtime/onert/core/src/ir/OperationIndexSet.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/OperationIndexSet.h"
-
-#include <algorithm>
-
-namespace onert
-{
-namespace ir
-{
-
-OperationIndexSet::OperationIndexSet(std::initializer_list<OperationIndex> list) : _set(list)
-{
- // DO NOTHING
-}
-
-bool OperationIndexSet::contains(const OperationIndex &index) const
-{
- return std::find(_set.begin(), _set.end(), index) != _set.end();
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Operations.cc b/runtime/onert/core/src/ir/Operations.cc
deleted file mode 100644
index 64d0bd6f0..000000000
--- a/runtime/onert/core/src/ir/Operations.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Operations.h"
-
-#include "OperationCloner.h"
-
-namespace onert
-{
-namespace ir
-{
-
-Operations::Operations(const Operations &obj)
-{
- obj.iterate([&](const OperationIndex &index, const Operation &op) {
- OperationCloner cloner;
- op.accept(cloner);
- _objects.emplace(index, cloner.releaseClone());
- });
- _index_count = obj._index_count;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Padding.cc b/runtime/onert/core/src/ir/Padding.cc
deleted file mode 100644
index d74f80217..000000000
--- a/runtime/onert/core/src/ir/Padding.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Padding.h"
-
-#include "util/Utils.h"
-
-#include <stdexcept>
-#include <cassert>
-
-namespace onert
-{
-namespace ir
-{
-namespace
-{
-
-inline ExplicitPadding validPadding(void)
-{
- //
- // ANEURALNETWORKS_PADDING_VALID
- //
- // VALID padding. No padding.
- //
- // When the input size is not evenly divisible by the filter size,
- // the input at the end that could not fill the whole filter tile
- // will simply be ignored.
- //
- ExplicitPadding padding;
-
- padding.top = 0;
- padding.bottom = 0;
- padding.left = 0;
- padding.right = 0;
-
- return padding;
-}
-
-inline ExplicitPadding samePaddingUsingIFM(const FeatureShape &ifm_shape, const Stride &stride,
- uint32_t kw, uint32_t kh, uint32_t dwf, uint32_t dhf)
-{
- ExplicitPadding padding;
-
- // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
- //
- // SAME padding. Padding on both ends are the "same":
- //
- // padding_to_beginning = total_padding / 2
- // padding_to_end = (total_padding + 1)/2.
- //
- const int32_t effective_filter_h_size = (kh - 1) * dhf + 1;
- const int32_t effective_filter_w_size = (kw - 1) * dwf + 1;
-
- const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
- const int32_t horizontal_expected_output =
- (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
-
- const int32_t vertical_needed_input =
- (vertical_expected_output - 1) * stride.vertical + effective_filter_h_size;
- const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
-
- const int32_t horizontal_needed_input =
- (horizontal_expected_output - 1) * stride.horizontal + effective_filter_w_size;
- const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
-
- padding.top = vertical_total_padding / 2;
- padding.bottom = (vertical_total_padding + 1) / 2;
- padding.left = horizontal_total_padding / 2;
- padding.right = (horizontal_total_padding + 1) / 2;
-
- return padding;
-}
-
-inline ExplicitPadding samePadding(const FeatureShape &ifm_shape, const FeatureShape &ofm_shape,
- const Stride &stride, uint32_t kw, uint32_t kh, uint32_t dwf,
- uint32_t dhf)
-{
- const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
- const int32_t horizontal_expected_output =
- (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
- assert(vertical_expected_output == ofm_shape.H);
- assert(horizontal_expected_output == ofm_shape.W);
-
- UNUSED_RELEASE(ofm_shape);
- UNUSED_RELEASE(vertical_expected_output);
- UNUSED_RELEASE(horizontal_expected_output);
-
- return samePaddingUsingIFM(ifm_shape, stride, kw, kh, dwf, dhf);
-}
-
-} // namespace
-
-inline std::string to_string(const PaddingType type)
-{
- switch (type)
- {
- case PaddingType::EXPLICIT:
- return "Padding::EXPLICIT";
- case PaddingType::SAME:
- return "Padding::SAME";
- case PaddingType::VALID:
- return "Padding::VALID";
- default:
- throw std::runtime_error{"Fail to convert string: wrong padding type"};
- }
-}
-
-Padding::Padding(void) : type{PaddingType::EXPLICIT}, param{0, 0, 0, 0}
-{
- // DO NOTHING
-}
-
-Padding::Padding(PaddingType paddingType) : type{paddingType}, param{0, 0, 0, 0}
-{
- assert(paddingType != PaddingType::EXPLICIT);
-}
-
-Padding::Padding(uint32_t left, uint32_t right, uint32_t top, uint32_t bottom)
- : type{PaddingType::EXPLICIT}, param{left, right, top, bottom}
-{
- // DO NOTHING
-}
-
-const ExplicitPadding calculatePadding(const Padding &padding, const FeatureShape &ifm_shape,
- const FeatureShape &ofm_shape, const Stride &stride,
- uint32_t kw, uint32_t kh, uint32_t dwf, uint32_t dhf)
-{
- if (padding.type == PaddingType::EXPLICIT)
- {
- return padding.param;
- }
- else if (padding.type == PaddingType::SAME)
- {
- return samePadding(ifm_shape, ofm_shape, stride, kw, kh, dwf, dhf);
- }
- else if (padding.type == PaddingType::VALID)
- {
- return validPadding();
- }
- else
- {
- throw std::runtime_error{"Cannot handle padding type"};
- }
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/Shape.cc b/runtime/onert/core/src/ir/Shape.cc
deleted file mode 100644
index 322df7b4c..000000000
--- a/runtime/onert/core/src/ir/Shape.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/Shape.h"
-
-#include <cassert>
-#include <functional>
-#include <numeric>
-#include <algorithm>
-
-namespace onert
-{
-namespace ir
-{
-
-int32_t const Shape::UNSPECIFIED_DIM = -1;
-
-// NNFW_MAX_RANK is 6
-int32_t const Shape::MAX_RANK = 6;
-
-FeatureShape Shape::asFeature(Layout layout) const
-{
- assert(rank() == 4);
-
- if (layout == Layout::NHWC)
- {
- // Feature Map in NHWC layout
- // - Dimension(0) -> Batch
- // - Dimension(1) -> Height
- // - Dimension(2) -> Width
- // - Dimension(3) -> Depth
- const auto batch = dim(0);
- const auto depth = dim(3);
- const auto height = dim(1);
- const auto width = dim(2);
-
- return {batch, depth, height, width};
- }
- else if (layout == Layout::NCHW)
- {
- // Feature Map in NHWC layout
- // - Dimension(0) -> Batch
- // - Dimension(1) -> Depth
- // - Dimension(2) -> Height
- // - Dimension(3) -> Width
- const auto batch = dim(0);
- const auto depth = dim(1);
- const auto height = dim(2);
- const auto width = dim(3);
-
- return {batch, depth, height, width};
- }
- else
- {
- throw std::runtime_error("Wrong Layout");
- }
-}
-
-// Extended dimension is filled with 1.
-void Shape::extendRank(int to_rank)
-{
- assert(to_rank - rank() >= 0);
- _dimensions.insert(_dimensions.cbegin(), to_rank - rank(), 1);
-}
-
-uint64_t Shape::num_elements() const
-{
- // if dimension is 0, it means unspecified and cannot calculate the total number of elements
- if (std::any_of(_dimensions.begin(), _dimensions.end(),
- [](const int32_t &v) { return v == UNSPECIFIED_DIM; }))
- throw std::runtime_error("num_elements() cannot calculate when any dimension is unspecified");
-
- return std::accumulate(_dimensions.cbegin(), _dimensions.cend(), UINT64_C(1),
- std::multiplies<uint64_t>());
-}
-
-Shape permuteShape(const Shape &shape, Layout frontend_layout, Layout backend_layout)
-{
- assert(shape.rank() <= Shape::MAX_RANK);
- Shape backend_shape{shape};
- if (shape.rank() >= 4 && frontend_layout == Layout::NHWC && backend_layout == Layout::NCHW)
- {
- // Permutation changing layout beyond 4-D is not supported yet
- assert(shape.rank() <= 4);
- backend_shape.dim(1) = shape.dim(3);
- backend_shape.dim(2) = shape.dim(1);
- backend_shape.dim(3) = shape.dim(2);
- }
- else if (shape.rank() >= 4 && frontend_layout == Layout::NCHW && backend_layout == Layout::NHWC)
- {
- // Permutation changing layout beyond 4-D is not supported yet
- assert(shape.rank() <= 4);
- backend_shape.dim(1) = shape.dim(2);
- backend_shape.dim(2) = shape.dim(3);
- backend_shape.dim(3) = shape.dim(1);
- }
- return backend_shape;
-}
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/TypeInfo.cc b/runtime/onert/core/src/ir/TypeInfo.cc
deleted file mode 100644
index ab8af287e..000000000
--- a/runtime/onert/core/src/ir/TypeInfo.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/TypeInfo.h"
-
-namespace onert
-{
-namespace ir
-{
-
-bool operator==(const TypeInfo &lhs, const TypeInfo &rhs)
-{
- if (lhs.type() != rhs.type())
- {
- return false;
- }
-
- if (lhs.offset() != rhs.offset())
- {
- return false;
- }
-
- if (lhs.scale() != rhs.scale())
- {
- return false;
- }
-
- return true;
-}
-
-bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs) { return !(lhs == rhs); }
-
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ArgMax.cc b/runtime/onert/core/src/ir/operation/ArgMax.cc
deleted file mode 100644
index f3bd8fd73..000000000
--- a/runtime/onert/core/src/ir/operation/ArgMax.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ArgMax.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ArgMax::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ArgMax::ArgMax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/BCQFullyConnected.cc b/runtime/onert/core/src/ir/operation/BCQFullyConnected.cc
deleted file mode 100644
index 9dc54e6e9..000000000
--- a/runtime/onert/core/src/ir/operation/BCQFullyConnected.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/BCQFullyConnected.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void BCQFullyConnected::accept(OperationVisitor &v) const { v.visit(*this); }
-
-BCQFullyConnected::BCQFullyConnected(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createExact(5u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/BCQGather.cc b/runtime/onert/core/src/ir/operation/BCQGather.cc
deleted file mode 100644
index 80efa6460..000000000
--- a/runtime/onert/core/src/ir/operation/BCQGather.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/BCQGather.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void BCQGather::accept(OperationVisitor &v) const { v.visit(*this); }
-
-BCQGather::BCQGather(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(4u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/BatchMatMul.cc b/runtime/onert/core/src/ir/operation/BatchMatMul.cc
deleted file mode 100644
index b9616158d..000000000
--- a/runtime/onert/core/src/ir/operation/BatchMatMul.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/BatchMatMul.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void BatchMatMul::accept(OperationVisitor &v) const { v.visit(*this); }
-
-BatchMatMul::BatchMatMul(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/BatchToSpaceND.cc b/runtime/onert/core/src/ir/operation/BatchToSpaceND.cc
deleted file mode 100644
index 34be79dd2..000000000
--- a/runtime/onert/core/src/ir/operation/BatchToSpaceND.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/BatchToSpaceND.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void BatchToSpaceND::accept(OperationVisitor &v) const { v.visit(*this); }
-
-BatchToSpaceND::BatchToSpaceND(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createInRange(2u, 3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/BinaryArithmetic.cc b/runtime/onert/core/src/ir/operation/BinaryArithmetic.cc
deleted file mode 100644
index 2b1422c73..000000000
--- a/runtime/onert/core/src/ir/operation/BinaryArithmetic.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/BinaryArithmetic.h"
-
-#include <cassert>
-#include <unordered_map>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void BinaryArithmetic::accept(OperationVisitor &v) const { v.visit(*this); }
-
-BinaryArithmetic::BinaryArithmetic(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-
-std::string BinaryArithmetic::name() const
-{
- using ArithmeticType = onert::ir::operation::BinaryArithmetic::ArithmeticType;
- static const std::unordered_map<ArithmeticType, std::string> name_map{
- {ArithmeticType::ADD, std::string{"Add"}},
- {ArithmeticType::SUB, std::string{"Sub"}},
- {ArithmeticType::MUL, std::string{"Mul"}},
- {ArithmeticType::DIV, std::string{"Div"}}};
- return name_map.at(_param.arithmetic_type);
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/BroadcastTo.cc b/runtime/onert/core/src/ir/operation/BroadcastTo.cc
deleted file mode 100644
index a8f5e59cf..000000000
--- a/runtime/onert/core/src/ir/operation/BroadcastTo.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/BroadcastTo.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void BroadcastTo::accept(OperationVisitor &v) const { v.visit(*this); }
-
-BroadcastTo::BroadcastTo(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Comparison.cc b/runtime/onert/core/src/ir/operation/Comparison.cc
deleted file mode 100644
index 2f6775411..000000000
--- a/runtime/onert/core/src/ir/operation/Comparison.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Comparison.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Comparison::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Comparison::Comparison(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Concat.cc b/runtime/onert/core/src/ir/operation/Concat.cc
deleted file mode 100644
index 608bc29a6..000000000
--- a/runtime/onert/core/src/ir/operation/Concat.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Concat.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Concat::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Concat::Concat(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createAtLeast(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Conv2D.cc b/runtime/onert/core/src/ir/operation/Conv2D.cc
deleted file mode 100644
index 3a2e1d1fe..000000000
--- a/runtime/onert/core/src/ir/operation/Conv2D.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Conv2D.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Conv2D::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Conv2D::Conv2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ConvertFp16ToFp32.cc b/runtime/onert/core/src/ir/operation/ConvertFp16ToFp32.cc
deleted file mode 100644
index 676e039fa..000000000
--- a/runtime/onert/core/src/ir/operation/ConvertFp16ToFp32.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ConvertFp16ToFp32.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ConvertFp16ToFp32::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ConvertFp16ToFp32::ConvertFp16ToFp32(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ConvertFp32ToFp16.cc b/runtime/onert/core/src/ir/operation/ConvertFp32ToFp16.cc
deleted file mode 100644
index bcfcbfc04..000000000
--- a/runtime/onert/core/src/ir/operation/ConvertFp32ToFp16.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ConvertFp32ToFp16.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ConvertFp32ToFp16::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ConvertFp32ToFp16::ConvertFp32ToFp16(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Custom.cc b/runtime/onert/core/src/ir/operation/Custom.cc
deleted file mode 100644
index 25c53e1ba..000000000
--- a/runtime/onert/core/src/ir/operation/Custom.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Custom.h"
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Custom::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Custom::Custom(OperandConstraint input_constr, const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, std::string id, const Userdata &userdata)
- : Operation{input_constr, inputs, outputs}, _id(std::move(id)), _userdata(userdata)
-{
-}
-
-const std::string &Custom::id() const { return _id; }
-
-const Custom::Userdata &Custom::userdata() const { return _userdata; }
-
-std::string Custom::name() const { return id(); }
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/DepthToSpace.cc b/runtime/onert/core/src/ir/operation/DepthToSpace.cc
deleted file mode 100644
index f2d6c7c1b..000000000
--- a/runtime/onert/core/src/ir/operation/DepthToSpace.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/DepthToSpace.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void DepthToSpace::accept(OperationVisitor &v) const { v.visit(*this); }
-
-DepthToSpace::DepthToSpace(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/DepthwiseConv2D.cc b/runtime/onert/core/src/ir/operation/DepthwiseConv2D.cc
deleted file mode 100644
index d587a5591..000000000
--- a/runtime/onert/core/src/ir/operation/DepthwiseConv2D.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/DepthwiseConv2D.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void DepthwiseConv2D::accept(OperationVisitor &v) const { v.visit(*this); }
-
-DepthwiseConv2D::DepthwiseConv2D(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Einsum.cc b/runtime/onert/core/src/ir/operation/Einsum.cc
deleted file mode 100644
index 3c1473aaa..000000000
--- a/runtime/onert/core/src/ir/operation/Einsum.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Einsum.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Einsum::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Einsum::Einsum(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createAtLeast(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ElementwiseActivation.cc b/runtime/onert/core/src/ir/operation/ElementwiseActivation.cc
deleted file mode 100644
index f6718b656..000000000
--- a/runtime/onert/core/src/ir/operation/ElementwiseActivation.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ElementwiseActivation.h"
-
-#include <cassert>
-#include <unordered_map>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ElementwiseActivation::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ElementwiseActivation::ElementwiseActivation(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
- if (param.op_type == Type::LOGISTIC)
- {
- assert(param.alpha == 0.0f && param.beta == 0.0f && "Logistic will be supported only as "
- "sigmoid function(L=1, k=1, x0=0). So, do "
- "not use alpha and beta");
- }
- else if (param.op_type == Type::RELU)
- {
- assert(param.alpha >= param.beta && "ReLU's alpha must be equal or greater than beta");
- }
- else if (param.op_type == Type::TANH)
- {
- assert(param.alpha == 1.0f && param.beta == 1.0f && "f(x) = alpha * tanh(beta * x), Tanh is "
- "supported only the values of alpha and "
- "beta are 1.f");
- }
-}
-
-std::string ElementwiseActivation::name() const
-{
- using ElementwiseActivationType = onert::ir::operation::ElementwiseActivation::Type;
- static const std::unordered_map<Type, std::string> name_map{
- {ElementwiseActivationType::ELU, "ELU"},
- {ElementwiseActivationType::LOGISTIC, "Logistic"},
- {ElementwiseActivationType::RELU, "ReLU"},
- {ElementwiseActivationType::TANH, "Tanh"},
- {ElementwiseActivationType::LEAKY_RELU, "LeakyRelu"}};
- return name_map.at(_param.op_type);
-}
-
-float ElementwiseActivation::infinity = std::numeric_limits<float>::infinity();
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ElementwiseBinary.cc b/runtime/onert/core/src/ir/operation/ElementwiseBinary.cc
deleted file mode 100644
index 3287fc0a3..000000000
--- a/runtime/onert/core/src/ir/operation/ElementwiseBinary.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ElementwiseBinary.h"
-
-#include <cassert>
-#include <unordered_map>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ElementwiseBinary::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ElementwiseBinary::ElementwiseBinary(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-
-std::string ElementwiseBinary::name() const
-{
- using ElementwiseBinaryType = onert::ir::operation::ElementwiseBinary::ElementwiseBinaryType;
- static const std::unordered_map<ElementwiseBinaryType, std::string> name_map{
- {ElementwiseBinaryType::LOGICAL_AND, std::string{"LogicalAnd"}},
- {ElementwiseBinaryType::LOGICAL_OR, std::string{"LogicalOr"}},
- {ElementwiseBinaryType::MAX, std::string{"Max"}},
- {ElementwiseBinaryType::MIN, std::string{"Min"}}};
- return name_map.at(_param.op_type);
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ElementwiseUnary.cc b/runtime/onert/core/src/ir/operation/ElementwiseUnary.cc
deleted file mode 100644
index 6a0be7eb8..000000000
--- a/runtime/onert/core/src/ir/operation/ElementwiseUnary.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ElementwiseUnary.h"
-
-#include <cassert>
-#include <unordered_map>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ElementwiseUnary::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ElementwiseUnary::ElementwiseUnary(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs,
- OperandConstraint::createExact(1u)},
- _param{param}
-{
-}
-
-std::string ElementwiseUnary::name() const
-{
- using ElementwiseUnaryType = onert::ir::operation::ElementwiseUnary::Type;
- static const std::unordered_map<ElementwiseUnaryType, std::string> name_map{
- {ElementwiseUnaryType::ABS, std::string{"Abs"}},
- {ElementwiseUnaryType::CAST, std::string{"Cast"}},
- {ElementwiseUnaryType::COS, std::string{"Cos"}},
- {ElementwiseUnaryType::DEQUANTIZE, std::string{"Dequantize"}},
- {ElementwiseUnaryType::ERF, std::string{"Erf"}},
- {ElementwiseUnaryType::EXP, std::string{"Exp"}},
- {ElementwiseUnaryType::FLOOR, std::string{"Floor"}},
- {ElementwiseUnaryType::LOG, std::string{"Log"}},
- {ElementwiseUnaryType::LOGICAL_NOT, std::string{"LogicalNot"}},
- {ElementwiseUnaryType::NEG, std::string{"Neg"}},
- {ElementwiseUnaryType::QUANTIZE, std::string{"Quantize"}},
- {ElementwiseUnaryType::ROUND, std::string{"Round"}},
- {ElementwiseUnaryType::RSQRT, std::string{"RSqrt"}},
- {ElementwiseUnaryType::SIN, std::string{"Sin"}},
- {ElementwiseUnaryType::SQRT, std::string{"Sqrt"}},
- {ElementwiseUnaryType::SQURE, std::string{"Squre"}},
- {ElementwiseUnaryType::ZEROS_LIKE, std::string{"ZerosLike"}}};
- return name_map.at(_param.op_type);
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/EmbeddingLookup.cc b/runtime/onert/core/src/ir/operation/EmbeddingLookup.cc
deleted file mode 100644
index b300b004e..000000000
--- a/runtime/onert/core/src/ir/operation/EmbeddingLookup.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/EmbeddingLookup.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void EmbeddingLookup::accept(OperationVisitor &v) const { v.visit(*this); }
-
-EmbeddingLookup::EmbeddingLookup(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ExpandDims.cc b/runtime/onert/core/src/ir/operation/ExpandDims.cc
deleted file mode 100644
index 3f555bd23..000000000
--- a/runtime/onert/core/src/ir/operation/ExpandDims.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ExpandDims.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ExpandDims::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ExpandDims::ExpandDims(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Fill.cc b/runtime/onert/core/src/ir/operation/Fill.cc
deleted file mode 100644
index b8b97d1c0..000000000
--- a/runtime/onert/core/src/ir/operation/Fill.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Fill.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Fill::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Fill::Fill(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/FullyConnected.cc b/runtime/onert/core/src/ir/operation/FullyConnected.cc
deleted file mode 100644
index 9837a3137..000000000
--- a/runtime/onert/core/src/ir/operation/FullyConnected.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/FullyConnected.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void FullyConnected::accept(OperationVisitor &v) const { v.visit(*this); }
-
-FullyConnected::FullyConnected(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createInRange(2u, 3u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/FusedBatchNorm.cc b/runtime/onert/core/src/ir/operation/FusedBatchNorm.cc
deleted file mode 100644
index 7b9301ea6..000000000
--- a/runtime/onert/core/src/ir/operation/FusedBatchNorm.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/FusedBatchNorm.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void FusedBatchNorm::accept(OperationVisitor &v) const { v.visit(*this); }
-
-FusedBatchNorm::FusedBatchNorm(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createAtLeast(5u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Gather.cc b/runtime/onert/core/src/ir/operation/Gather.cc
deleted file mode 100644
index 11d46e75b..000000000
--- a/runtime/onert/core/src/ir/operation/Gather.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Gather.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Gather::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Gather::Gather(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/HashtableLookup.cc b/runtime/onert/core/src/ir/operation/HashtableLookup.cc
deleted file mode 100644
index e9a7a82ff..000000000
--- a/runtime/onert/core/src/ir/operation/HashtableLookup.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/HashtableLookup.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void HashtableLookup::accept(OperationVisitor &v) const { v.visit(*this); }
-
-HashtableLookup::HashtableLookup(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/If.cc b/runtime/onert/core/src/ir/operation/If.cc
deleted file mode 100644
index 599751dfd..000000000
--- a/runtime/onert/core/src/ir/operation/If.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "ir/operation/If.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void If::accept(OperationVisitor &v) const { v.visit(*this); }
-If::If(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createAny(), inputs, outputs}, _param{param}
-{
-}
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/InstanceNorm.cc b/runtime/onert/core/src/ir/operation/InstanceNorm.cc
deleted file mode 100644
index 2334560ef..000000000
--- a/runtime/onert/core/src/ir/operation/InstanceNorm.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/InstanceNorm.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void InstanceNorm::accept(OperationVisitor &v) const { v.visit(*this); }
-
-InstanceNorm::InstanceNorm(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/L2Normalization.cc b/runtime/onert/core/src/ir/operation/L2Normalization.cc
deleted file mode 100644
index 9a7d3eb61..000000000
--- a/runtime/onert/core/src/ir/operation/L2Normalization.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/L2Normalization.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void L2Normalization::accept(OperationVisitor &v) const { v.visit(*this); }
-
-L2Normalization::L2Normalization(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/LSTM.cc b/runtime/onert/core/src/ir/operation/LSTM.cc
deleted file mode 100644
index 5cd7c793a..000000000
--- a/runtime/onert/core/src/ir/operation/LSTM.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/LSTM.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void LSTM::accept(OperationVisitor &v) const { v.visit(*this); }
-
-LSTM::LSTM(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createInRange(20u, 24u), inputs, outputs}, _param{param}
-{
-}
-
-std::string LSTM::name() const
-{
- if (getOutputs().at(Output::SCRATCH_BUFFER).undefined())
- return std::string{"UnidirectionalSequenceLSTM"};
- else
- return Operation::name();
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/LocalResponseNormalization.cc b/runtime/onert/core/src/ir/operation/LocalResponseNormalization.cc
deleted file mode 100644
index 1ae97c142..000000000
--- a/runtime/onert/core/src/ir/operation/LocalResponseNormalization.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/LocalResponseNormalization.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void LocalResponseNormalization::accept(OperationVisitor &v) const { v.visit(*this); }
-
-LocalResponseNormalization::LocalResponseNormalization(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/LogSoftmax.cc b/runtime/onert/core/src/ir/operation/LogSoftmax.cc
deleted file mode 100644
index 73c6580ec..000000000
--- a/runtime/onert/core/src/ir/operation/LogSoftmax.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/LogSoftmax.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void LogSoftmax::accept(OperationVisitor &v) const { v.visit(*this); }
-
-LogSoftmax::LogSoftmax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/LowerInfo.cc b/runtime/onert/core/src/ir/operation/LowerInfo.cc
deleted file mode 100644
index 249918bd6..000000000
--- a/runtime/onert/core/src/ir/operation/LowerInfo.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/LowerInfo.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-LowerInfo::LowerInfo(const backend::Backend *backend, Layout layout)
- : _permute_factor{backend, layout}
-{
- // DO NOTHING
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/MatrixBandPart.cc b/runtime/onert/core/src/ir/operation/MatrixBandPart.cc
deleted file mode 100644
index bac31f13e..000000000
--- a/runtime/onert/core/src/ir/operation/MatrixBandPart.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/MatrixBandPart.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void MatrixBandPart::accept(OperationVisitor &v) const { v.visit(*this); }
-
-MatrixBandPart::MatrixBandPart(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/OneHot.cc b/runtime/onert/core/src/ir/operation/OneHot.cc
deleted file mode 100644
index 22935e7d6..000000000
--- a/runtime/onert/core/src/ir/operation/OneHot.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/OneHot.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void OneHot::accept(OperationVisitor &v) const { v.visit(*this); }
-
-OneHot::OneHot(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(4u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/PReLU.cc b/runtime/onert/core/src/ir/operation/PReLU.cc
deleted file mode 100644
index a2e37e0ad..000000000
--- a/runtime/onert/core/src/ir/operation/PReLU.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/PReLU.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void PReLU::accept(OperationVisitor &v) const { v.visit(*this); }
-
-PReLU::PReLU(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Pack.cc b/runtime/onert/core/src/ir/operation/Pack.cc
deleted file mode 100644
index 784d4162a..000000000
--- a/runtime/onert/core/src/ir/operation/Pack.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "ir/operation/Pack.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void Pack::accept(OperationVisitor &v) const { v.visit(*this); }
-Pack::Pack(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createAtLeast(1u), inputs, outputs}, _param{param}
-{
-}
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Pad.cc b/runtime/onert/core/src/ir/operation/Pad.cc
deleted file mode 100644
index 0c56e92e3..000000000
--- a/runtime/onert/core/src/ir/operation/Pad.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Pad.h"
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Pad::accept(OperationVisitor &v) const { v.visit(*this); }
-
-// PAD: 2 inputs
-// PADV2: 3 inputs
-Pad::Pad(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createInRange(2u, 3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Permute.cc b/runtime/onert/core/src/ir/operation/Permute.cc
deleted file mode 100644
index eefb6c542..000000000
--- a/runtime/onert/core/src/ir/operation/Permute.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Permute.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Permute::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Permute::Permute(const OperandIndex &input, const OperandIndex &output, Type type)
- : Operation{OperandConstraint::createExact(1u)}, _type{type}
-{
- setInputs({input});
- setOutputs({output});
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Pool2D.cc b/runtime/onert/core/src/ir/operation/Pool2D.cc
deleted file mode 100644
index 761d14c3d..000000000
--- a/runtime/onert/core/src/ir/operation/Pool2D.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Pool2D.h"
-
-#include <cassert>
-#include <unordered_map>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Pool2D::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Pool2D::Pool2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-
-std::string Pool2D::name() const
-{
- using PoolType = onert::ir::operation::Pool2D::PoolType;
- static const std::unordered_map<PoolType, std::string> name_map{
- {PoolType::AVG, "Avg" + std::string{toString(opcode())}},
- {PoolType::L2, "L2" + std::string{toString(opcode())}},
- {PoolType::MAX, "Max" + std::string{toString(opcode())}}};
- return name_map.at(_param.op_type);
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Pow.cc b/runtime/onert/core/src/ir/operation/Pow.cc
deleted file mode 100644
index 940b1391a..000000000
--- a/runtime/onert/core/src/ir/operation/Pow.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Pow.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Pow::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Pow::Pow(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/RNN.cc b/runtime/onert/core/src/ir/operation/RNN.cc
deleted file mode 100644
index 298c5e745..000000000
--- a/runtime/onert/core/src/ir/operation/RNN.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/RNN.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void RNN::accept(OperationVisitor &v) const { v.visit(*this); }
-
-RNN::RNN(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(5u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Range.cc b/runtime/onert/core/src/ir/operation/Range.cc
deleted file mode 100644
index 96ab04c1b..000000000
--- a/runtime/onert/core/src/ir/operation/Range.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Range.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Range::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Range::Range(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Rank.cc b/runtime/onert/core/src/ir/operation/Rank.cc
deleted file mode 100644
index c357e9018..000000000
--- a/runtime/onert/core/src/ir/operation/Rank.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Rank.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Rank::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Rank::Rank(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Reduce.cc b/runtime/onert/core/src/ir/operation/Reduce.cc
deleted file mode 100644
index d6a1d953c..000000000
--- a/runtime/onert/core/src/ir/operation/Reduce.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Reduce.h"
-
-#include <cassert>
-#include <unordered_map>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Reduce::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Reduce::Reduce(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-
-std::string Reduce::name() const
-{
- using ReduceType = onert::ir::operation::Reduce::ReduceType;
- static const std::unordered_map<ReduceType, std::string> name_map{
- {ReduceType::ALL, std::string{toString(opcode())} + "All"},
- {ReduceType::ANY, std::string{toString(opcode())} + "Any"},
- {ReduceType::MAX, std::string{toString(opcode())} + "Max"},
- {ReduceType::MEAN, std::string{toString(opcode())} + "Mean"},
- {ReduceType::MIN, std::string{toString(opcode())} + "Min"},
- {ReduceType::PROD, std::string{toString(opcode())} + "Prod"},
- {ReduceType::SUM, std::string{toString(opcode())} + "SUM"}};
- return name_map.at(_param.reduce_type);
- // return std::string(toString(opcode())) + reduce_type_str_map.at(_param.reduce_type);
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Reshape.cc b/runtime/onert/core/src/ir/operation/Reshape.cc
deleted file mode 100644
index 92aa89ac6..000000000
--- a/runtime/onert/core/src/ir/operation/Reshape.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Reshape.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Reshape::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Reshape::Reshape(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param(param)
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ResizeBilinear.cc b/runtime/onert/core/src/ir/operation/ResizeBilinear.cc
deleted file mode 100644
index 71925bb44..000000000
--- a/runtime/onert/core/src/ir/operation/ResizeBilinear.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ResizeBilinear.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ResizeBilinear::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ResizeBilinear::ResizeBilinear(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createInRange(1u, 2u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/ResizeNearestNeighbor.cc b/runtime/onert/core/src/ir/operation/ResizeNearestNeighbor.cc
deleted file mode 100644
index 98d0b5f26..000000000
--- a/runtime/onert/core/src/ir/operation/ResizeNearestNeighbor.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/ResizeNearestNeighbor.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void ResizeNearestNeighbor::accept(OperationVisitor &v) const { v.visit(*this); }
-
-ResizeNearestNeighbor::ResizeNearestNeighbor(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createInRange(1u, 2u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Reverse.cc b/runtime/onert/core/src/ir/operation/Reverse.cc
deleted file mode 100644
index 4b3c1e1af..000000000
--- a/runtime/onert/core/src/ir/operation/Reverse.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Reverse.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Reverse::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Reverse::Reverse(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Select.cc b/runtime/onert/core/src/ir/operation/Select.cc
deleted file mode 100644
index 1f22b5234..000000000
--- a/runtime/onert/core/src/ir/operation/Select.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Select.h"
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Select::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Select::Select(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Shape.cc b/runtime/onert/core/src/ir/operation/Shape.cc
deleted file mode 100644
index 2a63d6dcf..000000000
--- a/runtime/onert/core/src/ir/operation/Shape.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Shape.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Shape::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Shape::Shape(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Slice.cc b/runtime/onert/core/src/ir/operation/Slice.cc
deleted file mode 100644
index 888b563fb..000000000
--- a/runtime/onert/core/src/ir/operation/Slice.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Slice.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Slice::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Slice::Slice(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Softmax.cc b/runtime/onert/core/src/ir/operation/Softmax.cc
deleted file mode 100644
index 3f1aa0af1..000000000
--- a/runtime/onert/core/src/ir/operation/Softmax.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Softmax.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Softmax::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Softmax::Softmax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/SpaceToBatchND.cc b/runtime/onert/core/src/ir/operation/SpaceToBatchND.cc
deleted file mode 100644
index 53fab4fa9..000000000
--- a/runtime/onert/core/src/ir/operation/SpaceToBatchND.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/SpaceToBatchND.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void SpaceToBatchND::accept(OperationVisitor &v) const { v.visit(*this); }
-
-SpaceToBatchND::SpaceToBatchND(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/SpaceToDepth.cc b/runtime/onert/core/src/ir/operation/SpaceToDepth.cc
deleted file mode 100644
index d8a45aee5..000000000
--- a/runtime/onert/core/src/ir/operation/SpaceToDepth.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/SpaceToDepth.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void SpaceToDepth::accept(OperationVisitor &v) const { v.visit(*this); }
-
-SpaceToDepth::SpaceToDepth(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Split.cc b/runtime/onert/core/src/ir/operation/Split.cc
deleted file mode 100644
index b538e9206..000000000
--- a/runtime/onert/core/src/ir/operation/Split.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "ir/operation/Split.h"
-#include <cassert>
-#include "ir/OperationVisitor.h"
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void Split::accept(OperationVisitor &v) const { v.visit(*this); }
-Split::Split(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
-{
-}
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/SplitV.cc b/runtime/onert/core/src/ir/operation/SplitV.cc
deleted file mode 100644
index e638c9ac9..000000000
--- a/runtime/onert/core/src/ir/operation/SplitV.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "ir/operation/SplitV.h"
-#include <cassert>
-#include "ir/OperationVisitor.h"
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void SplitV::accept(OperationVisitor &v) const { v.visit(*this); }
-SplitV::SplitV(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
-{
-}
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/SquaredDifference.cc b/runtime/onert/core/src/ir/operation/SquaredDifference.cc
deleted file mode 100644
index 49e58aaf2..000000000
--- a/runtime/onert/core/src/ir/operation/SquaredDifference.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/SquaredDifference.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void SquaredDifference::accept(OperationVisitor &v) const { v.visit(*this); }
-
-SquaredDifference::SquaredDifference(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Squeeze.cc b/runtime/onert/core/src/ir/operation/Squeeze.cc
deleted file mode 100644
index 8cf928fb4..000000000
--- a/runtime/onert/core/src/ir/operation/Squeeze.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Squeeze.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Squeeze::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Squeeze::Squeeze(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param(param)
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/StatelessRandomUniform.cc b/runtime/onert/core/src/ir/operation/StatelessRandomUniform.cc
deleted file mode 100644
index cbb0ff251..000000000
--- a/runtime/onert/core/src/ir/operation/StatelessRandomUniform.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/StatelessRandomUniform.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void StatelessRandomUniform::accept(OperationVisitor &v) const { v.visit(*this); }
-
-StatelessRandomUniform::StatelessRandomUniform(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/StridedSlice.cc b/runtime/onert/core/src/ir/operation/StridedSlice.cc
deleted file mode 100644
index 2a7905995..000000000
--- a/runtime/onert/core/src/ir/operation/StridedSlice.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/StridedSlice.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void StridedSlice::accept(OperationVisitor &v) const { v.visit(*this); }
-
-StridedSlice::StridedSlice(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(4u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Tile.cc b/runtime/onert/core/src/ir/operation/Tile.cc
deleted file mode 100644
index 5ba3df2ad..000000000
--- a/runtime/onert/core/src/ir/operation/Tile.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Tile.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Tile::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Tile::Tile(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/TopKV2.cc b/runtime/onert/core/src/ir/operation/TopKV2.cc
deleted file mode 100644
index a5e6c6a85..000000000
--- a/runtime/onert/core/src/ir/operation/TopKV2.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/TopKV2.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void TopKV2::accept(OperationVisitor &v) const { v.visit(*this); }
-
-TopKV2::TopKV2(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Transpose.cc b/runtime/onert/core/src/ir/operation/Transpose.cc
deleted file mode 100644
index 997f98ab0..000000000
--- a/runtime/onert/core/src/ir/operation/Transpose.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/Transpose.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void Transpose::accept(OperationVisitor &v) const { v.visit(*this); }
-
-Transpose::Transpose(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
- : Operation{OperandConstraint::createExact(2u), inputs, outputs}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/TransposeConv.cc b/runtime/onert/core/src/ir/operation/TransposeConv.cc
deleted file mode 100644
index 7f29ca44e..000000000
--- a/runtime/onert/core/src/ir/operation/TransposeConv.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir/operation/TransposeConv.h"
-
-#include <cassert>
-
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-
-void TransposeConv::accept(OperationVisitor &v) const { v.visit(*this); }
-
-TransposeConv::TransposeConv(const OperandIndexSequence &inputs,
- const OperandIndexSequence &outputs, const Param &param)
- : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
-{
-}
-
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/Unpack.cc b/runtime/onert/core/src/ir/operation/Unpack.cc
deleted file mode 100644
index 67aa54ab5..000000000
--- a/runtime/onert/core/src/ir/operation/Unpack.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "ir/operation/Unpack.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void Unpack::accept(OperationVisitor &v) const { v.visit(*this); }
-Unpack::Unpack(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
-{
-}
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/operation/While.cc b/runtime/onert/core/src/ir/operation/While.cc
deleted file mode 100644
index 2505c60e3..000000000
--- a/runtime/onert/core/src/ir/operation/While.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "ir/operation/While.h"
-#include "ir/OperationVisitor.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace operation
-{
-void While::accept(OperationVisitor &v) const { v.visit(*this); }
-While::While(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
- const Param &param)
- : Operation{OperandConstraint::createAny(), inputs, outputs}, _param{param}
-{
-}
-} // namespace operation
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/verifier/Verifier.cc b/runtime/onert/core/src/ir/verifier/Verifier.cc
deleted file mode 100644
index 489845971..000000000
--- a/runtime/onert/core/src/ir/verifier/Verifier.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Verifier.h"
-
-#include "ir/Graph.h"
-#include "ir/OperationIndexMap.h"
-
-#include "util/logging.h"
-
-namespace onert
-{
-namespace ir
-{
-namespace verifier
-{
-
-//
-// DAGChecker
-//
-
-bool DAGChecker::verify(const Graph &graph) const noexcept
-{
- auto &operations = graph.operations();
- bool cyclic = false;
-
- OperationIndexMap<bool> visited;
- operations.iterate(
- [&](const OperationIndex &index, const Operation &) { visited[index] = false; });
- OperationIndexMap<bool> on_stack = visited; // Copy from visited
-
- std::function<void(const OperationIndex &index, const Operation &)> dfs_recursive =
- [&](const OperationIndex &index, const Operation &node) -> void {
- if (on_stack[index])
- cyclic = true;
- if (visited[index])
- return;
- visited[index] = true;
- on_stack[index] = true;
-
- for (auto output : node.getOutputs() | Remove::DUPLICATED | Remove::UNDEFINED)
- {
- const auto &operand = graph.operands().at(output);
- for (const auto &use : operand.getUses())
- {
- dfs_recursive(use, graph.operations().at(use));
- }
- }
-
- on_stack[index] = false;
- };
-
- operations.iterate(dfs_recursive);
-
- return !cyclic;
-}
-
-//
-// EdgeConsistencyVerifier
-//
-
-bool EdgeConsistencyChecker::verify(const Graph &graph) const noexcept
-{
- auto &operations = graph.operations();
- uint32_t errors = 0;
- operations.iterate([&](const OperationIndex &index, const Operation &node) {
- for (auto operand_index : node.getInputs() | ir::Remove::UNDEFINED)
- {
- try
- {
- auto &operand = graph.operands().at(operand_index);
- bool operand_has_use = operand.getUses().contains(index);
- if (!operand_has_use)
- {
- VERBOSE(EdgeConsistencyChecker) << "[ERROR] EDGE MISMATCH : Missing USE edge - Operand "
- << operand_index << " to Operation " << index
- << std::endl;
- errors += 1;
- }
- }
- catch (const std::out_of_range &e)
- {
- VERBOSE(EdgeConsistencyChecker)
- << "[ERROR] OPEARAND NOT FOUND : Operation " << index << " has Operand "
- << operand_index << ", but the operand object is not present in the graph" << std::endl;
- errors += 1;
- }
- }
- for (auto operand_index : node.getOutputs() | ir::Remove::UNDEFINED)
- {
- try
- {
- auto &operand = graph.operands().at(operand_index);
- if (operand.getDef() != index)
- {
- VERBOSE(EdgeConsistencyChecker) << "[ERROR] EDGE MISMATCH : Missing DEF edge - Operand"
- << operand_index << " to Operation " << index
- << std::endl;
- errors += 1;
- }
- }
- catch (const std::out_of_range &e)
- {
- VERBOSE(EdgeConsistencyChecker)
- << "[ERROR] OPEARAND NOT FOUND : Operation " << index << " has Operand "
- << operand_index << ", but the operand object is not present in the graph" << std::endl;
- errors += 1;
- }
- }
- });
-
- VERBOSE(EdgeConsistencyChecker) << "Total Number of errors : " << errors << std::endl;
-
- return errors == 0;
-}
-
-} // namespace verifier
-} // namespace ir
-} // namespace onert
diff --git a/runtime/onert/core/src/ir/verifier/Verifier.h b/runtime/onert/core/src/ir/verifier/Verifier.h
deleted file mode 100644
index 0c7b57b04..000000000
--- a/runtime/onert/core/src/ir/verifier/Verifier.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_GRAPH_VERIFIER_VERIFIER_H__
-#define __ONERT_GRAPH_VERIFIER_VERIFIER_H__
-
-namespace onert
-{
-namespace ir
-{
-class Graph;
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-namespace verifier
-{
-
-struct IVerifier
-{
- virtual ~IVerifier() = default;
- virtual bool verify(const Graph &graph) const noexcept = 0;
-};
-
-} // namespace verifier
-} // namespace ir
-} // namespace onert
-
-namespace onert
-{
-namespace ir
-{
-namespace verifier
-{
-
-class DAGChecker : public IVerifier
-{
-public:
- bool verify(const Graph &graph) const noexcept override;
-};
-
-class EdgeConsistencyChecker : public IVerifier
-{
-public:
- bool verify(const Graph &graph) const noexcept override;
-};
-
-} // namespace verifier
-} // namespace ir
-} // namespace onert
-
-#endif // __ONERT_GRAPH_VERIFIER_VERIFIER_H__
diff --git a/runtime/onert/core/src/library_info.cc b/runtime/onert/core/src/library_info.cc
deleted file mode 100644
index 6d7579cca..000000000
--- a/runtime/onert/core/src/library_info.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-volatile const char info[] = "library information : runtime=onert";
diff --git a/runtime/onert/core/src/util/ConfigSource.cc b/runtime/onert/core/src/util/ConfigSource.cc
deleted file mode 100644
index 45cce662e..000000000
--- a/runtime/onert/core/src/util/ConfigSource.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/ConfigSource.h"
-#include "util/GeneralConfigSource.h"
-#include "util/EnvConfigSource.h"
-
-#include <array>
-#include <algorithm>
-#include <cassert>
-
-#include <memory>
-
-namespace onert
-{
-namespace util
-{
-
-static std::unique_ptr<IConfigSource> _source;
-
-void config_source(std::unique_ptr<IConfigSource> &&source) { _source = std::move(source); }
-
-static IConfigSource *config_source()
-{
- if (!_source)
- {
-#ifdef ENVVAR_FOR_DEFAULT_CONFIG
- // Default ConfigSource is EnvConfigSource
- _source = std::make_unique<EnvConfigSource>();
-#else
- _source = std::make_unique<GeneralConfigSource>();
-#endif // ENVVAR_FOR_DEFAULT_CONFIG
- }
- return _source.get();
-}
-
-static std::string getConfigOrDefault(const std::string &key)
-{
- static std::unordered_map<std::string, std::string> defaults;
- if (defaults.empty())
- {
-#define CONFIG(Name, Type, Default) \
- { \
- auto name = std::string{#Name}; \
- defaults.emplace(name, std::string{Default}); \
- }
-
-#include "util/Config.lst"
-
-#undef CONFIG
- }
-
- // Treat empty string and absence of the value to be the same
- auto ret = config_source()->get(key);
- if (ret.empty())
- {
- auto itr = defaults.find(key);
- if (itr != defaults.end())
- {
- // Return the default value if exists
- ret = itr->second;
- }
- }
-
- return ret;
-}
-
-bool toBool(const std::string &val)
-{
- static const std::array<std::string, 5> false_list{"0", "OFF", "FALSE", "N", "NO"};
- auto false_found = std::find(false_list.begin(), false_list.end(), val);
- return false_found == false_list.end();
-}
-
-int toInt(const std::string &val) { return std::stoi(val); }
-
-bool getConfigBool(const std::string &key)
-{
- auto raw = getConfigOrDefault(key);
- return toBool(raw);
-}
-
-int getConfigInt(const std::string &key)
-{
- auto raw = getConfigOrDefault(key);
- return toInt(raw);
-}
-
-std::string getConfigString(const std::string &key) { return getConfigOrDefault(key); }
-
-} // namespace util
-} // namespace onert
-
-namespace onert
-{
-namespace util
-{
-namespace config
-{
-
-#define CONFIG(Name, Type, Default) const char *Name = #Name;
-
-#include "util/Config.lst"
-
-#undef CONFIG
-
-} // namespace config
-} // namespace util
-} // namespace onert
diff --git a/runtime/onert/core/src/util/EnvConfigSource.cc b/runtime/onert/core/src/util/EnvConfigSource.cc
deleted file mode 100644
index 0d25b7353..000000000
--- a/runtime/onert/core/src/util/EnvConfigSource.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/EnvConfigSource.h"
-
-#include <cstdlib>
-
-namespace onert
-{
-namespace util
-{
-
-std::string EnvConfigSource::get(const std::string &key) const
-{
- const char *value = std::getenv(key.c_str());
- if (value != nullptr)
- {
- return value;
- }
- else
- {
- return GeneralConfigSource::get(key);
- }
-}
-
-} // namespace util
-} // namespace onert
diff --git a/runtime/onert/core/src/util/EventCollector.cc b/runtime/onert/core/src/util/EventCollector.cc
deleted file mode 100644
index de37276bf..000000000
--- a/runtime/onert/core/src/util/EventCollector.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/EventCollector.h"
-
-// C++ standard libraries
-#include <chrono>
-
-// POSIX standard libraries
-#include <sys/time.h>
-#include <sys/resource.h>
-
-namespace
-{
-
-std::string timestamp(void)
-{
- auto now = std::chrono::steady_clock::now();
- return std::to_string(
- std::chrono::duration_cast<std::chrono::microseconds>(now.time_since_epoch()).count());
-}
-
-class DurationEventBuilder
-{
-public:
- DurationEventBuilder(const std::string &ts) : _ts{ts} {}
-
- DurationEvent build(const std::string &tid, const std::string &name, const std::string &ph) const
- {
- DurationEvent evt;
-
- evt.name = name;
- evt.tid = tid;
- evt.ph = ph;
- evt.ts = _ts;
-
- return evt;
- }
-
-private:
- std::string _ts;
-};
-
-#ifdef DEBUG
-inline void emit_rusage(EventRecorder *rec, const std::string &ts)
-{
- struct rusage ru;
-
- getrusage(RUSAGE_SELF, &ru);
- {
- CounterEvent evt;
-
- evt.name = "maxrss";
- evt.ph = "C";
- evt.ts = ts;
- evt.values["value"] = std::to_string(ru.ru_maxrss);
-
- rec->emit(evt);
- }
-
- {
- CounterEvent evt;
-
- evt.name = "minflt";
- evt.ph = "C";
- evt.ts = ts;
- evt.values["value"] = std::to_string(ru.ru_minflt);
-
- rec->emit(evt);
- }
-}
-#endif
-
-} // namespace
-
-void EventCollector::onEvent(const Event &event)
-{
- auto ts = timestamp();
-
- switch (event.edge)
- {
- case Edge::BEGIN:
- _rec->emit(DurationEventBuilder(ts).build(event.backend, event.label, "B"));
- break;
-
- case Edge::END:
- _rec->emit(DurationEventBuilder(ts).build(event.backend, event.label, "E"));
- break;
- }
-
-// TODO: Add resurece measurement(e.g. RSS)
-// when ready with low overhead in release build
-#ifdef DEBUG
- emit_rusage(_rec, ts);
-#endif
-}
diff --git a/runtime/onert/core/src/util/EventCollector.h b/runtime/onert/core/src/util/EventCollector.h
deleted file mode 100644
index 8154be592..000000000
--- a/runtime/onert/core/src/util/EventCollector.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_EVENT_COLLECTOR_H__
-#define __ONERT_UTIL_EVENT_COLLECTOR_H__
-
-#include "util/EventRecorder.h"
-
-class EventCollector
-{
-public:
- enum class Edge
- {
- BEGIN,
- END
- };
-
- struct Event
- {
- Edge edge;
- std::string backend;
- std::string label;
- };
-
-public:
- EventCollector(EventRecorder *rec) : _rec{rec}
- {
- // DO NOTHING
- }
-
-public:
- void onEvent(const Event &event);
-
-protected:
- EventRecorder *_rec;
-};
-
-#endif // __ONERT_UTIL_EVENT_COLLECTOR_H__
diff --git a/runtime/onert/core/src/util/EventCollectorGlobal.cc b/runtime/onert/core/src/util/EventCollectorGlobal.cc
deleted file mode 100644
index 6c03a5b9a..000000000
--- a/runtime/onert/core/src/util/EventCollectorGlobal.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/EventCollectorGlobal.h"
-
-#include <cassert>
-#include <fstream>
-#include <iostream>
-
-#include "util/ConfigSource.h"
-#include "util/EventWriter.h"
-
-namespace onert
-{
-namespace util
-{
-
-EventCollectorGlobal::EventCollectorGlobal() : _recorder{}, _collector{&_recorder}
-{
- // DO NOTHING
-}
-
-EventCollectorGlobal::~EventCollectorGlobal()
-{
- if (!_recorder.empty())
- {
- try
- {
- // TODO Need better way for saved file path than the hardcoded path
- EventWriter{_recorder}.writeToFile("trace.global.json",
- EventWriter::WriteFormat::CHROME_TRACING);
- }
- catch (const std::exception &e)
- {
- std::cerr << "E: Fail to record event in EventCollectorGlobal: " << e.what() << std::endl;
- }
- }
-}
-
-EventCollectorGlobal &EventCollectorGlobal::get()
-{
- static EventCollectorGlobal instance;
- return instance;
-}
-
-EventDurationBlock::EventDurationBlock(const std::string &tag) : _tag{tag}
-{
- auto &glob = EventCollectorGlobal::get();
- glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::BEGIN, "0", _tag});
-}
-EventDurationBlock::~EventDurationBlock()
-{
- auto &glob = EventCollectorGlobal::get();
- glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::END, "0", _tag});
-}
-
-EventDurationManual::EventDurationManual(const std::string &tag) : _tag{tag}, _pair{true} {}
-
-EventDurationManual::~EventDurationManual()
-{
- // Check if it has called begin-end pair
- assert(_pair);
-}
-
-void EventDurationManual::begin()
-{
- _pair = false;
- auto &glob = EventCollectorGlobal::get();
- glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::BEGIN, "0", _tag});
-}
-
-void EventDurationManual::end()
-{
- assert(!_pair);
- _pair = true;
- auto &glob = EventCollectorGlobal::get();
- glob.collector().onEvent(EventCollector::Event{EventCollector::Edge::END, "0", _tag});
-}
-
-} // namespace util
-} // namespace onert
diff --git a/runtime/onert/core/src/util/EventCollectorGlobal.h b/runtime/onert/core/src/util/EventCollectorGlobal.h
deleted file mode 100644
index 1027ec84d..000000000
--- a/runtime/onert/core/src/util/EventCollectorGlobal.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_EVENT_COLLECTOR_GLOBAL_H__
-#define __ONERT_UTIL_EVENT_COLLECTOR_GLOBAL_H__
-
-#include "util/EventRecorder.h"
-#include "util/EventCollector.h"
-
-namespace onert
-{
-namespace util
-{
-
-/**
- * @brief Singleton class for event collection from anywhere in code
- *
- */
-class EventCollectorGlobal
-{
-public:
- /**
- * @brief Get the singleton object of this class
- *
- * @return EventCollectorGlobal& Singleton object
- */
- static EventCollectorGlobal &get();
-
-public:
- /**
- * @brief Getter for event collector object
- *
- * @return EventCollector& Collector object
- */
- EventCollector &collector() { return _collector; }
-
-private:
- EventCollectorGlobal();
- ~EventCollectorGlobal();
-
-private:
- EventRecorder _recorder;
- EventCollector _collector;
-};
-
-/**
- * @brief Helper class for emitting duration event which is handled automatically with ctor/dtor
- *
- */
-class EventDurationBlock
-{
-public:
- /**
- * @brief Raise a duration event with type of BEGIN
- *
- * @param tag A label for the duration event
- */
- EventDurationBlock(const std::string &tag);
- /**
- * @brief Raise a duration event with type of END
- *
- */
- ~EventDurationBlock();
-
-private:
- std::string _tag;
-};
-
-/**
- * @brief Helper class for emitting duration event which is handled manually
- *
- * Usage:
- * {
- * ...
- * EventDurationManual duration("some tag");
- * duration.begin();
- * ...
- * ... // Code for duration
- * ...
- * duration.end();
- * }
- *
- */
-class EventDurationManual
-{
-public:
- /**
- * @brief Construct a new Event Duration Manual object
- *
- * @param tag A label for the duration object
- */
- EventDurationManual(const std::string &tag);
- /**
- * @brief Destroy the Event Duration Manual object
- *
- */
- ~EventDurationManual();
-
- /**
- * @brief Raise a duration event with type of BEGIN
- *
- */
- void begin();
- /**
- * @brief Raise a duration event with type of END
- *
- */
- void end();
-
-private:
- std::string _tag;
- bool _pair;
-};
-
-} // namespace util
-} // namespace onert
-
-/**
- * Helper Macro Definitions
- *
- * HOW TO USE
- *
- * void f(args)
- * {
- * EVENT_DURATION_FUNCTION();
- * ...
- * if(cond)
- * {
- * EVENT_DURATION_REGION("if branch");
- * ...
- * }
- * ...
- * }
- */
-
-#define EVENT_DURATION_FUNCTION() \
- ::onert::util::EventDurationBlock __event_duration__##__LINE__ { __FUNCTION__ }
-
-#define EVENT_DURATION_REGION(tag) \
- ::onert::util::EventDurationBlock __event_duration__##__LINE__ { tag }
-
-#endif // __ONERT_UTIL_EVENT_COLLECTOR_GLOBAL_H__
diff --git a/runtime/onert/core/src/util/EventRecorder.cc b/runtime/onert/core/src/util/EventRecorder.cc
deleted file mode 100644
index 3714e4f02..000000000
--- a/runtime/onert/core/src/util/EventRecorder.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/EventRecorder.h"
-
-void EventRecorder::emit(const DurationEvent &evt)
-{
- std::lock_guard<std::mutex> lock{_mu};
-
- _duration_events.push_back(evt);
-}
-
-void EventRecorder::emit(const CounterEvent &evt)
-{
- std::lock_guard<std::mutex> lock{_mu};
-
- _counter_events.push_back(evt);
-}
diff --git a/runtime/onert/core/src/util/EventRecorder.h b/runtime/onert/core/src/util/EventRecorder.h
deleted file mode 100644
index 7af4c7ddb..000000000
--- a/runtime/onert/core/src/util/EventRecorder.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_EVENT_RECORDER_H__
-#define __ONERT_UTIL_EVENT_RECORDER_H__
-
-#include <map>
-#include <memory>
-#include <mutex>
-
-#include <vector>
-
-struct Event
-{
- std::string name;
- std::string tid;
- std::string ph; /* REQUIRED */
- std::string ts; /* REQUIRED */
-};
-
-struct DurationEvent : public Event
-{
- // TO BE FILLED
-};
-
-struct CounterEvent : public Event
-{
- std::map<std::string, std::string> values;
-};
-
-//
-// Record Event as Chrome Trace Event File Format
-//
-// Refrence: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit
-//
-class EventRecorder
-{
-public:
- EventRecorder() = default;
-
-public:
- void emit(const DurationEvent &evt);
- void emit(const CounterEvent &evt);
-
-public:
- bool empty() { return _duration_events.empty() && _counter_events.empty(); }
- const std::vector<DurationEvent> &duration_events() const { return _duration_events; }
- const std::vector<CounterEvent> &counter_events() const { return _counter_events; }
-
-private:
- std::mutex _mu;
- std::vector<DurationEvent> _duration_events;
- std::vector<CounterEvent> _counter_events;
-};
-
-#endif // __ONERT_UTIL_EVENT_RECORDER_H__
diff --git a/runtime/onert/core/src/util/EventWriter.cc b/runtime/onert/core/src/util/EventWriter.cc
deleted file mode 100644
index dacb40e64..000000000
--- a/runtime/onert/core/src/util/EventWriter.cc
+++ /dev/null
@@ -1,574 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/EventWriter.h"
-
-#include <sstream>
-#include <vector>
-#include <unordered_map>
-#include <json/json.h>
-#include <assert.h>
-#include <utility>
-#include <map>
-#include <set>
-#include <stdint.h>
-#include <fstream>
-
-// json type for Chrome Event Trace
-namespace
-{
-
-std::string quote(const std::string &value)
-{
- std::stringstream ss;
- ss << '"' << value << '"';
- return ss.str();
-}
-
-std::string field(const std::string &k, const std::string &v)
-{
- std::stringstream ss;
- ss << quote(k) << " : " << quote(v);
- return ss.str();
-}
-
-struct Content // One Entry in Chrome Event Trace
-{
- std::vector<std::pair<std::string, std::string>> flds;
- std::vector<std::pair<std::string, std::string>> args;
-};
-
-std::string object(const Content &content)
-{
- std::stringstream ss;
-
- ss << "{ ";
-
- ss << field(content.flds[0].first, content.flds[0].second);
-
- for (uint32_t n = 1; n < content.flds.size(); ++n)
- {
- ss << ", " << field(content.flds.at(n).first, content.flds.at(n).second);
- }
-
- if (content.args.size() > 0)
- {
- ss << ", " << quote("args") << " : { ";
- ss << field(content.args.at(0).first, content.args.at(0).second);
-
- for (uint32_t n = 1; n < content.args.size(); ++n)
- {
- ss << ", " << field(content.args.at(n).first, content.args.at(n).second);
- }
-
- ss << "}";
- }
-
- ss << " }";
-
- return ss.str();
-}
-
-void fill(Content &content, const Event &evt)
-{
- content.flds.emplace_back("name", evt.name);
- content.flds.emplace_back("pid", "0");
- content.flds.emplace_back("tid", evt.tid);
- content.flds.emplace_back("ph", evt.ph);
- content.flds.emplace_back("ts", evt.ts);
-}
-
-std::string object(const DurationEvent &evt)
-{
- Content content;
-
- fill(content, evt);
-
- return ::object(content);
-}
-
-std::string object(const CounterEvent &evt)
-{
- Content content;
-
- fill(content, evt);
-
- for (auto it = evt.values.begin(); it != evt.values.end(); ++it)
- {
- content.args.emplace_back(it->first, it->second);
- }
-
- return ::object(content);
-}
-
-} // namespace
-
-// md table type
-namespace
-{
-
-void writeMDTableRow(std::ostream &os, const std::vector<std::string> &list)
-{
- os << "| ";
- for (auto &key : list)
- {
- os << key << " | ";
- }
- os << "\n";
-}
-
-struct MDContent
-{
- std::string name;
- uint64_t begin_ts;
- uint64_t end_ts;
- uint32_t min_rss;
- uint32_t max_rss;
- uint32_t min_page_reclaims;
- uint32_t max_page_reclaims;
-
- MDContent()
- : begin_ts(0), end_ts(0), min_rss(UINT32_MAX), max_rss(0), min_page_reclaims(UINT32_MAX),
- max_page_reclaims(0)
- {
- // DO NOTHING
- }
-
- virtual ~MDContent() = default;
-
- void updateRss(uint32_t rss)
- {
- if (min_rss == UINT32_MAX)
- min_rss = rss;
- if (max_rss == 0)
- max_rss = rss;
-
- if (min_rss > rss)
- min_rss = rss;
- else if (max_rss < rss)
- max_rss = rss;
- }
-
- void updateMinflt(uint32_t minflt)
- {
- if (min_page_reclaims == UINT32_MAX)
- min_page_reclaims = minflt;
- if (max_page_reclaims == 0)
- max_page_reclaims = minflt;
-
- if (min_page_reclaims > minflt)
- min_page_reclaims = minflt;
- else if (max_page_reclaims < minflt)
- max_page_reclaims = minflt;
- }
-
- virtual void write(std::ostream &os) const = 0;
-};
-
-struct OpSeq : public MDContent
-{
- std::string backend;
- uint64_t graph_latency;
-
- struct OpSeqCmp
- {
- bool operator()(const OpSeq &lhs, const OpSeq &rhs) const
- {
- return lhs.begin_ts < rhs.begin_ts;
- }
- bool operator()(const OpSeq &lhs, const OpSeq &rhs) { return lhs.begin_ts < rhs.begin_ts; }
- bool operator()(OpSeq &lhs, OpSeq &rhs) { return lhs.begin_ts < rhs.begin_ts; }
- };
-
- void write(std::ostream &os) const override
- {
- uint64_t opseq_latency = end_ts - begin_ts;
- double opseq_per = static_cast<double>(opseq_latency) / graph_latency * 100.0;
- writeMDTableRow(os, {name, backend, std::to_string(opseq_latency), std::to_string(opseq_per),
- std::to_string(min_rss), std::to_string(max_rss),
- std::to_string(min_page_reclaims), std::to_string(max_page_reclaims)});
- }
-};
-
-struct Graph : public MDContent
-{
- std::set<OpSeq, OpSeq::OpSeqCmp> opseqs;
-
- void setOpSeqs(const std::map<std::string, OpSeq> &name_to_opseq)
- {
- uint64_t graph_latency = end_ts - begin_ts;
- for (auto it : name_to_opseq)
- {
- auto opseq = it.second;
- opseq.graph_latency = graph_latency;
-
- opseqs.insert(opseq);
-
- updateRss(opseq.min_rss);
- updateRss(opseq.max_rss);
- updateMinflt(opseq.min_page_reclaims);
- updateMinflt(opseq.max_page_reclaims);
- }
- }
-
- void write(std::ostream &os) const override
- {
- static std::vector<std::string> graph_headers{"latency(us)", "rss_min(kb)", "rss_max(kb)",
- "page_reclaims_min", "page_reclaims_max"};
-
- static std::vector<std::string> graph_headers_line{"-----------", "-------", "-------",
- "-----------------", "-----------------"};
-
- // Graph's Header
- writeMDTableRow(os, graph_headers);
- writeMDTableRow(os, graph_headers_line);
-
- // Graph's contents
- writeMDTableRow(os, {std::to_string(end_ts - begin_ts), std::to_string(min_rss),
- std::to_string(max_rss), std::to_string(min_page_reclaims),
- std::to_string(max_page_reclaims)});
-
- os << "\n";
-
- static std::vector<std::string> opseq_headers{
- "OpSeq name", "backend", "latency(us)", "latency(%)",
- "rss_min(kb)", "rss_max(kb)", "page_reclaims_min", "page_reclaims_max"};
-
- static std::vector<std::string> opseq_headers_line{
- "----------", "-------", "-----------", "-----------",
- "-------", "-------", "-----------------", "-----------------"};
-
- os << "## OpSequences \n";
-
- // OpSeq's Header
- writeMDTableRow(os, opseq_headers);
- writeMDTableRow(os, opseq_headers_line);
-
- // OpSeq's contents
- for (auto opseq : opseqs)
- {
- opseq.write(os);
- }
-
- os << "\n";
- }
-};
-
-struct MDTableBuilder
-{
- MDTableBuilder(const std::vector<DurationEvent> &duration_events,
- const std::vector<CounterEvent> &counter_events)
- : _duration_events(duration_events), _counter_events(counter_events)
- {
-// when ready with low overhead in release build
-#ifdef DEBUG
- for (const auto &evt : _counter_events)
- {
- uint64_t ts = std::stoull(evt.ts);
- auto &name = evt.name;
- assert(name.compare("maxrss") == 0 || name.compare("minflt") == 0);
- assert(evt.values.size() == 1);
- auto &val = evt.values.begin()->second;
- if (_ts_to_values.find(ts) == _ts_to_values.end())
- {
- std::pair<uint32_t, uint32_t> values;
- if (name.compare("maxrss") == 0)
- values.first = std::stoul(val);
- else
- values.second = std::stoul(val);
- _ts_to_values.insert({ts, values});
- }
- else
- {
- auto &values = _ts_to_values.at(ts);
- if (name.compare("maxrss") == 0)
- values.first = std::stoul(val);
- else
- values.second = std::stoul(val);
- }
- }
-#endif
- }
-
- MDTableBuilder &build()
- {
- for (auto &it : divideGraph())
- {
- size_t begin_idx = it.first;
- size_t end_idx = it.second;
- std::map<std::string, OpSeq> name_to_opseq;
- for (size_t i = begin_idx + 1; i < end_idx; ++i)
- {
- const auto &evt = _duration_events[i];
- assert(evt.name.compare("Graph") != 0);
- assert(evt.ph.compare("B") == 0 || evt.ph.compare("E") == 0);
- if (evt.ph.compare("B") == 0)
- {
- assert(name_to_opseq.find(evt.name) == name_to_opseq.end());
- name_to_opseq.insert({evt.name, makeOpSeq(evt)});
- }
- else
- {
- assert(name_to_opseq.find(evt.name) != name_to_opseq.end());
- auto &opseq = name_to_opseq.at(evt.name);
- updateOpSeq(opseq, evt);
- }
- }
-
- _graphs.emplace_back(makeGraph(begin_idx, end_idx, name_to_opseq));
- }
-
- return *this;
- }
-
- std::vector<std::pair<size_t, size_t>> divideGraph()
- {
- std::vector<std::pair<size_t, size_t>> graph_idx_list; // pair<begin_idx, end_idx>
- for (size_t i = 0, begin_idx = 0; i < _duration_events.size(); ++i)
- {
- const auto &evt = _duration_events.at(i);
- if (evt.name.compare("Graph") == 0)
- {
- if (evt.ph.compare("B") == 0)
- begin_idx = i;
- else
- graph_idx_list.emplace_back(begin_idx, i);
- }
- }
- return graph_idx_list;
- }
-
- OpSeq makeOpSeq(const DurationEvent &evt)
- {
- OpSeq opseq;
- opseq.name = evt.name;
- opseq.begin_ts = std::stoull(evt.ts);
- opseq.backend = evt.tid;
-#ifdef DEBUG
- opseq.updateRss(_ts_to_values.at(opseq.begin_ts).first);
- opseq.updateMinflt(_ts_to_values.at(opseq.begin_ts).second);
-#else
- opseq.updateRss(0);
- opseq.updateMinflt(0);
-#endif
- return opseq;
- }
-
- void updateOpSeq(OpSeq &opseq, const DurationEvent &evt)
- {
- opseq.end_ts = std::stoull(evt.ts);
-#ifdef DEBUG
- opseq.updateRss(_ts_to_values.at(opseq.end_ts).first);
- opseq.updateMinflt(_ts_to_values.at(opseq.end_ts).second);
-#else
- opseq.updateRss(0);
- opseq.updateMinflt(0);
-#endif
- }
-
- Graph makeGraph(size_t begin_idx, size_t end_idx,
- const std::map<std::string, OpSeq> &name_to_opseq)
- {
- Graph graph;
- graph.name = "Graph";
- graph.begin_ts = std::stoull(_duration_events[begin_idx].ts);
- graph.end_ts = std::stoull(_duration_events[end_idx].ts);
- graph.setOpSeqs(name_to_opseq);
-#ifdef DEBUG
- graph.updateRss(_ts_to_values.at(graph.begin_ts).first);
- graph.updateMinflt(_ts_to_values.at(graph.begin_ts).second);
- graph.updateRss(_ts_to_values.at(graph.end_ts).first);
- graph.updateMinflt(_ts_to_values.at(graph.end_ts).second);
-#else
- graph.updateRss(0);
- graph.updateMinflt(0);
-#endif
- return graph;
- }
-
- void write(std::ostream &os)
- {
- // Write contents
- for (size_t i = 0; i < _graphs.size(); ++i)
- {
- os << "# Graph " << i << "\n";
- _graphs.at(i).write(os);
- }
- }
-
- const std::vector<DurationEvent> &_duration_events;
- const std::vector<CounterEvent> &_counter_events;
- // timestamp to std::pair<maxrss, minflt>
- std::unordered_map<uint64_t, std::pair<uint32_t, uint32_t>> _ts_to_values;
- std::vector<Graph> _graphs;
-};
-
-} // namespace
-
-EventWriter::EventWriter(const EventRecorder &recorder) : _recorder(recorder)
-{
- // DO NOTHING
-}
-
-void EventWriter::writeToFiles(const std::string &base_filepath)
-{
- // Note. According to an internal issue, let snpe json as just file name not '.snpe.json'
- writeToFile(base_filepath, WriteFormat::SNPE_BENCHMARK);
- writeToFile(base_filepath + ".chrome.json", WriteFormat::CHROME_TRACING);
- writeToFile(base_filepath + ".table.md", WriteFormat::MD_TABLE);
-}
-
-void EventWriter::writeToFile(const std::string &filepath, WriteFormat write_format)
-{
- std::ofstream os{filepath, std::ofstream::out};
- switch (write_format)
- {
- case WriteFormat::CHROME_TRACING:
- writeChromeTrace(os);
- break;
- case WriteFormat::SNPE_BENCHMARK:
- writeSNPEBenchmark(os);
- break;
- case WriteFormat::MD_TABLE:
- writeMDTable(os);
- break;
- default:
- assert(!"Invalid value");
- break;
- }
-}
-
-void EventWriter::writeSNPEBenchmark(std::ostream &os)
-{
- Json::Value root;
- auto &exec_data = root["Execution_Data"] = Json::Value{Json::objectValue};
-
- struct Stat
- {
- uint64_t sum = 0;
- uint64_t count = 0;
- uint64_t max = 0;
- uint64_t min = std::numeric_limits<uint64_t>::max();
-
- void accumulate(uint64_t val)
- {
- sum += val;
- count++;
- max = std::max(max, val);
- min = std::min(min, val);
- }
- };
-
- // Memory
- {
- std::unordered_map<std::string, Stat> mem_stats;
- for (auto &evt : _recorder.counter_events())
- {
- auto &mem_stat = mem_stats[evt.name];
- uint64_t val = std::stoull(evt.values.at("value"));
- mem_stat.accumulate(val);
- }
-
- auto &mem = exec_data["memory"] = Json::Value{Json::objectValue};
- for (auto &kv : mem_stats)
- {
- auto &key = kv.first;
- auto &val = kv.second;
- mem[key]["Avg_Size"] = val.sum / val.count;
- mem[key]["Max_Size"] = val.max;
- mem[key]["Min_Size"] = val.min;
- mem[key]["Runtime"] = "NA";
- }
- }
-
- // Operation Execution Time
- {
- // NOTE This assumes _duration_events is sorted by "ts" ascending
-
- // 2D keys : stats[tid][name]
- std::unordered_map<std::string, std::unordered_map<std::string, Stat>> stats;
- std::unordered_map<std::string, std::unordered_map<std::string, uint64_t>> begin_timestamps;
- for (auto &evt : _recorder.duration_events())
- {
- auto &stat = stats[evt.tid][evt.name];
- auto &begin_ts = begin_timestamps[evt.tid][evt.name];
- uint64_t timestamp = std::stoull(evt.ts);
- if (evt.ph == "B")
- {
- if (begin_ts != 0)
- throw std::runtime_error{"Invalid Data"};
- begin_ts = timestamp;
- }
- else if (evt.ph == "E")
- {
- if (begin_ts == 0 || timestamp < begin_ts)
- throw std::runtime_error{"Invalid Data"};
- stat.accumulate(timestamp - begin_ts);
- begin_ts = 0;
- }
- else
- throw std::runtime_error{"Invalid Data - invalid value for \"ph\" : \"" + evt.ph + "\""};
- }
-
- for (auto &kv : begin_timestamps)
- for (auto &kv2 : kv.second)
- if (kv2.second != 0)
- throw std::runtime_error{"Invalid Data - B and E pair does not match."};
-
- for (auto &kv : stats)
- {
- auto &tid = kv.first;
- auto &map = kv.second;
- auto &json_tid = exec_data[tid] = Json::Value{Json::objectValue};
- for (auto &kv : map)
- {
- auto &name = kv.first;
- auto &val = kv.second;
- json_tid[name]["Avg_Time"] = val.sum / val.count;
- json_tid[name]["Max_Time"] = val.max;
- json_tid[name]["Min_Time"] = val.min;
- json_tid[name]["Runtime"] = tid;
- }
- }
- }
-
- os << root;
-}
-
-void EventWriter::writeChromeTrace(std::ostream &os)
-{
- os << "{\n";
- os << " " << quote("traceEvents") << ": [\n";
-
- for (auto &evt : _recorder.duration_events())
- {
- os << " " << object(evt) << ",\n";
- }
-
- for (auto &evt : _recorder.counter_events())
- {
- os << " " << object(evt) << ",\n";
- }
-
- os << " { }\n";
- os << " ]\n";
- os << "}\n";
-}
-
-void EventWriter::writeMDTable(std::ostream &os)
-{
- MDTableBuilder(_recorder.duration_events(), _recorder.counter_events()).build().write(os);
-}
diff --git a/runtime/onert/core/src/util/EventWriter.h b/runtime/onert/core/src/util/EventWriter.h
deleted file mode 100644
index 7e838ca82..000000000
--- a/runtime/onert/core/src/util/EventWriter.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_UTIL_EVENT_WRITER_H__
-#define __ONERT_UTIL_EVENT_WRITER_H__
-
-#include "EventRecorder.h"
-
-#include <string>
-#include <ostream>
-
-class EventWriter
-{
-public:
- enum class WriteFormat
- {
- CHROME_TRACING,
- SNPE_BENCHMARK,
- MD_TABLE,
- };
-
-public:
- EventWriter(const EventRecorder &recorder);
-
-public:
- void writeToFiles(const std::string &base_filepath);
- void writeToFile(const std::string &filepath, WriteFormat write_format);
-
-private:
- void writeSNPEBenchmark(std::ostream &os);
- void writeChromeTrace(std::ostream &os);
- void writeMDTable(std::ostream &os);
-
-private:
- const EventRecorder &_recorder;
-};
-
-#endif // __ONERT_UTIL_EVENT_WRITER_H__
diff --git a/runtime/onert/core/src/util/GeneralConfigSource.cc b/runtime/onert/core/src/util/GeneralConfigSource.cc
deleted file mode 100644
index 7d2757e58..000000000
--- a/runtime/onert/core/src/util/GeneralConfigSource.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/GeneralConfigSource.h"
-#include "util/logging.h"
-
-namespace onert
-{
-namespace util
-{
-
-std::string GeneralConfigSource::get(const std::string &key) const
-{
- auto itr = _map.find(key);
- if (itr == _map.end())
- {
- return "";
- }
- else
- {
- return itr->second;
- }
-}
-
-void GeneralConfigSource::set(const std::string &key, const std::string &val)
-{
- VERBOSE(GeneralConfigSource) << key << " : " << val << std::endl;
- _map[key] = val;
-}
-
-} // namespace util
-} // namespace onert
diff --git a/runtime/onert/core/src/util/ShapeInference.cc b/runtime/onert/core/src/util/ShapeInference.cc
deleted file mode 100644
index 0278df4d2..000000000
--- a/runtime/onert/core/src/util/ShapeInference.cc
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/Utils.h"
-#include "ir/InternalType.h"
-#include "ir/Shape.h"
-#include "util/ShapeInference.h"
-#include "util/logging.h"
-
-#include <cassert>
-#include <numeric>
-#include <sstream>
-#include <cmath>
-
-namespace onert
-{
-namespace shape_inference
-{
-
-//
-// Helper functions
-//
-
-namespace
-{
-
-template <typename T, typename U>
-typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value,
- typename std::common_type<T, U>::type>::type
-ceil_div(T dividend, U divisor)
-{
- assert(dividend > 0 && divisor > 0 && "this implementations is for positive numbers only");
- return (dividend + divisor - 1) / divisor;
-}
-
-// Calculate the result of broadcast of two shapes
-ir::Shape broadcastShapes(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape)
-{
- ir::Shape out_shape;
- auto max_rank = std::max(lhs_shape.rank(), rhs_shape.rank());
-
- for (int idx = 0; idx < max_rank; ++idx)
- {
- // Go over operands dimensions from right to left
- int lhs_idx = lhs_shape.rank() - idx - 1;
- int rhs_idx = rhs_shape.rank() - idx - 1;
-
- int32_t lhs_dim = lhs_idx >= 0 ? lhs_shape.dim(lhs_idx) : 1;
- int32_t rhs_dim = rhs_idx >= 0 ? rhs_shape.dim(rhs_idx) : 1;
-
- if (lhs_dim != 1 && rhs_dim != 1 && lhs_dim != rhs_dim)
- throw std::runtime_error("Incompatible shapes for broadcast");
-
- out_shape.prepend(std::max(lhs_dim, rhs_dim));
- }
-
- return out_shape;
-}
-
-} // namespace
-
-namespace bcq
-{
-inline int getOutputSize(const ir::Shape &cluster_shape, const int32_t *cluster_buf)
-{
- int size = 0;
- for (int idx = 0; idx < cluster_shape.dim(0); idx++)
- {
- size += cluster_buf[idx * 2 + 1];
- }
- return size;
-}
-} // namespace bcq
-
-//
-// Shape inference
-//
-
-// Calculate output height and width of convolution-like operation
-std::pair<int, int> calcConvLikeHeightAndWidth(const int in_h, const int in_w, const int ker_h,
- const int ker_w, const ir::Padding pad,
- const ir::Stride stride,
- const ir::Dilation dilation = {1, 1})
-{
- int32_t out_h = 0, out_w = 0;
- int32_t effective_filter_w_size = (ker_w - 1) * dilation.width_factor + 1;
- int32_t effective_filter_h_size = (ker_h - 1) * dilation.height_factor + 1;
- switch (pad.type)
- {
- case ir::PaddingType::SAME:
- out_h = ceil_div(in_h, stride.vertical);
- out_w = ceil_div(in_w, stride.horizontal);
- break;
- case ir::PaddingType::VALID:
- out_h = ceil_div(in_h - effective_filter_h_size + 1, stride.vertical);
- out_w = ceil_div(in_w - effective_filter_w_size + 1, stride.horizontal);
- break;
- case ir::PaddingType::EXPLICIT:
- out_h =
- (in_h + pad.param.top + pad.param.bottom - effective_filter_h_size) / stride.vertical + 1;
- out_w =
- (in_w + pad.param.left + pad.param.right - effective_filter_w_size) / stride.horizontal +
- 1;
- break;
- default:
- assert(false);
- }
-
- return {out_h, out_w};
-}
-
-ir::Shape inferEltwiseShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape)
-{
- return broadcastShapes(lhs_shape, rhs_shape);
-}
-
-ir::Shape inferArgMaxShape(const ir::Shape &input_shape, int axis, int rank)
-{
- if (axis < 0 || axis >= rank)
- {
- throw std::runtime_error("ArgMax shape inference: Wrong axis value " + std::to_string(axis));
- }
-
- ir::Shape out_shape;
- for (int idx = 0; idx < rank; ++idx)
- {
- if (idx != axis)
- {
- int32_t input_dim = input_shape.dim(idx);
- out_shape.append(input_dim);
- }
- }
-
- return out_shape;
-}
-
-ir::Shape inferReduceShape(const ir::Shape &input_shape, const std::vector<int> &axes,
- bool keep_dims)
-{
- int num_axis = axes.size();
- int input_num_dims = input_shape.rank();
- if (input_num_dims == 0)
- {
- ir::Shape out_shape(0);
- return out_shape;
- }
- if (keep_dims)
- {
- ir::Shape out_shape;
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- bool is_axis = false;
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (axes[axis_idx] == idx || axes[axis_idx] + input_num_dims == idx)
- {
- is_axis = true;
- break;
- }
- }
- if (is_axis)
- {
- out_shape.append(1);
- }
- else
- {
- out_shape.append(input_shape.dim(idx));
- }
- }
- return out_shape;
- }
- else
- {
- // Calculates size of reducing axis.
- int num_reduce_axis = num_axis;
- for (int i = 0; i < num_axis; ++i)
- {
- int current = axes[i];
- if (current < 0)
- {
- current += input_num_dims;
- }
- assert(0 <= current && current < input_num_dims);
- for (int j = 0; j < i; ++j)
- {
- int previous = axes[j];
- if (previous < 0)
- {
- previous += input_num_dims;
- }
- if (current == previous)
- {
- --num_reduce_axis;
- break;
- }
- }
- }
- // Determines output dimensions.
- ir::Shape out_shape;
- int num_skip_axis = 0;
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- bool is_axis = false;
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (axes[axis_idx] == idx || axes[axis_idx] + input_num_dims == idx)
- {
- ++num_skip_axis;
- is_axis = true;
- break;
- }
- }
- if (!is_axis)
- {
- out_shape.append(input_shape.dim(idx));
- }
- }
- return out_shape;
- }
-}
-
-ir::Shape inferBatchMatMulShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape,
- const ir::operation::BatchMatMul::Param &param)
-{
- bool adj_x = param.adj_x;
- bool adj_y = param.adj_y;
- ir::Shape output_shape;
-
- int output_rank = std::max(lhs_shape.rank(), rhs_shape.rank());
-
- // Extend lhs and rhs shape
- ir::Shape extended_lhs_shape(lhs_shape);
- ir::Shape extended_rhs_shape(rhs_shape);
- extended_lhs_shape.extendRank(output_rank);
- extended_rhs_shape.extendRank(output_rank);
-
- for (int i = 0; i < output_rank - 2; i++)
- {
- const int lhs_dim = extended_lhs_shape.dim(i);
- const int rhs_dim = extended_rhs_shape.dim(i);
- int broadcast_dim = lhs_dim;
- if (lhs_dim != rhs_dim)
- {
- if (lhs_dim == 1)
- {
- broadcast_dim = rhs_dim;
- }
- else if (rhs_dim != 1)
- {
- throw std::runtime_error{"BatchMatMul shape inference: invalid brodcasting input shape"};
- }
- }
-
- output_shape.append(broadcast_dim);
- }
-
- // Fill in the matmul dimensions.
- int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2;
- int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1;
-
- output_shape.append(extended_lhs_shape.dim(lhs_rows_index));
- output_shape.append(extended_rhs_shape.dim(rhs_cols_index));
-
- return output_shape;
-}
-
-/*
- * shp_shape : SHAPE input tensor's shape
- * shp_buf : SHAPE input tensor's buffer
- */
-ir::Shape inferBroadcastToShape(const ir::Shape shp_shape, const int32_t *shp_buf)
-{
-
- const int num_elements = shp_shape.num_elements();
-
- assert(num_elements != 0);
- assert(shp_buf);
-
- ir::Shape new_shape(num_elements);
-
- for (int i = 0; i < num_elements; ++i)
- {
- assert(shp_buf[i] != 0); // It shouldn't be 0.
- new_shape.dim(i) = shp_buf[i];
- }
-
- return new_shape;
-}
-
-ir::Shape inferConcatShape(const Shapes &in_shapes, const ir::operation::Concat::Param &param)
-{
- const int32_t concat_axis = param.axis >= 0 ? param.axis : in_shapes[0].rank() + param.axis;
- const auto &first_in_shape = in_shapes[0];
-
- // Check that all shapes are equal except for concat axis dimension
- for (const auto &in_shape : in_shapes)
- {
- if (in_shape.rank() != first_in_shape.rank())
- throw std::runtime_error("Rank in all input tensors should be same");
-
- for (int64_t dim_idx = 0; dim_idx < in_shape.rank(); ++dim_idx)
- if (!(dim_idx == concat_axis || in_shape.dim(dim_idx) == first_in_shape.dim(dim_idx)))
- throw std::runtime_error("All tensor should have same dimension "
- "except dimension on passed axis");
- }
-
- // Calculate output shape
- ir::Shape out_shape(first_in_shape);
- out_shape.dim(concat_axis) = 0;
- for (const auto &in_shape : in_shapes)
- out_shape.dim(concat_axis) += in_shape.dim(concat_axis);
- return out_shape;
-}
-
-ir::Shape inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape,
- const ir::operation::Conv2D::Param &param, ir::Layout layout)
-{
- if (param.stride.horizontal == 0 || param.stride.vertical == 0)
- throw std::runtime_error{"Conv2D: stride values must be positive"};
-
- auto ifm_shape = in_shape.asFeature(layout);
-
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in]
- auto kf_shape = ker_shape.asFeature(layout);
- assert(ifm_shape.C == kf_shape.C);
-
- const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W,
- param.padding, param.stride, param.dilation);
-
- return ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.N};
-}
-
-ir::Shape inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape,
- const ir::operation::DepthwiseConv2D::Param &param,
- ir::Layout layout)
-{
- if (param.stride.horizontal == 0 || param.stride.vertical == 0)
- throw std::runtime_error{"DepthwiseConv2D: stride values must be positive"};
-
- assert(layout == ir::Layout::NHWC);
- auto ifm_shape = in_shape.asFeature(layout);
-
- // Kernel format is [1, kernel_height, kernel_width, depth_out]
- auto kf_shape = ker_shape.asFeature(layout);
- assert(kf_shape.C == static_cast<int32_t>(ifm_shape.C * param.multiplier));
- assert(kf_shape.N == 1);
-
- const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W,
- param.padding, param.stride);
-
- return ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.C};
-}
-
-ir::Shape inferExpandDimsShape(const ir::Shape &in_shape, int32_t axis)
-{
- ir::Shape out_shape(in_shape.rank() + 1);
-
- axis = ((axis >= 0) ? axis : /* when axis < 0 */ (out_shape.rank() + axis));
- if (!(0 <= axis && axis <= in_shape.rank()))
- throw std::runtime_error("axis of dim is out of range");
-
- for (int x = 0, out_x = 0; out_x < out_shape.rank(); ++out_x)
- {
- if (out_x == axis)
- out_shape.dim(out_x) = 1;
- else
- out_shape.dim(out_x) = in_shape.dim(x++);
- }
-
- return out_shape;
-}
-
-ir::Shape inferFillShape(const ir::Shape &in_shape, const int32_t *in_buf)
-{
- ir::Shape out_shape(in_shape.dim(0));
-
- for (int out_x = 0; out_x < out_shape.rank(); ++out_x)
- {
- out_shape.dim(out_x) = in_buf[out_x];
- }
-
- return out_shape;
-}
-
-ir::Shape inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape)
-{
- assert(in_shape.rank() >= 2);
- assert(ker_shape.rank() == 2);
-
- const auto input_size_with_batch = in_shape.num_elements();
- const auto num_units = ker_shape.dim(0);
- const auto input_size = ker_shape.dim(1);
- const auto batch_size = input_size_with_batch / input_size;
- assert(input_size_with_batch % input_size == 0);
-
- return {ir::Shape({static_cast<int32_t>(batch_size), num_units})};
-}
-
-ir::Shape inferBCQFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &cluster_shape,
- const int32_t *cluster_buf)
-{
- assert(cluster_shape.rank() == 2);
- assert(cluster_shape.dim(1) == 2);
-
- const auto input_size = in_shape.dim(1);
- const auto output_size = bcq::getOutputSize(cluster_shape, cluster_buf);
-
- return {ir::Shape({output_size, input_size})};
-}
-
-ir::Shape inferBCQGatherShape(const ir::Shape &indices_shape, const ir::Shape &cluster_shape,
- const int32_t *cluster_buf, int rank,
- const ir::operation::BCQGather::Param &param)
-{
- ir::Shape out_shape;
- ir::Shape in_original_shape;
-
- assert(cluster_shape.rank() == 2);
- assert(cluster_shape.dim(1) == 2);
-
- auto hidden_size = param.input_hidden_size;
- auto axis = param.axis;
-
- in_original_shape.append(bcq::getOutputSize(cluster_shape, cluster_buf));
- in_original_shape.append(hidden_size);
-
- const int indices_rank = indices_shape.rank();
- for (int idx = 0; idx < rank; ++idx)
- {
- if (idx == (int)axis)
- {
- for (int indices_idx = 0; indices_idx < indices_rank; indices_idx++)
- {
- out_shape.append(indices_shape.dim(indices_idx));
- }
- }
- else
- {
- out_shape.append(in_original_shape.dim(idx));
- }
- }
-
- return out_shape;
-}
-
-ir::Shape inferGatherShape(const ir::Shape &input_shape, const ir::Shape &indices_shape, int axis,
- int rank)
-{
- ir::Shape out_shape;
-
- const int indices_rank = indices_shape.rank();
-
- for (int idx = 0; idx < rank; ++idx)
- {
- if (idx == axis)
- {
- for (int indices_idx = 0; indices_idx < indices_rank; indices_idx++)
- {
- out_shape.append(indices_shape.dim(indices_idx));
- }
- }
- else
- {
- out_shape.append(input_shape.dim(idx));
- }
- }
-
- return out_shape;
-}
-
-ir::Shape inferOnehotShape(const ir::Shape &input_shape, const int depth, int axis)
-{
- assert(depth >= 0);
- const auto rank = input_shape.rank() + 1;
- ir::Shape newShape(rank);
-
- axis = (axis == -1) ? (rank - 1) : axis;
-
- for (int i = 0; i < rank; ++i)
- {
- if (i < axis)
- {
- newShape.dim(i) = input_shape.dim(i);
- }
- else if (i == axis)
- {
- newShape.dim(i) = depth;
- }
- else
- {
- newShape.dim(i) = input_shape.dim(i - 1);
- }
- }
-
- return newShape;
-}
-
-ir::Shape inferPackShape(const ir::Shape &input_shape, int axis, int rank, int num)
-{
- ir::Shape out_shape;
- int in_idx = 0;
-
- for (int out_idx = 0; out_idx < rank; ++out_idx)
- {
- if (out_idx == axis)
- {
- out_shape.append(num);
- }
- else
- {
- out_shape.append(input_shape.dim(in_idx++));
- }
- }
-
- return out_shape;
-}
-
-ir::Shape inferPadShape(const ir::Shape &in_shape, const int32_t *pad_buf, const size_t num_pads)
-{
- assert(num_pads % 2 == 0);
- const int32_t rank = num_pads / 2;
-
- ir::Shape ret(rank);
- for (int32_t i = 0; i < rank; ++i)
- {
- const auto before_padding = pad_buf[i * 2];
- const auto after_padding = pad_buf[i * 2 + 1];
-
- ret.dim(i) = in_shape.dim(i) + before_padding + after_padding;
- }
-
- return ret;
-}
-
-ir::Shape inferPoolShape(const ir::Shape &in_shape, const ir::operation::Pool2D::Param &param,
- const ir::Layout layout)
-{
- if (param.stride.horizontal == 0 || param.stride.vertical == 0)
- throw std::runtime_error{"Pool2D: stride values must be positive"};
-
- assert(layout == ir::Layout::NHWC);
- auto ifm_shape = in_shape.asFeature(layout);
- const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw,
- param.padding, param.stride);
- // Pooling don't change number of channels and batch size
- return ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C};
-}
-
-ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height,
- const int32_t output_width)
-{
- assert(in_shape.rank() == 4);
- if (output_height < 0)
- {
- throw std::runtime_error{"ResizeBilinear: size value must be positive value, output_height = " +
- std::to_string(output_height)};
- }
- if (output_width < 0)
- {
- throw std::runtime_error{"ResizeBilinear: size value must be positive value, output_width = " +
- std::to_string(output_width)};
- }
-
- ir::Shape ret(in_shape.rank());
-
- ret.dim(0) = in_shape.dim(0);
- ret.dim(1) = output_height;
- ret.dim(2) = output_width;
- ret.dim(3) = in_shape.dim(3);
-
- return ret;
-}
-
-template <typename T> ir::Shape inferRangeShape(T start_val, T limit_val, T delta_val)
-{
- ir::Shape out_shape(static_cast<int>(1));
-
- out_shape.dim(0) =
- (std::is_integral<T>::value
- ? ((std::abs(start_val - limit_val) + std::abs(delta_val) - 1) / std::abs(delta_val))
- : std::ceil(std::abs((start_val - limit_val) / delta_val)));
- return out_shape;
-}
-
-// template instantiation
-template ir::Shape inferRangeShape(int start_val, int limit_val, int delta_val);
-template ir::Shape inferRangeShape(float start_val, float limit_val, float delta_val);
-
-ir::Shape inferReshapeShape(const int32_t *shape_buf, const int32_t shape_num_elements,
- const size_t total_num_elements)
-{
- ir::Shape ret(shape_num_elements);
- int32_t flatten_dim = ir::Shape::UNSPECIFIED_DIM;
- for (int32_t i = 0; i < shape_num_elements; ++i)
- {
- if (shape_buf[i] < 0)
- {
- if (flatten_dim != ir::Shape::UNSPECIFIED_DIM)
- throw std::runtime_error("Reshape: 2nd param has special dim(for flatten) more than twice");
- flatten_dim = i;
- ret.dim(i) = 1;
- }
- else
- {
- ret.dim(i) = shape_buf[i];
- }
- }
- if (flatten_dim != ir::Shape::UNSPECIFIED_DIM)
- ret.dim(flatten_dim) = total_num_elements / ret.num_elements();
-
- // Check reshapable
- if (total_num_elements != static_cast<size_t>(ret.num_elements()))
- throw std::runtime_error("Reshape: 2nd param is not compatible with the shape of input");
-
- return ret;
-}
-
-ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
- const ir::Shape &input_false_shape)
-{
- auto haveSameShapes = [](const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
- const ir::Shape &input_false_shape) {
- if ((input_cond_shape.rank() != input_true_shape.rank()) ||
- input_cond_shape.rank() != input_false_shape.rank())
- {
- return false;
- }
-
- int rank = input_cond_shape.rank();
- for (int i = 0; i < rank; ++i)
- {
- if (input_cond_shape.dim(i) != input_true_shape.dim(i) ||
- input_cond_shape.dim(i) != input_false_shape.dim(i))
- {
- return false;
- }
- }
-
- return true;
- };
-
- auto calculateShape = [](const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
- const ir::Shape &input_false_shape, ir::Shape &new_shape) {
- ir::Shape cond_shape = input_cond_shape;
- ir::Shape true_shape = input_true_shape;
- ir::Shape false_shape = input_false_shape;
- int most_rank =
- (cond_shape.rank() >= true_shape.rank()) && (cond_shape.rank() >= false_shape.rank())
- ? cond_shape.rank()
- : (false_shape.rank() >= true_shape.rank() ? false_shape.rank() : true_shape.rank());
-
- ir::Shape calculate_shape(most_rank);
-
- cond_shape.extendRank(most_rank);
- true_shape.extendRank(most_rank);
- false_shape.extendRank(most_rank);
-
- for (int i = 0; i < most_rank; ++i)
- {
- calculate_shape.dim(i) =
- (cond_shape.dim(i) >= true_shape.dim(i)) && (cond_shape.dim(i) >= false_shape.dim(i))
- ? cond_shape.dim(i)
- : (false_shape.dim(i) >= true_shape.dim(i) ? false_shape.dim(i) : true_shape.dim(i));
-
- if ((cond_shape.dim(i) != calculate_shape.dim(i) && cond_shape.dim(i) != 1) ||
- (true_shape.dim(i) != calculate_shape.dim(i) && true_shape.dim(i) != 1) ||
- (false_shape.dim(i) != calculate_shape.dim(i) && false_shape.dim(i) != 1))
- {
- return false;
- }
- }
-
- new_shape = calculate_shape;
-
- return true;
- };
-
- bool havesame = haveSameShapes(input_cond_shape, input_true_shape, input_false_shape);
- if (havesame)
- {
- return input_cond_shape;
- }
-
- ir::Shape new_shape;
- bool possible = calculateShape(input_cond_shape, input_true_shape, input_false_shape, new_shape);
-
- if (!possible)
- {
- throw std::runtime_error("Broadcasting is not possible.");
- }
-
- return new_shape;
-}
-
-ir::Shape inferSliceShape(const ir::Shape &input_shape, const int32_t *begins_buf,
- const int32_t *sizes_buf)
-{
- const uint32_t rank = input_shape.rank();
- ir::Shape out_shape(rank);
-
- for (uint32_t idx = 0; idx < rank; ++idx)
- {
- const auto input_dim = input_shape.dim(idx);
-
- // begin is zero-based
- auto begin = begins_buf[idx];
- if (begin < 0)
- throw std::runtime_error("shape inference Slice: Invalid begin.");
-
- // size is one-based
- auto size = sizes_buf[idx];
- if (size < -1)
- throw std::runtime_error("shape inference Slice: Invalid size.");
-
- if (size == -1)
- {
- size = input_dim - begin;
- }
- else
- {
- if (input_dim < begin + size)
- throw std::runtime_error("shape inference Slice: Invalid begin and size.");
- }
- out_shape.dim(idx) = size;
- }
-
- return out_shape;
-}
-
-ir::Shape inferSpaceToBatchNDShape(const ir::Shape &input_shape, const ir::Shape &block_shape_shape,
- const ir::Shape &padding_shape, const int32_t *block_shape_buf,
- const int32_t *padding_buf)
-{
- const uint32_t rank = input_shape.rank();
- ir::Shape out_shape(rank);
-
- // Currently, only 4D NHWC input/output op_context are supported.
- // The 4D array need to have exactly 2 spatial dimensions.
- // TODO(nupurgarg): Support arbitrary dimension in SpaceToBatchND.
- const int32_t kInputDimensionNum = 4;
- const int32_t kBlockSizeDimensionNum = 1;
- const int32_t kSpatialDimensionNum = 2;
-
- UNUSED_RELEASE(kInputDimensionNum);
- UNUSED_RELEASE(kBlockSizeDimensionNum);
- UNUSED_RELEASE(block_shape_shape);
- UNUSED_RELEASE(padding_shape);
-
- assert(block_shape_shape.rank() == kBlockSizeDimensionNum);
- assert(block_shape_shape.dim(0) == kSpatialDimensionNum);
- assert(padding_shape.dim(0) == kSpatialDimensionNum);
- assert(padding_shape.dim(1) == 2); // fixed, meaning left/right padding for each element
- assert(padding_shape.rank() == 2); // fixed, meaning dimension(dim 0) and padding length(dim 1)
-
- // Ensures the input height and width (with padding) is a multiple of block
- // shape height and width.
- for (int dim = 0; dim < kSpatialDimensionNum; ++dim)
- {
- int final_dim_size =
- (input_shape.dim(dim + 1) + padding_buf[dim * 2] + padding_buf[dim * 2 + 1]);
-
- assert(final_dim_size % block_shape_buf[dim] == 0);
-
- out_shape.dim(dim + 1) = final_dim_size / block_shape_buf[dim];
- }
-
- const int output_batch_size = input_shape.dim(0) * block_shape_buf[0] * block_shape_buf[1];
- const int output_channel_size = input_shape.dim(3);
-
- out_shape.dim(0) = output_batch_size;
- out_shape.dim(3) = output_channel_size;
-
- return out_shape;
-}
-
-ir::Shape inferSplitShape(const ir::Shape input_shape, int axis_value, int num_splits)
-{
- ir::Shape newShape(input_shape);
-
- assert(axis_value >= 0);
- assert(axis_value < input_shape.rank());
-
- const int input_size = input_shape.dim(axis_value);
- assert(input_size % num_splits == 0);
- const int slice_size = input_size / num_splits;
-
- newShape.dim(axis_value) = slice_size;
-
- return newShape;
-}
-
-ir::Shape inferSqueezeShape(const ir::Shape &in_shape, const ir::operation::Squeeze::Param &param)
-{
- const int ndims = param.ndim;
- const int *squeeze_dims = param.dims;
- bool should_squeeze[8] = {false};
- int num_squeezed_dims = 0;
- int shape_rank = in_shape.rank();
- if (ndims == 0)
- {
- for (int idx = 0; idx < shape_rank; ++idx)
- {
- if (in_shape.dim(idx) == 1)
- {
- should_squeeze[idx] = true;
- ++num_squeezed_dims;
- }
- }
- }
- else
- {
- for (int idx = 0; idx < ndims; ++idx)
- {
- int current = squeeze_dims[idx];
- if (current < 0)
- {
- current += shape_rank;
- }
-
- if (!(current >= 0 && current < shape_rank && in_shape.dim(current) == 1))
- {
- throw std::runtime_error(
- "The following conditions must be met: 0 <= dim < Shape rank, dim == 1");
- }
-
- if (!should_squeeze[current])
- {
- ++num_squeezed_dims;
- }
- should_squeeze[current] = true;
- }
- }
-
- // Set output shape.
- ir::Shape out_shape(shape_rank - num_squeezed_dims);
- for (int in_idx = 0, out_idx = 0; in_idx < shape_rank; ++in_idx)
- {
- if (!should_squeeze[in_idx])
- {
- out_shape.dim(out_idx++) = in_shape.dim(in_idx);
- }
- }
-
- return out_shape;
-}
-
-// helper for for StridedSlice
-template <typename T>
-StridedSliceParams buildStridedSliceParams(const T *begin, const T *end, const T *strides,
- const uint32_t begin_mask, const uint32_t end_mask,
- const uint32_t shrink_axis_mask, const uint8_t rank)
-{
- StridedSliceParams op_params;
- op_params.start_indices_count = rank;
- op_params.stop_indices_count = rank;
- op_params.strides_count = rank;
-
- for (int i = 0; i < op_params.strides_count; ++i)
- {
- op_params.start_indices[i] = begin[i];
- op_params.stop_indices[i] = end[i];
- op_params.strides[i] = strides[i];
-
- assert(op_params.strides[i] != 0);
- }
-
- op_params.begin_mask = begin_mask;
- op_params.ellipsis_mask = 0; // NYI
- op_params.end_mask = end_mask;
- op_params.new_axis_mask = 0; // NYI
- op_params.shrink_axis_mask = shrink_axis_mask;
-
- assert(sizeof(op_params.begin_mask) * 4 >= rank);
-
- return op_params;
-}
-
-// template instantiation
-template StridedSliceParams
-buildStridedSliceParams(const uint32_t *begin, const uint32_t *end, const uint32_t *strides,
- const uint32_t begin_mask, const uint32_t end_mask,
- const uint32_t shrink_axis_mask, const uint8_t rank);
-
-int Clamp(const int v, const int lo, const int hi)
-{
- assert(!(hi < lo));
- if (hi < v)
- return hi;
- if (v < lo)
- return lo;
- return v;
-}
-
-int StartForAxis(const StridedSliceParams &params, const ir::Shape &input_shape, int axis)
-{
- const auto begin_mask = params.begin_mask;
- const auto *start_indices = params.start_indices;
- const auto *strides = params.strides;
- // Begin with the specified index.
- int start = start_indices[axis];
-
- // begin_mask override
- if (begin_mask & 1 << axis)
- {
- if (strides[axis] > 0)
- {
- // Forward iteration - use the first element. These values will get
- // clamped below (Note: We could have set them to 0 and axis_size-1, but
- // use lowest() and max() to maintain symmetry with StopForAxis())
- start = std::numeric_limits<int>::lowest();
- }
- else
- {
- // Backward iteration - use the last element.
- start = std::numeric_limits<int>::max();
- }
- }
-
- // Handle negative indices
- int axis_size = input_shape.dim(axis);
- if (start < 0)
- {
- start += axis_size;
- }
-
- // Clamping
- start = Clamp(start, 0, axis_size - 1);
-
- return start;
-}
-
-// Return the "real" index for the end of iteration along that axis. This is an
-// "end" in the traditional C sense, in that it points to one past the last
-// element. ie. So if you were iterating through all elements of a 1D array of
-// size 4, this function would return 4 as the stop, because it is one past the
-// "real" indices of 0, 1, 2 & 3.
-int StopForAxis(const StridedSliceParams &params, const ir::Shape &input_shape, int axis,
- int start_for_axis)
-{
- const auto end_mask = params.end_mask;
- const auto shrink_axis_mask = params.shrink_axis_mask;
- const auto *stop_indices = params.stop_indices;
- const auto *strides = params.strides;
-
- // Begin with the specified index
- const bool shrink_axis = shrink_axis_mask & (1 << axis);
- int stop = stop_indices[axis];
-
- // When shrinking an axis, the end position does not matter (and can be
- // incorrect when negative indexing is used, see Issue #19260). Always use
- // start_for_axis + 1 to generate a length 1 slice, since start_for_axis has
- // already been adjusted for negative indices.
- if (shrink_axis)
- {
- stop = start_for_axis + 1;
- }
-
- // end_mask override
- if (end_mask & (1 << axis))
- {
- if (strides[axis] > 0)
- {
- // Forward iteration - use the last element. These values will get
- // clamped below
- stop = std::numeric_limits<int>::max();
- }
- else
- {
- // Backward iteration - use the first element.
- stop = std::numeric_limits<int>::lowest();
- }
- }
-
- // Handle negative indices
-
- const int axis_size = input_shape.dim(axis);
- if (stop < 0)
- {
- stop += axis_size;
- }
-
- // Clamping
- // Because the end index points one past the last element, we need slightly
- // different clamping ranges depending on the direction.
- if (strides[axis] > 0)
- {
- // Forward iteration
- stop = Clamp(stop, 0, axis_size);
- }
- else
- {
- // Backward iteration
- stop = Clamp(stop, -1, axis_size - 1);
- }
-
- return stop;
-}
-
-ir::Shape inferStridedSliceShape(const ir::Shape &input_shape, const StridedSliceParams &op_params,
- uint32_t rank)
-{
- ir::Shape out_shape;
-
- for (uint32_t idx = 0; idx < rank; ++idx)
- {
- int32_t stride = op_params.strides[idx];
- int32_t begin = StartForAxis(op_params, input_shape, idx);
- int32_t end = StopForAxis(op_params, input_shape, idx, begin);
-
- // When shrinking an axis, the end position does not matter (and can be
- // incorrect when negative indexing is used, see Issue #19260). Always use
- // begin + 1 to generate a length 1 slice, since begin has
- // already been adjusted for negative indices by StartForAxis.
- const bool shrink_axis = op_params.shrink_axis_mask & (1 << idx);
- if (shrink_axis)
- {
- end = begin + 1;
- }
-
- int32_t dim_shape = std::ceil((end - begin) / static_cast<float>(stride));
- dim_shape = dim_shape < 0 ? 0 : dim_shape;
- if (!shrink_axis)
- {
- out_shape.append(dim_shape);
- }
- }
-
- return out_shape;
-}
-
-ir::Shape inferTileShape(const ir::Shape &in_shape, const int32_t *multiplier_buf,
- const int32_t multiplier_size)
-{
- if (multiplier_size != in_shape.rank())
- {
- throw std::runtime_error("inferTileShape failed, input rank: " +
- std::to_string(in_shape.rank()) + ", bad multipliers size: " +
- std::to_string(multiplier_size) + "");
- }
- ir::Shape new_Shape(in_shape.rank());
-
- for (int i = 0; i < in_shape.rank(); ++i)
- {
- assert(multiplier_buf[i]); // multiplier_buf[i] shuld not be 0.
- new_Shape.dim(i) = in_shape.dim(i) * multiplier_buf[i];
- }
- return new_Shape;
-}
-
-ir::Shape inferTransposeShape(const ir::Shape &in_shape, const int32_t *perm_buf,
- const int32_t perm_size)
-{
- const auto rank = in_shape.rank();
- if (perm_size > rank)
- {
- throw std::runtime_error("inferTransposeShape failed, bad permutation size: " +
- std::to_string(perm_size));
- }
-
- const int32_t *perm_data = perm_buf;
- std::vector<int32_t> regular_perm_vec;
- if (perm_size == 0)
- {
- // perm_data will be set to (n-1...0)
- regular_perm_vec.resize(rank);
- std::iota(regular_perm_vec.begin(), regular_perm_vec.end(), 0);
- std::reverse(regular_perm_vec.begin(), regular_perm_vec.end());
- perm_data = regular_perm_vec.data();
- }
- else
- {
- assert(rank == perm_size);
- }
-
- ir::Shape out_shape(rank);
- std::vector<bool> visit_perms(rank, false);
- for (int idx = 0; idx < rank; idx++)
- {
- const auto perm_val = perm_data[idx];
- // Check invalid permutation value
- if (perm_val < 0 || perm_val >= rank)
- {
- throw std::runtime_error("inferTransposeShape failed, bad permutation value: " +
- std::to_string(perm_val));
- }
-
- // Check duplicated permutation value
- if (visit_perms.at(perm_val))
- {
- throw std::runtime_error("inferTransposeShape failed, duplicated permutation value: " +
- std::to_string(perm_val));
- }
- visit_perms.at(perm_val) = true;
-
- out_shape.dim(idx) = in_shape.dim(perm_val);
- }
- return out_shape;
-}
-
-ir::Shape inferUnpackShape(const ir::Shape &input_shape, int axis, int rank)
-{
- ir::Shape out_shape;
-
- for (int out_idx = 0; out_idx < rank; out_idx++)
- {
- if (out_idx != axis)
- {
- out_shape.append(input_shape.dim(out_idx));
- }
- }
-
- return out_shape;
-}
-
-} // namespace shape_inference
-} // namespace onert
diff --git a/runtime/onert/core/src/util/logging.cc b/runtime/onert/core/src/util/logging.cc
deleted file mode 100644
index 6309d25e5..000000000
--- a/runtime/onert/core/src/util/logging.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the License);
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "util/logging.h"
-
-onert::util::logging::Context &onert::util::logging::Context::get() noexcept
-{
- static Context ctx;
- return ctx;
-}
diff --git a/runtime/onert/frontend/CMakeLists.txt b/runtime/onert/frontend/CMakeLists.txt
deleted file mode 100644
index 5ea6cdadd..000000000
--- a/runtime/onert/frontend/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_subdirectories()
diff --git a/runtime/onert/frontend/base_loader/CMakeLists.txt b/runtime/onert/frontend/base_loader/CMakeLists.txt
deleted file mode 100644
index 6f700710c..000000000
--- a/runtime/onert/frontend/base_loader/CMakeLists.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-if(NOT BUILD_TFLITE_LOADER AND NOT BUILD_CIRCLE_LOADER)
- return()
-endif(NOT BUILD_TFLITE_LOADER AND NOT BUILD_CIRCLE_LOADER)
-
-nnfw_find_package(FlatBuffers REQUIRED)
-
-add_library(base_loader INTERFACE)
-target_include_directories(base_loader INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
-
-target_link_libraries(base_loader INTERFACE onert_core)
-target_link_libraries(base_loader INTERFACE flatbuffers::flatbuffers)
diff --git a/runtime/onert/frontend/base_loader/include/base_loader.h b/runtime/onert/frontend/base_loader/include/base_loader.h
deleted file mode 100644
index d21001e59..000000000
--- a/runtime/onert/frontend/base_loader/include/base_loader.h
+++ /dev/null
@@ -1,1523 +0,0 @@
-/*
- * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __BASE_LOADER_BASE_LOADER_H__
-#define __BASE_LOADER_BASE_LOADER_H__
-
-#include "ir/Graph.h"
-#include "ir/Shape.h"
-#include "ir/Operations.Include.h"
-
-#include "flatbuffers/flexbuffers.h"
-
-#include <map>
-#include <memory>
-#include <fstream>
-#include <limits>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <util/logging.h>
-
-namespace onert
-{
-namespace base_loader
-{
-
-template <typename LoaderDomain> class BaseLoader
-{
-protected:
- using Verifier = typename LoaderDomain::Verifier;
- using ActivationFunctionType = typename LoaderDomain::ActivationFunctionType;
- using Buffer = typename LoaderDomain::Buffer;
- using BuiltinOperator = typename LoaderDomain::BuiltinOperator;
- using CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat;
- using Model = typename LoaderDomain::Model;
- using Operator = typename LoaderDomain::Operator;
- using Padding = typename LoaderDomain::Padding;
- using Pool2DOptions = typename LoaderDomain::Pool2DOptions;
- using SubGraph = typename LoaderDomain::SubGraph;
- using Tensor = typename LoaderDomain::Tensor;
- using TensorType = typename LoaderDomain::TensorType;
- using DimensionType = typename LoaderDomain::DimensionType;
- using SparseIndexVector = typename LoaderDomain::SparseIndexVector;
-
-protected:
- bool isOptionalInputTensor(std::int32_t idx) { return idx == -1; }
- virtual bool allowOptionalInputTensor(BuiltinOperator) = 0;
-
-public:
- /**
- * @brief Construct a new Loader object
- *
- * @param graph reference on subgraphs
- */
- explicit BaseLoader(std::unique_ptr<ir::Subgraphs> &subgs)
- : _base{nullptr}, _pagesize(getpagesize()), _fd(-1), _subgraphs(subgs), _model{nullptr}
- {
- _use_mmaped_data = util::getConfigBool(util::config::USE_MMAPED_DATA);
- }
-
- /**
- * @brief Load a model from file
- *
- * @param file_path
- */
- void loadFromFile(const char *file_path);
- /**
- * @brief Load a model from a buffer
- *
- * @param buffer buffer pointer
- * @param size buffer size
- */
- void loadFromBuffer(uint8_t *buffer, size_t size);
-
-protected:
- ~BaseLoader() = default;
- void loadModel();
-
- // Helper functions
- ir::Activation convertActivation(ActivationFunctionType type);
- ir::DataType tensorTypeToDataType(TensorType type);
- ir::OperandIndex tensorIdxToOperandIdx(int32_t tensorIdx);
-
- // Create operands form tflite::Tensor
- ir::OperandIndex loadOperand(const Tensor *tensor, ir::Graph &subg);
- void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs,
- ir::OperandIndexSequence &outputs);
- // Create operations from Operator
- void loadOperation(const Operator *op, ir::Graph &subg);
- // Load Strides and Paddings from options to param
- template <typename Param, typename OptionsType>
- void loadStridesAndPaddings(Param &param, const OptionsType *options);
- // Load Pool2D param
- template <typename Param> void loadPool2DOptions(Param &param, const Pool2DOptions *options);
-
-private:
- virtual std::unique_ptr<ir::Graph> loadSubgraph(const SubGraph *subg) = 0;
- // Operations
- template <typename OpIR, typename... Args>
- const OpIR *loadOperationTo(const Operator *op, ir::Graph &subg, Args &&... args);
- void loadConv2D(const Operator *op, ir::Graph &subg);
- void loadDepthwiseConv2D(const Operator *op, ir::Graph &subg);
- void loadTransposeConv(const Operator *op, ir::Graph &subg);
- void loadPool2D(const Operator *op, ir::Graph &subg, ir::operation::Pool2D::PoolType op_type);
- void loadReshape(const Operator *op, ir::Graph &subg);
- void loadSoftmax(const Operator *op, ir::Graph &subg);
- void loadConcatenation(const Operator *op, ir::Graph &subg);
- void loadFC(const Operator *op, ir::Graph &subg);
- void loadBinaryArithmetic(const Operator *op, ir::Graph &subg,
- ir::operation::BinaryArithmetic::ArithmeticType op_type);
- void loadAddV2(const Operator *op, ir::Graph &subg);
- void loadPack(const Operator *op, ir::Graph &subg);
- void loadResizeBilinear(const Operator *op, ir::Graph &subg);
- void loadResizeNearestNeighbor(const Operator *op, ir::Graph &subg);
- void loadReduce(const Operator *op, ir::Graph &subg,
- ir::operation::Reduce::ReduceType reduce_type);
- void loadReduceAll(const Operator *op, ir::Graph &subg);
- void loadElementwiseActivation(const Operator *op, ir::Graph &subg,
- ir::operation::ElementwiseActivation::Type op_type,
- float alpha = 0.f, float beta = 0.f);
- void loadElementwiseBinary(const Operator *op, ir::Graph &subg,
- ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type);
- void loadElementwiseUnary(const Operator *op, ir::Graph &subg,
- ir::operation::ElementwiseUnary::Type op_type);
- void loadGather(const Operator *op, ir::Graph &subg);
- void loadCustom(const Operator *op, ir::Graph &subg);
- void loadBatchMatMul(const Operator *op, ir::Graph &subg);
- void loadSqueeze(const Operator *op, ir::Graph &subg);
- void loadSplit(const Operator *op, ir::Graph &subg);
- void loadSplitV(const Operator *op, ir::Graph &subg);
- void loadStridedSlice(const Operator *op, ir::Graph &subg);
- void loadUnpack(const Operator *op, ir::Graph &subg);
- void loadComparison(const Operator *op, ir::Graph &subg);
- void loadEinsum(const Operator *op, ir::Graph &subg);
- void loadOneHot(const Operator *op, ir::Graph &subg);
- void loadIf(const Operator *op, ir::Graph &subg);
- void loadWhile(const Operator *op, ir::Graph &subg);
- void loadArgMax(const Operator *op, ir::Graph &subg);
- void loadFusedBatchNorm(const Operator *op, ir::Graph &subg);
- void loadLogSoftmax(const Operator *op, ir::Graph &subg);
- void loadSpaceToDepth(const Operator *op, ir::Graph &subg);
- void loadLeakyRelu(const Operator *op, ir::Graph &subg);
-
- void verifySubgraphIndex(int subg_index)
- {
- const auto num_subgraphs = _model->subgraphs()->size();
- if (subg_index < 0 || subg_index >= static_cast<int32_t>(num_subgraphs))
- throw std::runtime_error{std::string{"Invalid subgraph index - "} +
- std::to_string(subg_index)};
- }
-
-protected:
- // Base address for mapped region for loading (if needed)
- uint8_t *_base;
- // Memory page size
- int32_t _pagesize;
- // loaded file description
- int _fd;
- // Reference on loadable subgraphs
- std::unique_ptr<ir::Subgraphs> &_subgraphs;
- const Model *_model;
- // Maps Tensor indices to onert Operands.
- std::vector<ir::OperandIndex> _tensor_to_operand;
- std::unordered_map<ir::OperandIndex, std::string> _tensor_names;
- // Verifier
- std::unique_ptr<Verifier> _verifier;
- // Boolean flag to use MMAPED_DATA
- bool _use_mmaped_data = false;
-};
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::BaseLoader::loadFromFile(const char *file_path)
-{
- _fd = open(file_path, O_RDONLY);
- if (_fd < 0)
- {
- throw std::runtime_error("Failed to open file " + std::string(file_path));
- }
-
- struct stat file_stat;
- if (fstat(_fd, &file_stat) != 0)
- {
- throw std::runtime_error("Fstat failed or file " + std::string(file_path) +
- " is not a regular file");
- }
- int size = file_stat.st_size;
-
- // Map model file into memory region
- _base = static_cast<uint8_t *>(mmap(NULL, size, PROT_READ, MAP_PRIVATE, _fd, 0));
- if (_base == MAP_FAILED)
- {
- close(_fd);
- throw std::runtime_error("mmap failed - " + std::string(strerror(errno)));
- }
-
- _verifier = std::make_unique<Verifier>(reinterpret_cast<const std::uint8_t *>(_base), size);
-
- loadModel();
- munmap(_base, size);
-
- close(_fd);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::BaseLoader::loadFromBuffer(uint8_t *buffer, size_t size)
-{
- _base = buffer;
- _verifier = std::make_unique<Verifier>(reinterpret_cast<const std::uint8_t *>(_base), size);
- loadModel();
-}
-
-template <typename LoaderDomain>
-ir::Activation
-BaseLoader<LoaderDomain>::BaseLoader::convertActivation(const ActivationFunctionType type)
-{
- switch (type)
- {
- case ActivationFunctionType::ActivationFunctionType_NONE:
- return ir::Activation::NONE;
- case ActivationFunctionType::ActivationFunctionType_RELU:
- return ir::Activation::RELU;
- case ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1:
- return ir::Activation::RELU1;
- case ActivationFunctionType::ActivationFunctionType_RELU6:
- return ir::Activation::RELU6;
- case ActivationFunctionType::ActivationFunctionType_TANH:
- return ir::Activation::TANH;
- default:
- throw std::runtime_error(std::string("Unsupported or invalid activation type: ") +
- std::to_string(static_cast<int>(type)));
- }
-}
-
-template <typename LoaderDomain>
-ir::DataType BaseLoader<LoaderDomain>::BaseLoader::tensorTypeToDataType(const TensorType type)
-{
- switch (type)
- {
- case TensorType::TensorType_FLOAT32:
- return ir::DataType::FLOAT32;
- case TensorType::TensorType_INT32:
- return ir::DataType::INT32;
- case TensorType::TensorType_BOOL:
- return ir::DataType::BOOL8;
- case TensorType::TensorType_UINT8:
- return ir::DataType::QUANT_UINT8_ASYMM;
- case TensorType::TensorType_INT8:
- return ir::DataType::QUANT_INT8_SYMM;
- case TensorType::TensorType_INT64:
- return ir::DataType::INT64;
- default:
- throw std::runtime_error(
- std::string("Unsupported tensor type: ").append(EnumNameTensorType(type)));
- }
-}
-
-template <typename LoaderDomain>
-ir::OperandIndex BaseLoader<LoaderDomain>::BaseLoader::tensorIdxToOperandIdx(int32_t tensorIdx)
-{
- return isOptionalInputTensor(tensorIdx) ? ir::OperandIndex() : _tensor_to_operand[tensorIdx];
-}
-
-/* Copy is copied from tensorflow lite */
-template <typename T> bool Copy(const T *data_ptr, std::vector<uint16_t> &arr)
-{
- if (data_ptr->values() == nullptr)
- {
- return false;
- }
-
- int size = data_ptr->values()->size();
- arr.reserve(size);
- for (int i = 0; i < size; i++)
- {
- arr.emplace_back(static_cast<uint16_t>(data_ptr->values()->Get(i)));
- }
- return true;
-}
-
-template <typename LoaderDomain>
-ir::OperandIndex BaseLoader<LoaderDomain>::loadOperand(const Tensor *tensor, ir::Graph &subg)
-{
- ir::Shape shape;
- // Shape
- const auto *tensor_shape = tensor->shape();
- if (tensor_shape != nullptr)
- {
- for (const auto &dim : *tensor_shape)
- {
- shape.append(dim);
- }
- }
-
- // Note for tensor->shape_signature()
- // We don't handle shape signature
- // How we handle:
- // If shape_signature[k] == -1, we will use tensor->shape()[k] == 1
- // If app wants to change the input shape, call nnfw_apply_input_tensorinfo() can
- // be used.
-
- // Type
- ir::DataType data_type = tensorTypeToDataType(tensor->type());
- // Quantization
- auto q_params = tensor->quantization();
- float scale = 0.0;
- long zero_point = 0;
- if (q_params != nullptr)
- {
- if (q_params->scale())
- {
- if (q_params->scale()->size() != 1)
- {
- throw std::runtime_error("Only 1 scale for a tensor is supported.");
- }
- scale = q_params->scale()->Get(0);
- }
-
- if (q_params->zero_point())
- {
- if (q_params->zero_point()->size() != 1)
- {
- throw std::runtime_error("Only 1 zero_point value for a tensor is supported.");
- }
- zero_point = q_params->zero_point()->Get(0);
- // zero_point is long while TypeInfo.zero_point is defined as int32_t.
- assert(zero_point >= std::numeric_limits<int32_t>::min());
- assert(zero_point <= std::numeric_limits<int32_t>::max());
- }
- auto details = q_params->details_as_CustomQuantization();
- if (details != nullptr)
- throw std::runtime_error("Custom Quantization is not supported");
- }
- // Create TypeInfo
- ir::TypeInfo type_info(data_type, scale, zero_point);
- // Sparsity
- auto src_sparsity = tensor->sparsity();
- if (src_sparsity != nullptr)
- {
- std::vector<uint16_t> w1_segments;
- std::vector<uint16_t> w1_indices;
- // check traversal_order
- if (src_sparsity->traversal_order())
- {
- const int traversal_order_size = src_sparsity->traversal_order()->size();
- for (int i = 0; i < traversal_order_size; ++i)
- {
- if (i != src_sparsity->traversal_order()->Get(i))
- throw std::runtime_error("traversal_order [0, 1, ..., n-1] is only supported.");
- }
- }
- // check block_map
- int block_rank = 0;
- if (src_sparsity->block_map())
- {
- block_rank = src_sparsity->block_map()->size();
- for (int i = 0; i < block_rank; ++i)
- {
- if (i != src_sparsity->block_map()->Get(i))
- throw std::runtime_error("block_map [0, 1, ..., n-1] is only supported.");
- }
- }
- // load metadata
- const int dim_metadata_size = src_sparsity->dim_metadata()->size();
- auto dense_rank = shape.rank();
- if (dense_rank + block_rank != dim_metadata_size)
- throw std::runtime_error("sparsity dim_metadata length is wrong.");
- bool random_sparsity = dim_metadata_size == 2 && block_rank == 0;
- bool block2D_sparsity = dim_metadata_size == 4 && block_rank == 2;
- if (dim_metadata_size != !random_sparsity && !block2D_sparsity)
- throw std::runtime_error(
- "sparsity is supported only for 2D tensor with random or 16x1 block sparsity.");
-
- const auto *src_metadata = src_sparsity->dim_metadata()->Get(0);
- if (src_metadata->format() != DimensionType::DimensionType_DENSE)
- throw std::runtime_error("sparse tensor dim[0] is not DENSE");
- src_metadata = src_sparsity->dim_metadata()->Get(1);
- if (src_metadata->format() != DimensionType::DimensionType_SPARSE_CSR)
- throw std::runtime_error("sparse tensor dim[0] is not SPARSE_CSR");
- auto ParseSparseIndexVector = [src_metadata, &w1_segments, &w1_indices]() {
- if (src_metadata->array_segments() == nullptr || src_metadata->array_indices() == nullptr)
- return false;
- bool status = true;
- switch (src_metadata->array_segments_type())
- {
- case SparseIndexVector::SparseIndexVector_Int32Vector:
- status = Copy(src_metadata->array_segments_as_Int32Vector(), w1_segments);
- break;
- case SparseIndexVector::SparseIndexVector_Uint16Vector:
- status = Copy(src_metadata->array_segments_as_Uint16Vector(), w1_segments);
- break;
- case SparseIndexVector::SparseIndexVector_Uint8Vector:
- status = Copy(src_metadata->array_segments_as_Uint8Vector(), w1_segments);
- break;
- default:
- return false;
- }
- if (status != true)
- return false;
- switch (src_metadata->array_indices_type())
- {
- case SparseIndexVector::SparseIndexVector_Int32Vector:
- return Copy(src_metadata->array_indices_as_Int32Vector(), w1_indices);
- case SparseIndexVector::SparseIndexVector_Uint16Vector:
- return Copy(src_metadata->array_indices_as_Uint16Vector(), w1_indices);
- case SparseIndexVector::SparseIndexVector_Uint8Vector:
- return Copy(src_metadata->array_indices_as_Uint8Vector(), w1_indices);
- default:
- break;
- }
- return false;
- };
- if (ParseSparseIndexVector() == false)
- throw std::runtime_error("Error during parsing sparsity index information");
- // Get block size
- std::vector<int32_t> block_size;
- for (int i = 0; i < block_rank; ++i)
- {
- auto block_metadata = src_sparsity->dim_metadata()->Get(dense_rank + i);
- if (block_metadata->format() != DimensionType::DimensionType_DENSE)
- throw std::runtime_error("block dimension must be DENSE.");
- block_size.push_back(block_metadata->dense_size());
- }
- type_info.sparsity(std::make_shared<ir::Sparsity>(std::move(w1_segments), std::move(w1_indices),
- std::move(block_size)));
- }
- // Create operand
- const auto operand_index = subg.addOperand(shape, type_info);
-
- // Constant tensors are indicated by non-empty data.
- const auto *data = _model->buffers()->Get(tensor->buffer())->data();
- if (data != nullptr)
- {
- using std::ptrdiff_t;
- std::unique_ptr<ir::Data> data_obj;
- if (_fd == -1) // Model is from memory
- {
- data_obj = std::make_unique<ir::ExternalData>(data->data(), data->size());
- }
- else // Model is loaded(mmap'd) from a file
- {
- size_t data_size = data->size();
- ptrdiff_t unaligned_offset_start = data->data() - _base;
- ptrdiff_t offset_end = unaligned_offset_start + data_size;
-
- // Calculated aligned offset from base address of mapped region
- // munmap accepts memory address which is a multiple of the pagesize
- ptrdiff_t aligned_offset_start = (unaligned_offset_start / _pagesize) * _pagesize;
- size_t mmap_size = offset_end - aligned_offset_start;
-
- if (_use_mmaped_data)
- {
- data_obj = std::make_unique<ir::MMapedData>(_fd, aligned_offset_start, mmap_size,
- unaligned_offset_start, data_size);
- }
- else
- {
- size_t offset = unaligned_offset_start - aligned_offset_start;
- uint8_t *mmap_base = static_cast<uint8_t *>(
- mmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, _fd, aligned_offset_start));
- data_obj = std::make_unique<ir::CachedData>(mmap_base + offset, data_size);
- munmap(mmap_base, mmap_size);
- }
- }
- subg.setOperandValue(operand_index, std::move(data_obj));
- }
-
- _tensor_names.emplace(operand_index, tensor->name()->str());
-
- // Variablie
- if (tensor->is_variable())
- throw std::runtime_error("Variable tensor not supported!");
-
- return operand_index;
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs,
- ir::OperandIndexSequence &outputs)
-{
- for (const std::int32_t idx : *op->inputs())
- {
- // Optional tensors are not supported yet except for FULLY_CONNECTED and BCQ_FULLY_CONNECTED
- auto check_optional_input = [&]() {
- auto builtin_code = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
- if (isOptionalInputTensor(idx) && !allowOptionalInputTensor(builtin_code))
- throw std::runtime_error(
- std::string("loader doesn't support optional input tensor yet for ")
- .append(EnumNameBuiltinOperator(builtin_code)));
- };
- check_optional_input();
- inputs.append(tensorIdxToOperandIdx(idx));
- }
-
- for (const std::int32_t idx : *op->outputs())
- {
- outputs.append(tensorIdxToOperandIdx(idx));
- }
-}
-
-template <typename LoaderDomain>
-template <typename Param, typename OptionsType>
-void BaseLoader<LoaderDomain>::loadStridesAndPaddings(Param &param, const OptionsType *options)
-{
- // Strides
- param.stride.vertical = options->stride_h();
- param.stride.horizontal = options->stride_w();
- // Paddings
- switch (options->padding())
- {
- case Padding::Padding_SAME:
- param.padding.type = ir::PaddingType::SAME;
- break;
- case Padding::Padding_VALID:
- param.padding.type = ir::PaddingType::VALID;
- break;
- default:
- throw std::runtime_error{"Invalid padding type"};
- }
- // param paddings indexes unused
-}
-
-template <typename LoaderDomain>
-template <typename Param>
-void BaseLoader<LoaderDomain>::loadPool2DOptions(Param &param, const Pool2DOptions *options)
-{
- // Strides and Paddings
- if (options->stride_h() <= 0 || options->stride_w() <= 0)
- throw std::runtime_error{"Invalid stride vertical or horizontal - both must be bigger than 0"};
- loadStridesAndPaddings(param, options);
- // Filter width and height
- // Strides
- if (options->filter_width() <= 0 || options->filter_height() <= 0)
- throw std::runtime_error{"Invalid filter width or height - both must be bigger than 0"};
- param.kw = options->filter_width();
- param.kh = options->filter_height();
- // Activation
- param.activation = convertActivation(options->fused_activation_function());
-}
-
-template <typename LoaderDomain>
-template <typename OpIR, typename... Args>
-const OpIR *BaseLoader<LoaderDomain>::loadOperationTo(const Operator *op, ir::Graph &subg,
- Args &&... args)
-{
- static_assert(sizeof...(args) <= 1, "You can't have more than 1 arguments!");
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<OpIR> new_op(new OpIR(inputs, outputs, std::forward<Args>(args)...));
- auto ret = new_op.get();
- subg.addOperation(std::move(new_op));
-
- return ret;
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadConv2D(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Conv2D::Param param;
- const auto *options = op->builtin_options_as_Conv2DOptions();
- param.activation = convertActivation(options->fused_activation_function());
- loadStridesAndPaddings(param, options);
- param.dilation.width_factor = options->dilation_w_factor();
- param.dilation.height_factor = options->dilation_h_factor();
-
- loadOperationTo<ir::operation::Conv2D>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadDepthwiseConv2D(const Operator *op, ir::Graph &subg)
-{
- ir::operation::DepthwiseConv2D::Param param;
- const auto *options = op->builtin_options_as_DepthwiseConv2DOptions();
- param.activation = convertActivation(options->fused_activation_function());
- loadStridesAndPaddings(param, options);
- param.multiplier = options->depth_multiplier();
- // Dilation h/w factor unused
-
- loadOperationTo<ir::operation::DepthwiseConv2D>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadTransposeConv(const Operator *op, ir::Graph &subg)
-{
- ir::operation::TransposeConv::Param param;
- const auto *options = op->builtin_options_as_TransposeConvOptions();
- loadStridesAndPaddings(param, options);
-
- loadOperationTo<ir::operation::TransposeConv>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadPool2D(const Operator *op, ir::Graph &subg,
- ir::operation::Pool2D::PoolType op_type)
-{
- ir::operation::Pool2D::Param param;
- param.op_type = op_type;
- const auto *options = op->builtin_options_as_Pool2DOptions();
-
- loadPool2DOptions(param, options);
-
- loadOperationTo<ir::operation::Pool2D>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadReshape(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Reshape::Param param{};
- const auto *options = op->builtin_options_as_ReshapeOptions();
- if (options != nullptr)
- {
- const auto *new_shape = options->new_shape();
- if (new_shape)
- {
- for (uint i = 0; i < new_shape->size(); ++i)
- {
- param.new_shape.push_back(new_shape->Get(i));
- }
- }
- }
-
- loadOperationTo<ir::operation::Reshape>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadSoftmax(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Softmax::Param param;
- const auto *options = op->builtin_options_as_SoftmaxOptions();
- // Beta
- param.beta = options->beta();
-
- loadOperationTo<ir::operation::Softmax>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadConcatenation(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Concat::Param param;
- const auto *options = op->builtin_options_as_ConcatenationOptions();
- // Axis
- param.axis = options->axis();
- // activation unused
-
- loadOperationTo<ir::operation::Concat>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadFC(const Operator *op, ir::Graph &subg)
-{
- ir::operation::FullyConnected::Param param;
- const auto *options = op->builtin_options_as_FullyConnectedOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
- // weights_format unused
-
- const auto fc = loadOperationTo<ir::operation::FullyConnected>(op, subg, param);
-
- const auto &input_operand =
- subg.operands().at(fc->getInputs().at(ir::operation::FullyConnected::INPUT));
- auto &weights_operand =
- subg.operands().at(fc->getInputs().at(ir::operation::FullyConnected::WEIGHT));
- if (input_operand.typeInfo().type() == ir::DataType::FLOAT32 &&
- weights_operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM)
- {
- weights_operand.type(ir::DataType::QUANT_INT8_SYMM);
- }
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadAddV2(const Operator *op, ir::Graph &subg)
-{
- ir::operation::BinaryArithmetic::Param param;
- param.arithmetic_type = ir::operation::BinaryArithmetic::ArithmeticType::ADD;
-
- if (op->custom_options() == nullptr)
- {
- param.activation = ir::Activation::NONE;
- }
- else
- {
- size_t custom_op_data_size = op->custom_options()->size();
- auto custom_op_data = op->custom_options()->Data();
- auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
- auto attr_map = data_root.AsMap();
- const auto fused_activation_func = static_cast<typename LoaderDomain::ActivationFunctionType>(
- attr_map["fused_activation_function"].AsInt8());
- param.activation = convertActivation(fused_activation_func);
- }
-
- loadOperationTo<ir::operation::BinaryArithmetic>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadBinaryArithmetic(
- const Operator *op, ir::Graph &subg, ir::operation::BinaryArithmetic::ArithmeticType op_type)
-{
- ir::operation::BinaryArithmetic::Param param;
- param.arithmetic_type = op_type;
- switch (op_type)
- {
- case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
- {
- const auto *add_options = op->builtin_options_as_AddOptions();
- param.activation = convertActivation(add_options->fused_activation_function());
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
- {
- const auto *sub_options = op->builtin_options_as_SubOptions();
- param.activation = convertActivation(sub_options->fused_activation_function());
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
- {
- const auto *mul_options = op->builtin_options_as_MulOptions();
- param.activation = convertActivation(mul_options->fused_activation_function());
- break;
- }
- case ir::operation::BinaryArithmetic::ArithmeticType::DIV:
- {
- const auto *div_options = op->builtin_options_as_DivOptions();
- param.activation = convertActivation(div_options->fused_activation_function());
- break;
- }
- default:
- assert(false &&
- "The function 'loadBinaryArithmetic' supports only BinaryArithmetic operations");
- break;
- }
-
- loadOperationTo<ir::operation::BinaryArithmetic>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadPack(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Pack::Param param;
- const auto *options = op->builtin_options_as_PackOptions();
- param.num = options->values_count();
- param.axis = options->axis();
-
- loadOperationTo<ir::operation::Pack>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadElementwiseActivation(
- const Operator *op, ir::Graph &subg, ir::operation::ElementwiseActivation::Type op_type,
- float alpha, float beta)
-{
- ir::operation::ElementwiseActivation::Param param;
- param.op_type = op_type;
- param.alpha = alpha;
- param.beta = beta;
-
- loadOperationTo<ir::operation::ElementwiseActivation>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadResizeBilinear(const Operator *op, ir::Graph &subg)
-{
- ir::operation::ResizeBilinear::Param param;
- param.align_corners = op->builtin_options_as_ResizeBilinearOptions()->align_corners();
- param.half_pixel_centers = op->builtin_options_as_ResizeBilinearOptions()->half_pixel_centers();
-
- loadOperationTo<ir::operation::ResizeBilinear>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadResizeNearestNeighbor(const Operator *op, ir::Graph &subg)
-{
- ir::operation::ResizeNearestNeighbor::Param param;
- param.align_corners = op->builtin_options_as_ResizeNearestNeighborOptions()->align_corners();
-
- loadOperationTo<ir::operation::ResizeNearestNeighbor>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadReduce(const Operator *op, ir::Graph &subg,
- ir::operation::Reduce::ReduceType reduce_type)
-{
- ir::operation::Reduce::Param param;
- param.reduce_type = reduce_type;
- param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();
-
- loadOperationTo<ir::operation::Reduce>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadReduceAll(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Reduce::Param param;
- param.reduce_type = ir::operation::Reduce::ReduceType::ALL;
- if (op->custom_options() == nullptr)
- {
- param.keep_dims = false;
- }
- else
- {
- size_t custom_op_data_size = op->custom_options()->size();
- auto custom_op_data = op->custom_options()->Data();
- auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
- auto attr_map = data_root.AsMap();
- param.keep_dims = attr_map["keep_dims"].AsBool();
- }
-
- loadOperationTo<ir::operation::Reduce>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadElementwiseBinary(
- const Operator *op, ir::Graph &subg,
- ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type)
-{
- ir::operation::ElementwiseBinary::Param param;
- param.op_type = op_type;
-
- loadOperationTo<ir::operation::ElementwiseBinary>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadElementwiseUnary(const Operator *op, ir::Graph &subg,
- ir::operation::ElementwiseUnary::Type op_type)
-{
- ir::operation::ElementwiseUnary::Param param;
- param.op_type = op_type;
-
- const auto eu = loadOperationTo<ir::operation::ElementwiseUnary>(op, subg, param);
- if (op_type == ir::operation::ElementwiseUnary::Type::CAST)
- {
- auto qasymm8ToUint8 = [](ir::Operand &operand) {
- if (operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM)
- {
- operand.type(ir::DataType::UINT8);
- }
- };
- qasymm8ToUint8(
- subg.operands().at(eu->getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)));
- qasymm8ToUint8(subg.operands().at(eu->getOutputs().at(0)));
- }
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadGather(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Gather::Param param;
- param.axis = op->builtin_options_as_GatherOptions()->axis();
-
- loadOperationTo<ir::operation::Gather>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadBatchMatMul(const Operator *op, ir::Graph &subg)
-{
- ir::operation::BatchMatMul::Param param;
-
- const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
-
- switch (builtin_op)
- {
- case BuiltinOperator::BuiltinOperator_BATCH_MATMUL:
- param.adj_x = op->builtin_options_as_BatchMatMulOptions()->adjoint_lhs();
- param.adj_y = op->builtin_options_as_BatchMatMulOptions()->adjoint_rhs();
- break;
- case BuiltinOperator::BuiltinOperator_CUSTOM:
- if (op->custom_options() == nullptr)
- {
- param.adj_x = false;
- param.adj_y = false;
- }
- else
- {
- size_t custom_op_data_size = op->custom_options()->size();
- auto custom_op_data = op->custom_options()->Data();
- auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
- auto attr_map = data_root.AsMap();
- param.adj_x = attr_map["adj_x"].AsBool();
- param.adj_y = attr_map["adj_y"].AsBool();
- }
- break;
- default:
- throw std::runtime_error(
- std::string("Wrong loaded operation: ").append(EnumNameBuiltinOperator(builtin_op)) +
- " as " + EnumNameBuiltinOperator(BuiltinOperator::BuiltinOperator_BATCH_MATMUL));
- }
-
- loadOperationTo<ir::operation::BatchMatMul>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadSpaceToDepth(const Operator *op, ir::Graph &subg)
-{
- ir::operation::SpaceToDepth::Param param;
- const auto *options = op->builtin_options_as_SpaceToDepthOptions();
- param.block_size = options->block_size();
-
- loadOperationTo<ir::operation::SpaceToDepth>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadCustom(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- assert(op->custom_options_format() == CustomOptionsFormat::CustomOptionsFormat_FLEXBUFFERS &&
- "Unsupported custom operation options format");
-
- auto *op_code = _model->operator_codes()->Get(op->opcode_index());
- auto custom_op_name = op_code->custom_code()->str();
-
- enum class BuiltinOP
- {
- AddV2,
- ReduceAll,
- MatrixBandPart,
- BatchMatMul,
- Einsum,
- BroadcastTo,
- FusedBatchNorm,
- StatelessRandomUniform,
- Erf
- };
-
- // Mapping from custom op name string to BuiltinOP enum
- std::map<std::string, BuiltinOP> builtin_map = {
- {"AddV2", BuiltinOP::AddV2},
- {"All", BuiltinOP::ReduceAll},
- {"MatrixBandPart", BuiltinOP::MatrixBandPart},
- {"BatchMatMulV2", BuiltinOP::BatchMatMul},
- {"Einsum", BuiltinOP::Einsum},
- {"FusedBatchNormV3", BuiltinOP::FusedBatchNorm},
- {"BroadcastTo", BuiltinOP::BroadcastTo},
- {"StatelessRandomUniform", BuiltinOP::StatelessRandomUniform},
- {"Erf", BuiltinOP::Erf},
- };
-
- try
- {
- // Throw out_of_range if it is unknown custom op
- auto custom_op_id = builtin_map.at(custom_op_name);
- switch (custom_op_id)
- {
- case BuiltinOP::AddV2:
- loadAddV2(op, subg);
- break;
- case BuiltinOP::ReduceAll:
- loadReduceAll(op, subg);
- break;
- case BuiltinOP::MatrixBandPart:
- loadOperationTo<ir::operation::MatrixBandPart>(op, subg);
- break;
- case BuiltinOP::BatchMatMul:
- loadBatchMatMul(op, subg);
- break;
- case BuiltinOP::Einsum:
- loadEinsum(op, subg);
- break;
- case BuiltinOP::BroadcastTo:
- loadOperationTo<ir::operation::BroadcastTo>(op, subg);
- break;
- case BuiltinOP::FusedBatchNorm:
- loadFusedBatchNorm(op, subg);
- break;
- case BuiltinOP::StatelessRandomUniform:
- loadOperationTo<ir::operation::StatelessRandomUniform>(op, subg);
- break;
- case BuiltinOP::Erf:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ERF);
- break;
- default:
- throw std::runtime_error{
- "Loader: Custom OP map is defined but operation loader function is not defined"};
- }
-
- return;
- }
- catch (...)
- {
- loadOperationIO(op, inputs, outputs);
-
- auto constraint = ir::OperandConstraint::createExact(inputs.size());
-
- size_t custom_op_data_size = op->custom_options()->size();
- auto custom_op_data = new char[custom_op_data_size];
- std::copy(op->custom_options()->begin(), op->custom_options()->end(), custom_op_data);
-
- ir::operation::Custom::Userdata userdata{};
- userdata.data = custom_op_data;
- userdata.size = custom_op_data_size;
-
- auto new_op = std::make_unique<ir::operation::Custom>(constraint, inputs, outputs,
- custom_op_name, userdata);
-
- subg.addOperation(std::move(new_op));
- }
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadSqueeze(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Squeeze::Param param;
- const auto *options = op->builtin_options_as_SqueezeOptions();
- const auto *dims = options->squeeze_dims();
- if (dims)
- {
- if (dims->size() > sizeof(param.dims) / sizeof(param.dims[0]))
- throw std::runtime_error("Squeeze: 'param.ndims' is out of range.");
- param.ndim = dims->size();
- for (int i = 0; i < param.ndim; ++i)
- param.dims[i] = dims->Get(i);
- }
-
- loadOperationTo<ir::operation::Squeeze>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadSplit(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Split::Param param;
- const auto *options = op->builtin_options_as_SplitOptions();
- param.num_splits = options->num_splits();
-
- loadOperationTo<ir::operation::Split>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadSplitV(const Operator *op, ir::Graph &subg)
-{
- ir::operation::SplitV::Param param;
- const auto *options = op->builtin_options_as_SplitVOptions();
- param.num_splits = options->num_splits();
-
- loadOperationTo<ir::operation::SplitV>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadStridedSlice(const Operator *op, ir::Graph &subg)
-{
- ir::operation::StridedSlice::Param param;
- const auto *options = op->builtin_options_as_StridedSliceOptions();
- param.begin_mask = options->begin_mask();
- param.end_mask = options->end_mask();
- param.shrink_axis_mask = options->shrink_axis_mask();
-
- loadOperationTo<ir::operation::StridedSlice>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadUnpack(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Unpack::Param param;
- const auto *options = op->builtin_options_as_UnpackOptions();
- param.num = options->num();
- param.axis = options->axis();
-
- loadOperationTo<ir::operation::Unpack>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadComparison(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Comparison::Param param;
- const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
-
- switch (builtin_op)
- {
- case BuiltinOperator::BuiltinOperator_EQUAL:
- param.comparison_type = ir::operation::Comparison::ComparisonType::Equal;
- break;
- case BuiltinOperator::BuiltinOperator_NOT_EQUAL:
- param.comparison_type = ir::operation::Comparison::ComparisonType::NotEqual;
- break;
- case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
- param.comparison_type = ir::operation::Comparison::ComparisonType::GreaterEqual;
- break;
- case BuiltinOperator::BuiltinOperator_GREATER:
- param.comparison_type = ir::operation::Comparison::ComparisonType::Greater;
- break;
- case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
- param.comparison_type = ir::operation::Comparison::ComparisonType::LessEqual;
- break;
- case BuiltinOperator::BuiltinOperator_LESS:
- param.comparison_type = ir::operation::Comparison::ComparisonType::Less;
- break;
- default:
- throw std::runtime_error(
- std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
- }
-
- loadOperationTo<ir::operation::Comparison>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadEinsum(const Operator *op, ir::Graph &subg)
-{
- ir::operation::Einsum::Param param;
- if (op->custom_options() == nullptr)
- {
- throw std::runtime_error{"Einsum: empty equation"};
- }
- else
- {
- size_t custom_op_data_size = op->custom_options()->size();
- auto custom_op_data = op->custom_options()->Data();
- auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
- auto attr_map = data_root.AsMap();
- param.equation = attr_map["equation"].ToString();
- }
-
- const auto es = loadOperationTo<ir::operation::Einsum>(op, subg, param);
- if (es->getInputs().size() != 2)
- {
- throw std::runtime_error{"Einsum: NYI input - only support two inputs"};
- }
-}
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadFusedBatchNorm(const Operator *op, ir::Graph &subg)
-{
- ir::operation::FusedBatchNorm::Param param;
- if (op->custom_options() == nullptr)
- {
- throw std::runtime_error{"FusedBatchNorm: empty option"};
- }
- else
- {
- size_t custom_op_data_size = op->custom_options()->size();
- auto custom_op_data = op->custom_options()->Data();
- auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
- auto attr_map = data_root.AsMap();
- param.is_training = attr_map["is_training"].AsBool();
- param.epsilon = attr_map["epsilon"].AsFloat();
- param.data_format = attr_map["data_format"].ToString();
- }
-
- const auto fbn = loadOperationTo<ir::operation::FusedBatchNorm>(op, subg, param);
-
- if (fbn->getInputs().size() != 5)
- {
- throw std::runtime_error{"FusedBatchNorm: NYI input - only support five inputs"};
- }
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadOneHot(const Operator *op, ir::Graph &subg)
-{
- if (op->inputs()->size() != 4 || op->outputs()->size() != 1)
- throw std::runtime_error("OneHot Op has wrong number of input or output tensors.");
-
- // Set parameter
- ir::operation::OneHot::Param param;
- param.axis = op->builtin_options_as_OneHotOptions()->axis();
-
- loadOperationTo<ir::operation::OneHot>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadIf(const Operator *op, ir::Graph &subg)
-{
- const auto *options = op->builtin_options_as_IfOptions();
- const int32_t then_index = options->then_subgraph_index();
- const int32_t else_index = options->else_subgraph_index();
-
- verifySubgraphIndex(then_index);
- verifySubgraphIndex(else_index);
-
- ir::operation::If::Param param;
- param.then_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(then_index)};
- param.else_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(else_index)};
-
- loadOperationTo<ir::operation::If>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadWhile(const Operator *op, ir::Graph &subg)
-{
- const auto *options = op->builtin_options_as_WhileOptions();
- const int32_t cond_index = options->cond_subgraph_index();
- const int32_t body_index = options->body_subgraph_index();
-
- verifySubgraphIndex(cond_index);
- verifySubgraphIndex(body_index);
-
- ir::operation::While::Param param;
- param.cond_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(cond_index)};
- param.body_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(body_index)};
-
- loadOperationTo<ir::operation::While>(op, subg, param);
-}
-
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadArgMax(const Operator *op, ir::Graph &subg)
{
  ir::operation::ArgMax::Param param;
  // Only int32/int64 output element types are accepted.
  const auto output_type = op->builtin_options_as_ArgMaxOptions()->output_type();
  switch (output_type)
  {
    case TensorType::TensorType_INT32:
    case TensorType::TensorType_INT64:
      param.output_type = tensorTypeToDataType(output_type);
      break;
    default:
      throw std::runtime_error("ArgMax: `output_type` must be either int32 or int64.");
  }
  auto am = loadOperationTo<ir::operation::ArgMax>(op, subg, param);

  // Validate the axis operand after the op has been added to the graph.
  // NOTE(review): if operandSize() reports a byte size, `== 4` would reject a
  // scalar int64 axis (8 bytes) even though INT64 is listed as allowed here —
  // confirm operandSize() semantics against ir::Operand.
  auto &axisOperand = subg.operands().at(am->getInputs().at(ir::operation::ArgMax::Input::AXIS));
  if (!(axisOperand.operandSize() == 4 && (axisOperand.typeInfo().type() == ir::DataType::INT32 ||
                                           axisOperand.typeInfo().type() == ir::DataType::INT64)))
    throw std::runtime_error("ArgMax: `axis` with an int32 or int64 element is only supported.");
}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadLogSoftmax(const Operator *op, ir::Graph &subg)
-{
- ir::operation::LogSoftmax::Param param;
- // In tflite, beta is fixed to 1.0 and axis is fixed to -1.
- param.beta = 1.0f;
- param.axis = -1;
-
- loadOperationTo<ir::operation::LogSoftmax>(op, subg, param);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadLeakyRelu(const Operator *op, ir::Graph &subg)
-{
- float alpha = op->builtin_options_as_LeakyReluOptions()->alpha();
- loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LEAKY_RELU, alpha,
- 1.f);
-}
-
-template <typename LoaderDomain>
-void BaseLoader<LoaderDomain>::loadOperation(const Operator *op, ir::Graph &subg)
-{
- const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
-
- switch (builtin_op)
- {
- case BuiltinOperator::BuiltinOperator_CONV_2D:
- loadConv2D(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_AVERAGE_POOL_2D:
- loadPool2D(op, subg, ir::operation::Pool2D::PoolType::AVG);
- return;
- case BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D:
- loadDepthwiseConv2D(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_TRANSPOSE_CONV:
- loadTransposeConv(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_RESHAPE:
- loadReshape(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SOFTMAX:
- loadSoftmax(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_MAX_POOL_2D:
- loadPool2D(op, subg, ir::operation::Pool2D::PoolType::MAX);
- return;
- case BuiltinOperator::BuiltinOperator_CONCATENATION:
- loadConcatenation(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
- loadFC(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_ADD:
- loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::ADD);
- return;
- case BuiltinOperator::BuiltinOperator_SUB:
- loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::SUB);
- return;
- case BuiltinOperator::BuiltinOperator_MUL:
- loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::MUL);
- return;
- case BuiltinOperator::BuiltinOperator_DIV:
- loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::DIV);
- return;
- case BuiltinOperator::BuiltinOperator_PACK:
- loadPack(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_RELU:
- loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU,
- ir::operation::ElementwiseActivation::infinity, 0.f);
- return;
- case BuiltinOperator::BuiltinOperator_RELU_N1_TO_1:
- loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 1.f,
- -1.f);
- return;
- case BuiltinOperator::BuiltinOperator_RELU6:
- loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 6.f,
- 0.f);
- return;
- case BuiltinOperator::BuiltinOperator_RESIZE_BILINEAR:
- loadResizeBilinear(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
- loadResizeNearestNeighbor(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_RSQRT:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::RSQRT);
- return;
- case BuiltinOperator::BuiltinOperator_SELECT:
- case BuiltinOperator::BuiltinOperator_SELECT_V2:
- loadOperationTo<ir::operation::Select>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SQRT:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQRT);
- return;
- case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE:
- loadOperationTo<ir::operation::SquaredDifference>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_TANH:
- loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::TANH, 1.f,
- 1.f);
- return;
- case BuiltinOperator::BuiltinOperator_TRANSPOSE:
- loadOperationTo<ir::operation::Transpose>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_MEAN:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::MEAN);
- return;
- case BuiltinOperator::BuiltinOperator_REDUCE_ANY:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::ANY);
- return;
- case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::MAX);
- return;
- case BuiltinOperator::BuiltinOperator_REVERSE_V2:
- loadOperationTo<ir::operation::Reverse>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_PAD:
- case BuiltinOperator::BuiltinOperator_PADV2:
- loadOperationTo<ir::operation::Pad>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_LOGISTIC:
- loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LOGISTIC);
- return;
- case BuiltinOperator::BuiltinOperator_EXP:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::EXP);
- return;
- case BuiltinOperator::BuiltinOperator_EXPAND_DIMS:
- loadOperationTo<ir::operation::ExpandDims>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_GATHER:
- loadGather(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SPACE_TO_BATCH_ND:
- loadOperationTo<ir::operation::SpaceToBatchND>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_BATCH_TO_SPACE_ND:
- loadOperationTo<ir::operation::BatchToSpaceND>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SUM:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::SUM);
- return;
- case BuiltinOperator::BuiltinOperator_CUSTOM:
- loadCustom(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SQUEEZE:
- loadSqueeze(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_PRELU:
- loadOperationTo<ir::operation::PReLU>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SPLIT:
- loadSplit(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SPLIT_V:
- loadSplitV(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_SLICE:
- loadOperationTo<ir::operation::Slice>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_STRIDED_SLICE:
- loadStridedSlice(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_UNPACK:
- loadUnpack(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_MINIMUM:
- loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
- return;
- case BuiltinOperator::BuiltinOperator_MAXIMUM:
- loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
- return;
- case BuiltinOperator::BuiltinOperator_CAST:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::CAST);
- return;
- case BuiltinOperator::BuiltinOperator_EQUAL:
- case BuiltinOperator::BuiltinOperator_NOT_EQUAL:
- case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
- case BuiltinOperator::BuiltinOperator_GREATER:
- case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
- case BuiltinOperator::BuiltinOperator_LESS:
- loadComparison(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_ONE_HOT:
- loadOneHot(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_ABS:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ABS);
- return;
- case BuiltinOperator::BuiltinOperator_COS:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::COS);
- return;
- case BuiltinOperator::BuiltinOperator_SIN:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SIN);
- return;
- case BuiltinOperator::BuiltinOperator_SHAPE:
- loadOperationTo<ir::operation::Shape>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_REDUCE_PROD:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::PROD);
- return;
- case BuiltinOperator::BuiltinOperator_IF:
- loadIf(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_WHILE:
- loadWhile(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_NEG:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::NEG);
- return;
- case BuiltinOperator::BuiltinOperator_ARG_MAX:
- loadArgMax(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_LOG:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOG);
- return;
- case BuiltinOperator::BuiltinOperator_ROUND:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ROUND);
- return;
- case BuiltinOperator::BuiltinOperator_POW:
- loadOperationTo<ir::operation::Pow>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_LOGICAL_NOT:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOGICAL_NOT);
- return;
- case BuiltinOperator::BuiltinOperator_LOGICAL_OR:
- loadElementwiseBinary(op, subg,
- ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
- return;
- case BuiltinOperator::BuiltinOperator_FILL:
- loadOperationTo<ir::operation::Fill>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_ZEROS_LIKE:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ZEROS_LIKE);
- return;
- case BuiltinOperator::BuiltinOperator_TILE:
- loadOperationTo<ir::operation::Tile>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_RANGE:
- loadOperationTo<ir::operation::Range>(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_BATCH_MATMUL:
- loadBatchMatMul(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_LOG_SOFTMAX:
- loadLogSoftmax(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_QUANTIZE:
- loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::QUANTIZE);
- return;
- case BuiltinOperator::BuiltinOperator_SPACE_TO_DEPTH:
- loadSpaceToDepth(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_L2_NORMALIZATION:
- loadOperationTo<ir::operation::L2Normalization>(op, subg);
- break;
- case BuiltinOperator::BuiltinOperator_LEAKY_RELU:
- loadLeakyRelu(op, subg);
- return;
- case BuiltinOperator::BuiltinOperator_RANK:
- loadOperationTo<ir::operation::Rank>(op, subg);
- return;
- default:
- throw std::runtime_error(
- std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
- }
-}
-
-template <typename LoaderDomain> void BaseLoader<LoaderDomain>::loadModel()
-{
- LoaderDomain::VerifyModelBuffer(*_verifier.get());
- _model = LoaderDomain::GetModel(_base);
- // Version unused
- // const auto version = _model->version();
- // Description unused
- // const auto *description = _model->description();
- // Metabuffer unsued
- // const auto *metadata_buffer = _model->metadata_buffer();
- // Load subgraphs and map operations on subgraph
- const auto domain_subgraphs = _model->subgraphs();
- auto subgraphs = std::make_unique<ir::Subgraphs>();
- for (uint32_t subgraph_index = 0; subgraph_index < domain_subgraphs->size(); ++subgraph_index)
- {
- auto subg = loadSubgraph((*_model->subgraphs())[subgraph_index]);
- subgraphs->push(ir::SubgraphIndex{subgraph_index}, std::move(subg));
- }
- _subgraphs = std::move(subgraphs);
-}
-
-} // namespace base_loader
-} // namespace onert
-
-#endif //__BASE_LOADER_BASE_LOADER_H__
diff --git a/runtime/onert/frontend/circle/CMakeLists.txt b/runtime/onert/frontend/circle/CMakeLists.txt
deleted file mode 100644
index 76dca9989..000000000
--- a/runtime/onert/frontend/circle/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-if (NOT BUILD_CIRCLE_LOADER)
- return()
-endif ()
-
-set(CIRCLE_LOADER_SOURCES src/circle_loader.cc)
-
-add_library(circle_loader SHARED ${CIRCLE_LOADER_SOURCES})
-
-target_include_directories(circle_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-
-target_link_libraries(circle_loader PRIVATE onert_core)
-target_link_libraries(circle_loader PRIVATE base_loader nnfw_common nnfw_coverage)
-target_link_libraries(circle_loader PRIVATE circle_schema)
-
-install(TARGETS circle_loader DESTINATION lib)
diff --git a/runtime/onert/frontend/circle/include/circle_loader.h b/runtime/onert/frontend/circle/include/circle_loader.h
deleted file mode 100644
index 675a5b3e7..000000000
--- a/runtime/onert/frontend/circle/include/circle_loader.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __CIRCLE_CIRCLE_LOADER_H__
-#define __CIRCLE_CIRCLE_LOADER_H__
-
-#include "ir/Graph.h"
-
-#include <memory>
-
-namespace onert
-{
-namespace circle_loader
-{
-std::unique_ptr<ir::Subgraphs> loadModel(const char *filename);
-std::unique_ptr<ir::Subgraphs> loadModel(uint8_t *buffer, size_t size);
-} // namespace circle_loader
-} // namespace onert
-
-#endif // __CIRCLE_CIRCLE_LOADER_H__
diff --git a/runtime/onert/frontend/circle/src/circle_loader.cc b/runtime/onert/frontend/circle/src/circle_loader.cc
deleted file mode 100644
index 4565ffc00..000000000
--- a/runtime/onert/frontend/circle/src/circle_loader.cc
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "circle_loader.h"
-#include "base_loader.h"
-#include "circle_schema_generated.h"
-
-namespace onert
-{
-namespace circle_loader
-{
-
-namespace
-{
-
-ir::Layout convertDataFormat(circle::DataFormat data_format)
-{
- switch (data_format)
- {
- case circle::DataFormat::DataFormat_CHANNELS_FIRST:
- return ir::Layout::NCHW;
- case circle::DataFormat::DataFormat_CHANNELS_LAST:
- return ir::Layout::NHWC;
- default:
- throw std::runtime_error("Unsupported DataFormat");
- }
-}
-
-struct LoaderDomain
-{
- using Verifier = flatbuffers::Verifier;
- using ActivationFunctionType = circle::ActivationFunctionType;
- using Buffer = circle::Buffer;
- using BuiltinOperator = circle::BuiltinOperator;
- using CustomOptionsFormat = circle::CustomOptionsFormat;
- using Model = circle::Model;
- using Operator = circle::Operator;
- using Padding = circle::Padding;
- using Pool2DOptions = circle::Pool2DOptions;
- using Tensor = circle::Tensor;
- using TensorType = circle::TensorType;
- using SubGraph = circle::SubGraph;
- using DimensionType = circle::DimensionType;
- using SparseIndexVector = circle::SparseIndexVector;
-
- static const char *EnumNameBuiltinOperator(BuiltinOperator e)
- {
- return circle::EnumNameBuiltinOperator(e);
- }
- static const char *EnumNameActivationFunctionType(ActivationFunctionType e)
- {
- return circle::EnumNameActivationFunctionType(e);
- }
- static const char *EnumNameTensorType(TensorType e) { return circle::EnumNameTensorType(e); }
- static const Model *GetModel(const void *buf) { return circle::GetModel(buf); }
- static bool VerifyModelBuffer(Verifier &verifier) { return circle::VerifyModelBuffer(verifier); }
-};
-
-class CircleLoader final : public base_loader::BaseLoader<LoaderDomain>
-{
-protected:
- void loadInstanceNorm(const Operator *op, ir::Graph &subg);
- void loadBCQFullyConnected(const Operator *op, ir::Graph &subg);
- void loadBCQGather(const Operator *op, ir::Graph &subg);
-
-public:
- using BaseLoader::BaseLoader;
-
- bool allowOptionalInputTensor(BuiltinOperator op) override
- {
- switch (op)
- {
- case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
- case BuiltinOperator::BuiltinOperator_BCQ_FULLY_CONNECTED:
- return true;
- default:
- return false;
- }
- }
-
-private:
- std::unique_ptr<ir::Graph> loadSubgraph(const circle::SubGraph *circle_subg) override
- {
- auto subg = std::make_unique<ir::Graph>();
- // Load tensors
- _tensor_to_operand.resize(circle_subg->tensors()->size());
- for (flatbuffers::uoffset_t i = 0; i < circle_subg->tensors()->size(); ++i)
- {
- _tensor_to_operand[i] = loadOperand(circle_subg->tensors()->Get(i), *subg);
- }
- // Set inputs
- for (const std::int32_t input_ind : *circle_subg->inputs())
- {
- subg->addInput(tensorIdxToOperandIdx(input_ind),
- _tensor_names.at(_tensor_to_operand[input_ind]));
- }
- // Set outputs
- for (const std::int32_t output_ind : *circle_subg->outputs())
- {
- subg->addOutput(tensorIdxToOperandIdx(output_ind),
- _tensor_names.at(_tensor_to_operand[output_ind]));
- }
- // Create operations
- for (const auto *op : *circle_subg->operators())
- {
- CircleLoader::loadOperation(op, *subg);
- }
-
- subg->setLayout(convertDataFormat(circle_subg->data_format()));
-
- subg->finishBuilding();
-
- return subg;
- }
-
- void loadOperation(const circle::Operator *op, ir::Graph &subg)
- {
- const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
-
- switch (builtin_op)
- {
- case circle::BuiltinOperator::BuiltinOperator_INSTANCE_NORM:
- loadInstanceNorm(op, subg);
- return;
- case circle::BuiltinOperator::BuiltinOperator_BCQ_FULLY_CONNECTED:
- loadBCQFullyConnected(op, subg);
- return;
- case circle::BuiltinOperator::BuiltinOperator_BCQ_GATHER:
- loadBCQGather(op, subg);
- return;
- default:
- BaseLoader::loadOperation(op, subg);
- return;
- }
- }
-};
-
-void CircleLoader::loadInstanceNorm(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::InstanceNorm::Param param;
- const auto *options = op->builtin_options_as_InstanceNormOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
- // Use default value 1e-5 if value of epsilon is zero
- param.epsilon = options->epsilon() == 0.f ? 1e-5 : options->epsilon();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::InstanceNorm(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-void CircleLoader::loadBCQGather(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::BCQGather::Param param;
- const auto *options = op->builtin_options_as_BCQGatherOptions();
- param.input_hidden_size = options->input_hidden_size();
- param.axis = options->axis();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::BCQGather(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-void CircleLoader::loadBCQFullyConnected(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::BCQFullyConnected::Param param;
- const auto *options = op->builtin_options_as_BCQFullyConnectedOptions();
- param.weights_hidden_size = options->weights_hidden_size();
- param.activation = convertActivation(options->fused_activation_function());
-
- std::unique_ptr<ir::Operation> new_op(
- new ir::operation::BCQFullyConnected(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-} // namespace
-
-std::unique_ptr<ir::Subgraphs> loadModel(const char *filename)
-{
- auto subgraphs = std::make_unique<ir::Subgraphs>();
- CircleLoader loader(subgraphs);
- loader.loadFromFile(filename);
- return subgraphs;
-}
-
-std::unique_ptr<ir::Subgraphs> loadModel(uint8_t *buffer, size_t size)
-{
- auto subgraphs = std::make_unique<ir::Subgraphs>();
- CircleLoader loader(subgraphs);
- loader.loadFromBuffer(buffer, size);
- return subgraphs;
-}
-
-} // namespace circle_loader
-} // namespace onert
diff --git a/runtime/onert/frontend/circle_schema/CMakeLists.txt b/runtime/onert/frontend/circle_schema/CMakeLists.txt
deleted file mode 100644
index 208103f1c..000000000
--- a/runtime/onert/frontend/circle_schema/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-add_library(circle_schema INTERFACE)
-
-nnfw_find_package(FlatBuffers REQUIRED)
-
-target_link_libraries(circle_schema INTERFACE flatbuffers::flatbuffers)
-
-target_include_directories(circle_schema INTERFACE include)
diff --git a/runtime/onert/frontend/circle_schema/include/circle_schema_generated.h b/runtime/onert/frontend/circle_schema/include/circle_schema_generated.h
deleted file mode 100644
index 190c84d98..000000000
--- a/runtime/onert/frontend/circle_schema/include/circle_schema_generated.h
+++ /dev/null
@@ -1,10112 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Samsung Electronics Co., Ltd. All Rights Reserved
- * Copyright 2018 The TensorFlow Authors. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-// automatically generated by the FlatBuffers compiler, do not modify
-
-#ifndef FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_
-#define FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_
-
-#include "flatbuffers/flatbuffers.h"
-
-namespace circle
-{
-
-struct CustomQuantization;
-
-struct QuantizationParameters;
-
-struct Int32Vector;
-
-struct Uint16Vector;
-
-struct Uint8Vector;
-
-struct DimensionMetadata;
-
-struct SparsityParameters;
-
-struct Tensor;
-
-struct Conv2DOptions;
-
-struct Pool2DOptions;
-
-struct DepthwiseConv2DOptions;
-
-struct ConcatEmbeddingsOptions;
-
-struct LSHProjectionOptions;
-
-struct SVDFOptions;
-
-struct RNNOptions;
-
-struct SequenceRNNOptions;
-
-struct BidirectionalSequenceRNNOptions;
-
-struct FullyConnectedOptions;
-
-struct SoftmaxOptions;
-
-struct ConcatenationOptions;
-
-struct AddOptions;
-
-struct MulOptions;
-
-struct L2NormOptions;
-
-struct LocalResponseNormalizationOptions;
-
-struct LSTMOptions;
-
-struct UnidirectionalSequenceLSTMOptions;
-
-struct BidirectionalSequenceLSTMOptions;
-
-struct ResizeBilinearOptions;
-
-struct ResizeNearestNeighborOptions;
-
-struct CallOptions;
-
-struct PadOptions;
-
-struct PadV2Options;
-
-struct ReshapeOptions;
-
-struct SpaceToBatchNDOptions;
-
-struct BatchToSpaceNDOptions;
-
-struct SkipGramOptions;
-
-struct SpaceToDepthOptions;
-
-struct DepthToSpaceOptions;
-
-struct SubOptions;
-
-struct DivOptions;
-
-struct TopKV2Options;
-
-struct EmbeddingLookupSparseOptions;
-
-struct GatherOptions;
-
-struct TransposeOptions;
-
-struct ExpOptions;
-
-struct CosOptions;
-
-struct ReducerOptions;
-
-struct SqueezeOptions;
-
-struct SplitOptions;
-
-struct SplitVOptions;
-
-struct StridedSliceOptions;
-
-struct LogSoftmaxOptions;
-
-struct CastOptions;
-
-struct DequantizeOptions;
-
-struct MaximumMinimumOptions;
-
-struct TileOptions;
-
-struct ArgMaxOptions;
-
-struct ArgMinOptions;
-
-struct GreaterOptions;
-
-struct GreaterEqualOptions;
-
-struct LessOptions;
-
-struct LessEqualOptions;
-
-struct NegOptions;
-
-struct SelectOptions;
-
-struct SliceOptions;
-
-struct TransposeConvOptions;
-
-struct ExpandDimsOptions;
-
-struct SparseToDenseOptions;
-
-struct EqualOptions;
-
-struct NotEqualOptions;
-
-struct ShapeOptions;
-
-struct RankOptions;
-
-struct PowOptions;
-
-struct FakeQuantOptions;
-
-struct PackOptions;
-
-struct LogicalOrOptions;
-
-struct OneHotOptions;
-
-struct AbsOptions;
-
-struct HardSwishOptions;
-
-struct LogicalAndOptions;
-
-struct LogicalNotOptions;
-
-struct UnpackOptions;
-
-struct FloorDivOptions;
-
-struct SquareOptions;
-
-struct ZerosLikeOptions;
-
-struct FillOptions;
-
-struct FloorModOptions;
-
-struct RangeOptions;
-
-struct LeakyReluOptions;
-
-struct SquaredDifferenceOptions;
-
-struct MirrorPadOptions;
-
-struct UniqueOptions;
-
-struct ReverseV2Options;
-
-struct AddNOptions;
-
-struct GatherNdOptions;
-
-struct WhereOptions;
-
-struct ReverseSequenceOptions;
-
-struct MatrixDiagOptions;
-
-struct QuantizeOptions;
-
-struct MatrixSetDiagOptions;
-
-struct IfOptions;
-
-struct WhileOptions;
-
-struct NonMaxSuppressionV4Options;
-
-struct NonMaxSuppressionV5Options;
-
-struct ScatterNdOptions;
-
-struct SelectV2Options;
-
-struct DensifyOptions;
-
-struct SegmentSumOptions;
-
-struct BatchMatMulOptions;
-
-struct BCQGatherOptions;
-
-struct BCQFullyConnectedOptions;
-
-struct InstanceNormOptions;
-
-struct OperatorCode;
-
-struct Operator;
-
-struct SubGraph;
-
-struct Buffer;
-
-struct Metadata;
-
-struct Model;
-
-enum TensorType
-{
- TensorType_FLOAT32 = 0,
- TensorType_FLOAT16 = 1,
- TensorType_INT32 = 2,
- TensorType_UINT8 = 3,
- TensorType_INT64 = 4,
- TensorType_STRING = 5,
- TensorType_BOOL = 6,
- TensorType_INT16 = 7,
- TensorType_COMPLEX64 = 8,
- TensorType_INT8 = 9,
- TensorType_FLOAT64 = 10,
- TensorType_MIN = TensorType_FLOAT32,
- TensorType_MAX = TensorType_FLOAT64
-};
-
-inline const TensorType (&EnumValuesTensorType())[11]
-{
- static const TensorType values[] = {TensorType_FLOAT32, TensorType_FLOAT16, TensorType_INT32,
- TensorType_UINT8, TensorType_INT64, TensorType_STRING,
- TensorType_BOOL, TensorType_INT16, TensorType_COMPLEX64,
- TensorType_INT8, TensorType_FLOAT64};
- return values;
-}
-
-inline const char *const *EnumNamesTensorType()
-{
- static const char *const names[] = {"FLOAT32", "FLOAT16", "INT32", "UINT8",
- "INT64", "STRING", "BOOL", "INT16",
- "COMPLEX64", "INT8", "FLOAT64", nullptr};
- return names;
-}
-
-inline const char *EnumNameTensorType(TensorType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesTensorType()[index];
-}
-
-enum QuantizationDetails
-{
- QuantizationDetails_NONE = 0,
- QuantizationDetails_CustomQuantization = 1,
- QuantizationDetails_MIN = QuantizationDetails_NONE,
- QuantizationDetails_MAX = QuantizationDetails_CustomQuantization
-};
-
-inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2]
-{
- static const QuantizationDetails values[] = {QuantizationDetails_NONE,
- QuantizationDetails_CustomQuantization};
- return values;
-}
-
-inline const char *const *EnumNamesQuantizationDetails()
-{
- static const char *const names[] = {"NONE", "CustomQuantization", nullptr};
- return names;
-}
-
-inline const char *EnumNameQuantizationDetails(QuantizationDetails e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesQuantizationDetails()[index];
-}
-
-template <typename T> struct QuantizationDetailsTraits
-{
- static const QuantizationDetails enum_value = QuantizationDetails_NONE;
-};
-
-template <> struct QuantizationDetailsTraits<CustomQuantization>
-{
- static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization;
-};
-
-bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type);
-bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum DimensionType
-{
- DimensionType_DENSE = 0,
- DimensionType_SPARSE_CSR = 1,
- DimensionType_MIN = DimensionType_DENSE,
- DimensionType_MAX = DimensionType_SPARSE_CSR
-};
-
-inline const DimensionType (&EnumValuesDimensionType())[2]
-{
- static const DimensionType values[] = {DimensionType_DENSE, DimensionType_SPARSE_CSR};
- return values;
-}
-
-inline const char *const *EnumNamesDimensionType()
-{
- static const char *const names[] = {"DENSE", "SPARSE_CSR", nullptr};
- return names;
-}
-
-inline const char *EnumNameDimensionType(DimensionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesDimensionType()[index];
-}
-
-enum SparseIndexVector
-{
- SparseIndexVector_NONE = 0,
- SparseIndexVector_Int32Vector = 1,
- SparseIndexVector_Uint16Vector = 2,
- SparseIndexVector_Uint8Vector = 3,
- SparseIndexVector_MIN = SparseIndexVector_NONE,
- SparseIndexVector_MAX = SparseIndexVector_Uint8Vector
-};
-
-inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4]
-{
- static const SparseIndexVector values[] = {SparseIndexVector_NONE, SparseIndexVector_Int32Vector,
- SparseIndexVector_Uint16Vector,
- SparseIndexVector_Uint8Vector};
- return values;
-}
-
-inline const char *const *EnumNamesSparseIndexVector()
-{
- static const char *const names[] = {"NONE", "Int32Vector", "Uint16Vector", "Uint8Vector",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameSparseIndexVector(SparseIndexVector e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesSparseIndexVector()[index];
-}
-
-template <typename T> struct SparseIndexVectorTraits
-{
- static const SparseIndexVector enum_value = SparseIndexVector_NONE;
-};
-
-template <> struct SparseIndexVectorTraits<Int32Vector>
-{
- static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector;
-};
-
-template <> struct SparseIndexVectorTraits<Uint16Vector>
-{
- static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector;
-};
-
-template <> struct SparseIndexVectorTraits<Uint8Vector>
-{
- static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector;
-};
-
-bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj,
- SparseIndexVector type);
-bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum BuiltinOperator
-{
- BuiltinOperator_ADD = 0,
- BuiltinOperator_AVERAGE_POOL_2D = 1,
- BuiltinOperator_CONCATENATION = 2,
- BuiltinOperator_CONV_2D = 3,
- BuiltinOperator_DEPTHWISE_CONV_2D = 4,
- BuiltinOperator_DEPTH_TO_SPACE = 5,
- BuiltinOperator_DEQUANTIZE = 6,
- BuiltinOperator_EMBEDDING_LOOKUP = 7,
- BuiltinOperator_FLOOR = 8,
- BuiltinOperator_FULLY_CONNECTED = 9,
- BuiltinOperator_HASHTABLE_LOOKUP = 10,
- BuiltinOperator_L2_NORMALIZATION = 11,
- BuiltinOperator_L2_POOL_2D = 12,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13,
- BuiltinOperator_LOGISTIC = 14,
- BuiltinOperator_LSH_PROJECTION = 15,
- BuiltinOperator_LSTM = 16,
- BuiltinOperator_MAX_POOL_2D = 17,
- BuiltinOperator_MUL = 18,
- BuiltinOperator_RELU = 19,
- BuiltinOperator_RELU_N1_TO_1 = 20,
- BuiltinOperator_RELU6 = 21,
- BuiltinOperator_RESHAPE = 22,
- BuiltinOperator_RESIZE_BILINEAR = 23,
- BuiltinOperator_RNN = 24,
- BuiltinOperator_SOFTMAX = 25,
- BuiltinOperator_SPACE_TO_DEPTH = 26,
- BuiltinOperator_SVDF = 27,
- BuiltinOperator_TANH = 28,
- BuiltinOperator_CONCAT_EMBEDDINGS = 29,
- BuiltinOperator_SKIP_GRAM = 30,
- BuiltinOperator_CALL = 31,
- BuiltinOperator_CUSTOM = 32,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33,
- BuiltinOperator_PAD = 34,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- BuiltinOperator_GATHER = 36,
- BuiltinOperator_BATCH_TO_SPACE_ND = 37,
- BuiltinOperator_SPACE_TO_BATCH_ND = 38,
- BuiltinOperator_TRANSPOSE = 39,
- BuiltinOperator_MEAN = 40,
- BuiltinOperator_SUB = 41,
- BuiltinOperator_DIV = 42,
- BuiltinOperator_SQUEEZE = 43,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- BuiltinOperator_STRIDED_SLICE = 45,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46,
- BuiltinOperator_EXP = 47,
- BuiltinOperator_TOPK_V2 = 48,
- BuiltinOperator_SPLIT = 49,
- BuiltinOperator_LOG_SOFTMAX = 50,
- BuiltinOperator_DELEGATE = 51,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- BuiltinOperator_CAST = 53,
- BuiltinOperator_PRELU = 54,
- BuiltinOperator_MAXIMUM = 55,
- BuiltinOperator_ARG_MAX = 56,
- BuiltinOperator_MINIMUM = 57,
- BuiltinOperator_LESS = 58,
- BuiltinOperator_NEG = 59,
- BuiltinOperator_PADV2 = 60,
- BuiltinOperator_GREATER = 61,
- BuiltinOperator_GREATER_EQUAL = 62,
- BuiltinOperator_LESS_EQUAL = 63,
- BuiltinOperator_SELECT = 64,
- BuiltinOperator_SLICE = 65,
- BuiltinOperator_SIN = 66,
- BuiltinOperator_TRANSPOSE_CONV = 67,
- BuiltinOperator_SPARSE_TO_DENSE = 68,
- BuiltinOperator_TILE = 69,
- BuiltinOperator_EXPAND_DIMS = 70,
- BuiltinOperator_EQUAL = 71,
- BuiltinOperator_NOT_EQUAL = 72,
- BuiltinOperator_LOG = 73,
- BuiltinOperator_SUM = 74,
- BuiltinOperator_SQRT = 75,
- BuiltinOperator_RSQRT = 76,
- BuiltinOperator_SHAPE = 77,
- BuiltinOperator_POW = 78,
- BuiltinOperator_ARG_MIN = 79,
- BuiltinOperator_FAKE_QUANT = 80,
- BuiltinOperator_REDUCE_PROD = 81,
- BuiltinOperator_REDUCE_MAX = 82,
- BuiltinOperator_PACK = 83,
- BuiltinOperator_LOGICAL_OR = 84,
- BuiltinOperator_ONE_HOT = 85,
- BuiltinOperator_LOGICAL_AND = 86,
- BuiltinOperator_LOGICAL_NOT = 87,
- BuiltinOperator_UNPACK = 88,
- BuiltinOperator_REDUCE_MIN = 89,
- BuiltinOperator_FLOOR_DIV = 90,
- BuiltinOperator_REDUCE_ANY = 91,
- BuiltinOperator_SQUARE = 92,
- BuiltinOperator_ZEROS_LIKE = 93,
- BuiltinOperator_FILL = 94,
- BuiltinOperator_FLOOR_MOD = 95,
- BuiltinOperator_RANGE = 96,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97,
- BuiltinOperator_LEAKY_RELU = 98,
- BuiltinOperator_SQUARED_DIFFERENCE = 99,
- BuiltinOperator_MIRROR_PAD = 100,
- BuiltinOperator_ABS = 101,
- BuiltinOperator_SPLIT_V = 102,
- BuiltinOperator_UNIQUE = 103,
- BuiltinOperator_CEIL = 104,
- BuiltinOperator_REVERSE_V2 = 105,
- BuiltinOperator_ADD_N = 106,
- BuiltinOperator_GATHER_ND = 107,
- BuiltinOperator_COS = 108,
- BuiltinOperator_WHERE = 109,
- BuiltinOperator_RANK = 110,
- BuiltinOperator_ELU = 111,
- BuiltinOperator_REVERSE_SEQUENCE = 112,
- BuiltinOperator_MATRIX_DIAG = 113,
- BuiltinOperator_QUANTIZE = 114,
- BuiltinOperator_MATRIX_SET_DIAG = 115,
- BuiltinOperator_ROUND = 116,
- BuiltinOperator_HARD_SWISH = 117,
- BuiltinOperator_IF = 118,
- BuiltinOperator_WHILE = 119,
- BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120,
- BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121,
- BuiltinOperator_SCATTER_ND = 122,
- BuiltinOperator_SELECT_V2 = 123,
- BuiltinOperator_DENSIFY = 124,
- BuiltinOperator_SEGMENT_SUM = 125,
- BuiltinOperator_BATCH_MATMUL = 126,
- BuiltinOperator_BCQ_GATHER = 252,
- BuiltinOperator_BCQ_FULLY_CONNECTED = 253,
- BuiltinOperator_INSTANCE_NORM = 254,
- BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_INSTANCE_NORM
-};
-
-inline const BuiltinOperator (&EnumValuesBuiltinOperator())[130]
-{
- static const BuiltinOperator values[] = {BuiltinOperator_ADD,
- BuiltinOperator_AVERAGE_POOL_2D,
- BuiltinOperator_CONCATENATION,
- BuiltinOperator_CONV_2D,
- BuiltinOperator_DEPTHWISE_CONV_2D,
- BuiltinOperator_DEPTH_TO_SPACE,
- BuiltinOperator_DEQUANTIZE,
- BuiltinOperator_EMBEDDING_LOOKUP,
- BuiltinOperator_FLOOR,
- BuiltinOperator_FULLY_CONNECTED,
- BuiltinOperator_HASHTABLE_LOOKUP,
- BuiltinOperator_L2_NORMALIZATION,
- BuiltinOperator_L2_POOL_2D,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
- BuiltinOperator_LOGISTIC,
- BuiltinOperator_LSH_PROJECTION,
- BuiltinOperator_LSTM,
- BuiltinOperator_MAX_POOL_2D,
- BuiltinOperator_MUL,
- BuiltinOperator_RELU,
- BuiltinOperator_RELU_N1_TO_1,
- BuiltinOperator_RELU6,
- BuiltinOperator_RESHAPE,
- BuiltinOperator_RESIZE_BILINEAR,
- BuiltinOperator_RNN,
- BuiltinOperator_SOFTMAX,
- BuiltinOperator_SPACE_TO_DEPTH,
- BuiltinOperator_SVDF,
- BuiltinOperator_TANH,
- BuiltinOperator_CONCAT_EMBEDDINGS,
- BuiltinOperator_SKIP_GRAM,
- BuiltinOperator_CALL,
- BuiltinOperator_CUSTOM,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
- BuiltinOperator_PAD,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_GATHER,
- BuiltinOperator_BATCH_TO_SPACE_ND,
- BuiltinOperator_SPACE_TO_BATCH_ND,
- BuiltinOperator_TRANSPOSE,
- BuiltinOperator_MEAN,
- BuiltinOperator_SUB,
- BuiltinOperator_DIV,
- BuiltinOperator_SQUEEZE,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_STRIDED_SLICE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_EXP,
- BuiltinOperator_TOPK_V2,
- BuiltinOperator_SPLIT,
- BuiltinOperator_LOG_SOFTMAX,
- BuiltinOperator_DELEGATE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_CAST,
- BuiltinOperator_PRELU,
- BuiltinOperator_MAXIMUM,
- BuiltinOperator_ARG_MAX,
- BuiltinOperator_MINIMUM,
- BuiltinOperator_LESS,
- BuiltinOperator_NEG,
- BuiltinOperator_PADV2,
- BuiltinOperator_GREATER,
- BuiltinOperator_GREATER_EQUAL,
- BuiltinOperator_LESS_EQUAL,
- BuiltinOperator_SELECT,
- BuiltinOperator_SLICE,
- BuiltinOperator_SIN,
- BuiltinOperator_TRANSPOSE_CONV,
- BuiltinOperator_SPARSE_TO_DENSE,
- BuiltinOperator_TILE,
- BuiltinOperator_EXPAND_DIMS,
- BuiltinOperator_EQUAL,
- BuiltinOperator_NOT_EQUAL,
- BuiltinOperator_LOG,
- BuiltinOperator_SUM,
- BuiltinOperator_SQRT,
- BuiltinOperator_RSQRT,
- BuiltinOperator_SHAPE,
- BuiltinOperator_POW,
- BuiltinOperator_ARG_MIN,
- BuiltinOperator_FAKE_QUANT,
- BuiltinOperator_REDUCE_PROD,
- BuiltinOperator_REDUCE_MAX,
- BuiltinOperator_PACK,
- BuiltinOperator_LOGICAL_OR,
- BuiltinOperator_ONE_HOT,
- BuiltinOperator_LOGICAL_AND,
- BuiltinOperator_LOGICAL_NOT,
- BuiltinOperator_UNPACK,
- BuiltinOperator_REDUCE_MIN,
- BuiltinOperator_FLOOR_DIV,
- BuiltinOperator_REDUCE_ANY,
- BuiltinOperator_SQUARE,
- BuiltinOperator_ZEROS_LIKE,
- BuiltinOperator_FILL,
- BuiltinOperator_FLOOR_MOD,
- BuiltinOperator_RANGE,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- BuiltinOperator_LEAKY_RELU,
- BuiltinOperator_SQUARED_DIFFERENCE,
- BuiltinOperator_MIRROR_PAD,
- BuiltinOperator_ABS,
- BuiltinOperator_SPLIT_V,
- BuiltinOperator_UNIQUE,
- BuiltinOperator_CEIL,
- BuiltinOperator_REVERSE_V2,
- BuiltinOperator_ADD_N,
- BuiltinOperator_GATHER_ND,
- BuiltinOperator_COS,
- BuiltinOperator_WHERE,
- BuiltinOperator_RANK,
- BuiltinOperator_ELU,
- BuiltinOperator_REVERSE_SEQUENCE,
- BuiltinOperator_MATRIX_DIAG,
- BuiltinOperator_QUANTIZE,
- BuiltinOperator_MATRIX_SET_DIAG,
- BuiltinOperator_ROUND,
- BuiltinOperator_HARD_SWISH,
- BuiltinOperator_IF,
- BuiltinOperator_WHILE,
- BuiltinOperator_NON_MAX_SUPPRESSION_V4,
- BuiltinOperator_NON_MAX_SUPPRESSION_V5,
- BuiltinOperator_SCATTER_ND,
- BuiltinOperator_SELECT_V2,
- BuiltinOperator_DENSIFY,
- BuiltinOperator_SEGMENT_SUM,
- BuiltinOperator_BATCH_MATMUL,
- BuiltinOperator_BCQ_GATHER,
- BuiltinOperator_BCQ_FULLY_CONNECTED,
- BuiltinOperator_INSTANCE_NORM};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOperator()
-{
- static const char *const names[] = {"ADD",
- "AVERAGE_POOL_2D",
- "CONCATENATION",
- "CONV_2D",
- "DEPTHWISE_CONV_2D",
- "DEPTH_TO_SPACE",
- "DEQUANTIZE",
- "EMBEDDING_LOOKUP",
- "FLOOR",
- "FULLY_CONNECTED",
- "HASHTABLE_LOOKUP",
- "L2_NORMALIZATION",
- "L2_POOL_2D",
- "LOCAL_RESPONSE_NORMALIZATION",
- "LOGISTIC",
- "LSH_PROJECTION",
- "LSTM",
- "MAX_POOL_2D",
- "MUL",
- "RELU",
- "RELU_N1_TO_1",
- "RELU6",
- "RESHAPE",
- "RESIZE_BILINEAR",
- "RNN",
- "SOFTMAX",
- "SPACE_TO_DEPTH",
- "SVDF",
- "TANH",
- "CONCAT_EMBEDDINGS",
- "SKIP_GRAM",
- "CALL",
- "CUSTOM",
- "EMBEDDING_LOOKUP_SPARSE",
- "PAD",
- "UNIDIRECTIONAL_SEQUENCE_RNN",
- "GATHER",
- "BATCH_TO_SPACE_ND",
- "SPACE_TO_BATCH_ND",
- "TRANSPOSE",
- "MEAN",
- "SUB",
- "DIV",
- "SQUEEZE",
- "UNIDIRECTIONAL_SEQUENCE_LSTM",
- "STRIDED_SLICE",
- "BIDIRECTIONAL_SEQUENCE_RNN",
- "EXP",
- "TOPK_V2",
- "SPLIT",
- "LOG_SOFTMAX",
- "DELEGATE",
- "BIDIRECTIONAL_SEQUENCE_LSTM",
- "CAST",
- "PRELU",
- "MAXIMUM",
- "ARG_MAX",
- "MINIMUM",
- "LESS",
- "NEG",
- "PADV2",
- "GREATER",
- "GREATER_EQUAL",
- "LESS_EQUAL",
- "SELECT",
- "SLICE",
- "SIN",
- "TRANSPOSE_CONV",
- "SPARSE_TO_DENSE",
- "TILE",
- "EXPAND_DIMS",
- "EQUAL",
- "NOT_EQUAL",
- "LOG",
- "SUM",
- "SQRT",
- "RSQRT",
- "SHAPE",
- "POW",
- "ARG_MIN",
- "FAKE_QUANT",
- "REDUCE_PROD",
- "REDUCE_MAX",
- "PACK",
- "LOGICAL_OR",
- "ONE_HOT",
- "LOGICAL_AND",
- "LOGICAL_NOT",
- "UNPACK",
- "REDUCE_MIN",
- "FLOOR_DIV",
- "REDUCE_ANY",
- "SQUARE",
- "ZEROS_LIKE",
- "FILL",
- "FLOOR_MOD",
- "RANGE",
- "RESIZE_NEAREST_NEIGHBOR",
- "LEAKY_RELU",
- "SQUARED_DIFFERENCE",
- "MIRROR_PAD",
- "ABS",
- "SPLIT_V",
- "UNIQUE",
- "CEIL",
- "REVERSE_V2",
- "ADD_N",
- "GATHER_ND",
- "COS",
- "WHERE",
- "RANK",
- "ELU",
- "REVERSE_SEQUENCE",
- "MATRIX_DIAG",
- "QUANTIZE",
- "MATRIX_SET_DIAG",
- "ROUND",
- "HARD_SWISH",
- "IF",
- "WHILE",
- "NON_MAX_SUPPRESSION_V4",
- "NON_MAX_SUPPRESSION_V5",
- "SCATTER_ND",
- "SELECT_V2",
- "DENSIFY",
- "SEGMENT_SUM",
- "BATCH_MATMUL",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "BCQ_GATHER",
- "BCQ_FULLY_CONNECTED",
- "INSTANCE_NORM",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOperator(BuiltinOperator e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOperator()[index];
-}
-
-enum BuiltinOptions
-{
- BuiltinOptions_NONE = 0,
- BuiltinOptions_Conv2DOptions = 1,
- BuiltinOptions_DepthwiseConv2DOptions = 2,
- BuiltinOptions_ConcatEmbeddingsOptions = 3,
- BuiltinOptions_LSHProjectionOptions = 4,
- BuiltinOptions_Pool2DOptions = 5,
- BuiltinOptions_SVDFOptions = 6,
- BuiltinOptions_RNNOptions = 7,
- BuiltinOptions_FullyConnectedOptions = 8,
- BuiltinOptions_SoftmaxOptions = 9,
- BuiltinOptions_ConcatenationOptions = 10,
- BuiltinOptions_AddOptions = 11,
- BuiltinOptions_L2NormOptions = 12,
- BuiltinOptions_LocalResponseNormalizationOptions = 13,
- BuiltinOptions_LSTMOptions = 14,
- BuiltinOptions_ResizeBilinearOptions = 15,
- BuiltinOptions_CallOptions = 16,
- BuiltinOptions_ReshapeOptions = 17,
- BuiltinOptions_SkipGramOptions = 18,
- BuiltinOptions_SpaceToDepthOptions = 19,
- BuiltinOptions_EmbeddingLookupSparseOptions = 20,
- BuiltinOptions_MulOptions = 21,
- BuiltinOptions_PadOptions = 22,
- BuiltinOptions_GatherOptions = 23,
- BuiltinOptions_BatchToSpaceNDOptions = 24,
- BuiltinOptions_SpaceToBatchNDOptions = 25,
- BuiltinOptions_TransposeOptions = 26,
- BuiltinOptions_ReducerOptions = 27,
- BuiltinOptions_SubOptions = 28,
- BuiltinOptions_DivOptions = 29,
- BuiltinOptions_SqueezeOptions = 30,
- BuiltinOptions_SequenceRNNOptions = 31,
- BuiltinOptions_StridedSliceOptions = 32,
- BuiltinOptions_ExpOptions = 33,
- BuiltinOptions_TopKV2Options = 34,
- BuiltinOptions_SplitOptions = 35,
- BuiltinOptions_LogSoftmaxOptions = 36,
- BuiltinOptions_CastOptions = 37,
- BuiltinOptions_DequantizeOptions = 38,
- BuiltinOptions_MaximumMinimumOptions = 39,
- BuiltinOptions_ArgMaxOptions = 40,
- BuiltinOptions_LessOptions = 41,
- BuiltinOptions_NegOptions = 42,
- BuiltinOptions_PadV2Options = 43,
- BuiltinOptions_GreaterOptions = 44,
- BuiltinOptions_GreaterEqualOptions = 45,
- BuiltinOptions_LessEqualOptions = 46,
- BuiltinOptions_SelectOptions = 47,
- BuiltinOptions_SliceOptions = 48,
- BuiltinOptions_TransposeConvOptions = 49,
- BuiltinOptions_SparseToDenseOptions = 50,
- BuiltinOptions_TileOptions = 51,
- BuiltinOptions_ExpandDimsOptions = 52,
- BuiltinOptions_EqualOptions = 53,
- BuiltinOptions_NotEqualOptions = 54,
- BuiltinOptions_ShapeOptions = 55,
- BuiltinOptions_PowOptions = 56,
- BuiltinOptions_ArgMinOptions = 57,
- BuiltinOptions_FakeQuantOptions = 58,
- BuiltinOptions_PackOptions = 59,
- BuiltinOptions_LogicalOrOptions = 60,
- BuiltinOptions_OneHotOptions = 61,
- BuiltinOptions_LogicalAndOptions = 62,
- BuiltinOptions_LogicalNotOptions = 63,
- BuiltinOptions_UnpackOptions = 64,
- BuiltinOptions_FloorDivOptions = 65,
- BuiltinOptions_SquareOptions = 66,
- BuiltinOptions_ZerosLikeOptions = 67,
- BuiltinOptions_FillOptions = 68,
- BuiltinOptions_BidirectionalSequenceLSTMOptions = 69,
- BuiltinOptions_BidirectionalSequenceRNNOptions = 70,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71,
- BuiltinOptions_FloorModOptions = 72,
- BuiltinOptions_RangeOptions = 73,
- BuiltinOptions_ResizeNearestNeighborOptions = 74,
- BuiltinOptions_LeakyReluOptions = 75,
- BuiltinOptions_SquaredDifferenceOptions = 76,
- BuiltinOptions_MirrorPadOptions = 77,
- BuiltinOptions_AbsOptions = 78,
- BuiltinOptions_SplitVOptions = 79,
- BuiltinOptions_UniqueOptions = 80,
- BuiltinOptions_ReverseV2Options = 81,
- BuiltinOptions_AddNOptions = 82,
- BuiltinOptions_GatherNdOptions = 83,
- BuiltinOptions_CosOptions = 84,
- BuiltinOptions_WhereOptions = 85,
- BuiltinOptions_RankOptions = 86,
- BuiltinOptions_ReverseSequenceOptions = 87,
- BuiltinOptions_MatrixDiagOptions = 88,
- BuiltinOptions_QuantizeOptions = 89,
- BuiltinOptions_MatrixSetDiagOptions = 90,
- BuiltinOptions_HardSwishOptions = 91,
- BuiltinOptions_IfOptions = 92,
- BuiltinOptions_WhileOptions = 93,
- BuiltinOptions_DepthToSpaceOptions = 94,
- BuiltinOptions_NonMaxSuppressionV4Options = 95,
- BuiltinOptions_NonMaxSuppressionV5Options = 96,
- BuiltinOptions_ScatterNdOptions = 97,
- BuiltinOptions_SelectV2Options = 98,
- BuiltinOptions_DensifyOptions = 99,
- BuiltinOptions_SegmentSumOptions = 100,
- BuiltinOptions_BatchMatMulOptions = 101,
- BuiltinOptions_BCQGatherOptions = 252,
- BuiltinOptions_BCQFullyConnectedOptions = 253,
- BuiltinOptions_InstanceNormOptions = 254,
- BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_InstanceNormOptions
-};
-
-inline const BuiltinOptions (&EnumValuesBuiltinOptions())[105]
-{
- static const BuiltinOptions values[] = {BuiltinOptions_NONE,
- BuiltinOptions_Conv2DOptions,
- BuiltinOptions_DepthwiseConv2DOptions,
- BuiltinOptions_ConcatEmbeddingsOptions,
- BuiltinOptions_LSHProjectionOptions,
- BuiltinOptions_Pool2DOptions,
- BuiltinOptions_SVDFOptions,
- BuiltinOptions_RNNOptions,
- BuiltinOptions_FullyConnectedOptions,
- BuiltinOptions_SoftmaxOptions,
- BuiltinOptions_ConcatenationOptions,
- BuiltinOptions_AddOptions,
- BuiltinOptions_L2NormOptions,
- BuiltinOptions_LocalResponseNormalizationOptions,
- BuiltinOptions_LSTMOptions,
- BuiltinOptions_ResizeBilinearOptions,
- BuiltinOptions_CallOptions,
- BuiltinOptions_ReshapeOptions,
- BuiltinOptions_SkipGramOptions,
- BuiltinOptions_SpaceToDepthOptions,
- BuiltinOptions_EmbeddingLookupSparseOptions,
- BuiltinOptions_MulOptions,
- BuiltinOptions_PadOptions,
- BuiltinOptions_GatherOptions,
- BuiltinOptions_BatchToSpaceNDOptions,
- BuiltinOptions_SpaceToBatchNDOptions,
- BuiltinOptions_TransposeOptions,
- BuiltinOptions_ReducerOptions,
- BuiltinOptions_SubOptions,
- BuiltinOptions_DivOptions,
- BuiltinOptions_SqueezeOptions,
- BuiltinOptions_SequenceRNNOptions,
- BuiltinOptions_StridedSliceOptions,
- BuiltinOptions_ExpOptions,
- BuiltinOptions_TopKV2Options,
- BuiltinOptions_SplitOptions,
- BuiltinOptions_LogSoftmaxOptions,
- BuiltinOptions_CastOptions,
- BuiltinOptions_DequantizeOptions,
- BuiltinOptions_MaximumMinimumOptions,
- BuiltinOptions_ArgMaxOptions,
- BuiltinOptions_LessOptions,
- BuiltinOptions_NegOptions,
- BuiltinOptions_PadV2Options,
- BuiltinOptions_GreaterOptions,
- BuiltinOptions_GreaterEqualOptions,
- BuiltinOptions_LessEqualOptions,
- BuiltinOptions_SelectOptions,
- BuiltinOptions_SliceOptions,
- BuiltinOptions_TransposeConvOptions,
- BuiltinOptions_SparseToDenseOptions,
- BuiltinOptions_TileOptions,
- BuiltinOptions_ExpandDimsOptions,
- BuiltinOptions_EqualOptions,
- BuiltinOptions_NotEqualOptions,
- BuiltinOptions_ShapeOptions,
- BuiltinOptions_PowOptions,
- BuiltinOptions_ArgMinOptions,
- BuiltinOptions_FakeQuantOptions,
- BuiltinOptions_PackOptions,
- BuiltinOptions_LogicalOrOptions,
- BuiltinOptions_OneHotOptions,
- BuiltinOptions_LogicalAndOptions,
- BuiltinOptions_LogicalNotOptions,
- BuiltinOptions_UnpackOptions,
- BuiltinOptions_FloorDivOptions,
- BuiltinOptions_SquareOptions,
- BuiltinOptions_ZerosLikeOptions,
- BuiltinOptions_FillOptions,
- BuiltinOptions_BidirectionalSequenceLSTMOptions,
- BuiltinOptions_BidirectionalSequenceRNNOptions,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions,
- BuiltinOptions_FloorModOptions,
- BuiltinOptions_RangeOptions,
- BuiltinOptions_ResizeNearestNeighborOptions,
- BuiltinOptions_LeakyReluOptions,
- BuiltinOptions_SquaredDifferenceOptions,
- BuiltinOptions_MirrorPadOptions,
- BuiltinOptions_AbsOptions,
- BuiltinOptions_SplitVOptions,
- BuiltinOptions_UniqueOptions,
- BuiltinOptions_ReverseV2Options,
- BuiltinOptions_AddNOptions,
- BuiltinOptions_GatherNdOptions,
- BuiltinOptions_CosOptions,
- BuiltinOptions_WhereOptions,
- BuiltinOptions_RankOptions,
- BuiltinOptions_ReverseSequenceOptions,
- BuiltinOptions_MatrixDiagOptions,
- BuiltinOptions_QuantizeOptions,
- BuiltinOptions_MatrixSetDiagOptions,
- BuiltinOptions_HardSwishOptions,
- BuiltinOptions_IfOptions,
- BuiltinOptions_WhileOptions,
- BuiltinOptions_DepthToSpaceOptions,
- BuiltinOptions_NonMaxSuppressionV4Options,
- BuiltinOptions_NonMaxSuppressionV5Options,
- BuiltinOptions_ScatterNdOptions,
- BuiltinOptions_SelectV2Options,
- BuiltinOptions_DensifyOptions,
- BuiltinOptions_SegmentSumOptions,
- BuiltinOptions_BatchMatMulOptions,
- BuiltinOptions_BCQGatherOptions,
- BuiltinOptions_BCQFullyConnectedOptions,
- BuiltinOptions_InstanceNormOptions};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOptions()
-{
- static const char *const names[] = {"NONE",
- "Conv2DOptions",
- "DepthwiseConv2DOptions",
- "ConcatEmbeddingsOptions",
- "LSHProjectionOptions",
- "Pool2DOptions",
- "SVDFOptions",
- "RNNOptions",
- "FullyConnectedOptions",
- "SoftmaxOptions",
- "ConcatenationOptions",
- "AddOptions",
- "L2NormOptions",
- "LocalResponseNormalizationOptions",
- "LSTMOptions",
- "ResizeBilinearOptions",
- "CallOptions",
- "ReshapeOptions",
- "SkipGramOptions",
- "SpaceToDepthOptions",
- "EmbeddingLookupSparseOptions",
- "MulOptions",
- "PadOptions",
- "GatherOptions",
- "BatchToSpaceNDOptions",
- "SpaceToBatchNDOptions",
- "TransposeOptions",
- "ReducerOptions",
- "SubOptions",
- "DivOptions",
- "SqueezeOptions",
- "SequenceRNNOptions",
- "StridedSliceOptions",
- "ExpOptions",
- "TopKV2Options",
- "SplitOptions",
- "LogSoftmaxOptions",
- "CastOptions",
- "DequantizeOptions",
- "MaximumMinimumOptions",
- "ArgMaxOptions",
- "LessOptions",
- "NegOptions",
- "PadV2Options",
- "GreaterOptions",
- "GreaterEqualOptions",
- "LessEqualOptions",
- "SelectOptions",
- "SliceOptions",
- "TransposeConvOptions",
- "SparseToDenseOptions",
- "TileOptions",
- "ExpandDimsOptions",
- "EqualOptions",
- "NotEqualOptions",
- "ShapeOptions",
- "PowOptions",
- "ArgMinOptions",
- "FakeQuantOptions",
- "PackOptions",
- "LogicalOrOptions",
- "OneHotOptions",
- "LogicalAndOptions",
- "LogicalNotOptions",
- "UnpackOptions",
- "FloorDivOptions",
- "SquareOptions",
- "ZerosLikeOptions",
- "FillOptions",
- "BidirectionalSequenceLSTMOptions",
- "BidirectionalSequenceRNNOptions",
- "UnidirectionalSequenceLSTMOptions",
- "FloorModOptions",
- "RangeOptions",
- "ResizeNearestNeighborOptions",
- "LeakyReluOptions",
- "SquaredDifferenceOptions",
- "MirrorPadOptions",
- "AbsOptions",
- "SplitVOptions",
- "UniqueOptions",
- "ReverseV2Options",
- "AddNOptions",
- "GatherNdOptions",
- "CosOptions",
- "WhereOptions",
- "RankOptions",
- "ReverseSequenceOptions",
- "MatrixDiagOptions",
- "QuantizeOptions",
- "MatrixSetDiagOptions",
- "HardSwishOptions",
- "IfOptions",
- "WhileOptions",
- "DepthToSpaceOptions",
- "NonMaxSuppressionV4Options",
- "NonMaxSuppressionV5Options",
- "ScatterNdOptions",
- "SelectV2Options",
- "DensifyOptions",
- "SegmentSumOptions",
- "BatchMatMulOptions",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "BCQGatherOptions",
- "BCQFullyConnectedOptions",
- "InstanceNormOptions",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOptions(BuiltinOptions e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOptions()[index];
-}
-
-template <typename T> struct BuiltinOptionsTraits
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NONE;
-};
-
-template <> struct BuiltinOptionsTraits<Conv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DepthwiseConv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatEmbeddingsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSHProjectionOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions;
-};
-
-template <> struct BuiltinOptionsTraits<Pool2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SVDFOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FullyConnectedOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatenationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AddOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AddOptions;
-};
-
-template <> struct BuiltinOptionsTraits<L2NormOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LocalResponseNormalizationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeBilinearOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CallOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CallOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReshapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SkipGramOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToDepthOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EmbeddingLookupSparseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MulOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MulOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GatherOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BatchToSpaceNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToBatchNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReducerOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SubOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SubOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SqueezeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<StridedSliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TopKV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<SplitOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogSoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CastOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CastOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DequantizeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MaximumMinimumOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NegOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NegOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SelectOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeConvOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SparseToDenseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TileOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TileOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpandDimsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NotEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ShapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PowOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PowOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMinOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FakeQuantOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalOrOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions;
-};
-
-template <> struct BuiltinOptionsTraits<OneHotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalAndOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalNotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnpackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorDivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquareOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ZerosLikeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FillOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FillOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorModOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RangeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeNearestNeighborOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LeakyReluOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquaredDifferenceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MirrorPadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AbsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SplitVOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UniqueOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReverseV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<AddNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GatherNdOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CosOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CosOptions;
-};
-
-template <> struct BuiltinOptionsTraits<WhereOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RankOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RankOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReverseSequenceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MatrixDiagOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions;
-};
-
-template <> struct BuiltinOptionsTraits<QuantizeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MatrixSetDiagOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions;
-};
-
-template <> struct BuiltinOptionsTraits<HardSwishOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions;
-};
-
-template <> struct BuiltinOptionsTraits<IfOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_IfOptions;
-};
-
-template <> struct BuiltinOptionsTraits<WhileOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DepthToSpaceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NonMaxSuppressionV4Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options;
-};
-
-template <> struct BuiltinOptionsTraits<NonMaxSuppressionV5Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options;
-};
-
-template <> struct BuiltinOptionsTraits<ScatterNdOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SelectV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<DensifyOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SegmentSumOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BatchMatMulOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BCQGatherOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BCQGatherOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BCQFullyConnectedOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BCQFullyConnectedOptions;
-};
-
-template <> struct BuiltinOptionsTraits<InstanceNormOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_InstanceNormOptions;
-};
-
-bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
-bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum Padding
-{
- Padding_SAME = 0,
- Padding_VALID = 1,
- Padding_MIN = Padding_SAME,
- Padding_MAX = Padding_VALID
-};
-
-inline const Padding (&EnumValuesPadding())[2]
-{
- static const Padding values[] = {Padding_SAME, Padding_VALID};
- return values;
-}
-
-inline const char *const *EnumNamesPadding()
-{
- static const char *const names[] = {"SAME", "VALID", nullptr};
- return names;
-}
-
-inline const char *EnumNamePadding(Padding e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesPadding()[index];
-}
-
-enum ActivationFunctionType
-{
- ActivationFunctionType_NONE = 0,
- ActivationFunctionType_RELU = 1,
- ActivationFunctionType_RELU_N1_TO_1 = 2,
- ActivationFunctionType_RELU6 = 3,
- ActivationFunctionType_TANH = 4,
- ActivationFunctionType_SIGN_BIT = 5,
- ActivationFunctionType_MIN = ActivationFunctionType_NONE,
- ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT
-};
-
-inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6]
-{
- static const ActivationFunctionType values[] = {
- ActivationFunctionType_NONE, ActivationFunctionType_RELU,
- ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6,
- ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT};
- return values;
-}
-
-inline const char *const *EnumNamesActivationFunctionType()
-{
- static const char *const names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6",
- "TANH", "SIGN_BIT", nullptr};
- return names;
-}
-
-inline const char *EnumNameActivationFunctionType(ActivationFunctionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesActivationFunctionType()[index];
-}
-
-enum LSHProjectionType
-{
- LSHProjectionType_UNKNOWN = 0,
- LSHProjectionType_SPARSE = 1,
- LSHProjectionType_DENSE = 2,
- LSHProjectionType_MIN = LSHProjectionType_UNKNOWN,
- LSHProjectionType_MAX = LSHProjectionType_DENSE
-};
-
-inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3]
-{
- static const LSHProjectionType values[] = {LSHProjectionType_UNKNOWN, LSHProjectionType_SPARSE,
- LSHProjectionType_DENSE};
- return values;
-}
-
-inline const char *const *EnumNamesLSHProjectionType()
-{
- static const char *const names[] = {"UNKNOWN", "SPARSE", "DENSE", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSHProjectionType(LSHProjectionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSHProjectionType()[index];
-}
-
-enum FullyConnectedOptionsWeightsFormat
-{
- FullyConnectedOptionsWeightsFormat_DEFAULT = 0,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1,
- FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
-};
-
-inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2]
-{
- static const FullyConnectedOptionsWeightsFormat values[] = {
- FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8};
- return values;
-}
-
-inline const char *const *EnumNamesFullyConnectedOptionsWeightsFormat()
-{
- static const char *const names[] = {"DEFAULT", "SHUFFLED4x16INT8", nullptr};
- return names;
-}
-
-inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesFullyConnectedOptionsWeightsFormat()[index];
-}
-
-enum LSTMKernelType
-{
- LSTMKernelType_FULL = 0,
- LSTMKernelType_BASIC = 1,
- LSTMKernelType_MIN = LSTMKernelType_FULL,
- LSTMKernelType_MAX = LSTMKernelType_BASIC
-};
-
-inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2]
-{
- static const LSTMKernelType values[] = {LSTMKernelType_FULL, LSTMKernelType_BASIC};
- return values;
-}
-
-inline const char *const *EnumNamesLSTMKernelType()
-{
- static const char *const names[] = {"FULL", "BASIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSTMKernelType(LSTMKernelType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSTMKernelType()[index];
-}
-
-enum CombinerType
-{
- CombinerType_SUM = 0,
- CombinerType_MEAN = 1,
- CombinerType_SQRTN = 2,
- CombinerType_MIN = CombinerType_SUM,
- CombinerType_MAX = CombinerType_SQRTN
-};
-
-inline const CombinerType (&EnumValuesCombinerType())[3]
-{
- static const CombinerType values[] = {CombinerType_SUM, CombinerType_MEAN, CombinerType_SQRTN};
- return values;
-}
-
-inline const char *const *EnumNamesCombinerType()
-{
- static const char *const names[] = {"SUM", "MEAN", "SQRTN", nullptr};
- return names;
-}
-
-inline const char *EnumNameCombinerType(CombinerType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCombinerType()[index];
-}
-
-enum MirrorPadMode
-{
- MirrorPadMode_REFLECT = 0,
- MirrorPadMode_SYMMETRIC = 1,
- MirrorPadMode_MIN = MirrorPadMode_REFLECT,
- MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC
-};
-
-inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2]
-{
- static const MirrorPadMode values[] = {MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC};
- return values;
-}
-
-inline const char *const *EnumNamesMirrorPadMode()
-{
- static const char *const names[] = {"REFLECT", "SYMMETRIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameMirrorPadMode(MirrorPadMode e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesMirrorPadMode()[index];
-}
-
-enum CustomOptionsFormat
-{
- CustomOptionsFormat_FLEXBUFFERS = 0,
- CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
- CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS
-};
-
-inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1]
-{
- static const CustomOptionsFormat values[] = {CustomOptionsFormat_FLEXBUFFERS};
- return values;
-}
-
-inline const char *const *EnumNamesCustomOptionsFormat()
-{
- static const char *const names[] = {"FLEXBUFFERS", nullptr};
- return names;
-}
-
-inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCustomOptionsFormat()[index];
-}
-
-enum DataFormat
-{
- DataFormat_CHANNELS_LAST = 0,
- DataFormat_CHANNELS_FIRST = 1,
- DataFormat_MIN = DataFormat_CHANNELS_LAST,
- DataFormat_MAX = DataFormat_CHANNELS_FIRST
-};
-
-inline const DataFormat (&EnumValuesDataFormat())[2]
-{
- static const DataFormat values[] = {DataFormat_CHANNELS_LAST, DataFormat_CHANNELS_FIRST};
- return values;
-}
-
-inline const char *const *EnumNamesDataFormat()
-{
- static const char *const names[] = {"CHANNELS_LAST", "CHANNELS_FIRST", nullptr};
- return names;
-}
-
-inline const char *EnumNameDataFormat(DataFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesDataFormat()[index];
-}
-
-struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_CUSTOM = 4
- };
- const flatbuffers::Vector<uint8_t> *custom() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) &&
- verifier.VerifyVector(custom()) && verifier.EndTable();
- }
-};
-
-struct CustomQuantizationBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom)
- {
- fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
- }
- explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &);
- flatbuffers::Offset<CustomQuantization> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CustomQuantization>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0)
-{
- CustomQuantizationBuilder builder_(_fbb);
- builder_.add_custom(custom);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantizationDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *custom = nullptr)
-{
- return circle::CreateCustomQuantization(_fbb, custom ? _fbb.CreateVector<uint8_t>(*custom) : 0);
-}
-
-struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_SCALE = 8,
- VT_ZERO_POINT = 10,
- VT_DETAILS_TYPE = 12,
- VT_DETAILS = 14,
- VT_QUANTIZED_DIMENSION = 16
- };
- const flatbuffers::Vector<float> *min() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
- }
- const flatbuffers::Vector<float> *max() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
- }
- const flatbuffers::Vector<float> *scale() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
- }
- const flatbuffers::Vector<int64_t> *zero_point() const
- {
- return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
- }
- QuantizationDetails details_type() const
- {
- return static_cast<QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0));
- }
- const void *details() const { return GetPointer<const void *>(VT_DETAILS); }
- template <typename T> const T *details_as() const;
- const CustomQuantization *details_as_CustomQuantization() const
- {
- return details_type() == QuantizationDetails_CustomQuantization
- ? static_cast<const CustomQuantization *>(details())
- : nullptr;
- }
- int32_t quantized_dimension() const { return GetField<int32_t>(VT_QUANTIZED_DIMENSION, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) &&
- verifier.VerifyVector(min()) && VerifyOffset(verifier, VT_MAX) &&
- verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) &&
- verifier.VerifyVector(scale()) && VerifyOffset(verifier, VT_ZERO_POINT) &&
- verifier.VerifyVector(zero_point()) && VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE) &&
- VerifyOffset(verifier, VT_DETAILS) &&
- VerifyQuantizationDetails(verifier, details(), details_type()) &&
- VerifyField<int32_t>(verifier, VT_QUANTIZED_DIMENSION) && verifier.EndTable();
- }
-};
-
-template <>
-inline const CustomQuantization *QuantizationParameters::details_as<CustomQuantization>() const
-{
- return details_as_CustomQuantization();
-}
-
-struct QuantizationParametersBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
- }
- void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
- }
- void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale)
- {
- fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
- }
- void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point)
- {
- fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
- }
- void add_details_type(QuantizationDetails details_type)
- {
- fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE,
- static_cast<uint8_t>(details_type), 0);
- }
- void add_details(flatbuffers::Offset<void> details)
- {
- fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details);
- }
- void add_quantized_dimension(int32_t quantized_dimension)
- {
- fbb_.AddElement<int32_t>(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension,
- 0);
- }
- explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &);
- flatbuffers::Offset<QuantizationParameters> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<QuantizationParameters>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<QuantizationParameters>
-CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
- flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0)
-{
- QuantizationParametersBuilder builder_(_fbb);
- builder_.add_quantized_dimension(quantized_dimension);
- builder_.add_details(details);
- builder_.add_zero_point(zero_point);
- builder_.add_scale(scale);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_details_type(details_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<float> *min = nullptr,
- const std::vector<float> *max = nullptr, const std::vector<float> *scale = nullptr,
- const std::vector<int64_t> *zero_point = nullptr,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0)
-{
- return circle::CreateQuantizationParameters(
- _fbb, min ? _fbb.CreateVector<float>(*min) : 0, max ? _fbb.CreateVector<float>(*max) : 0,
- scale ? _fbb.CreateVector<float>(*scale) : 0,
- zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0, details_type, details,
- quantized_dimension);
-}
-
-struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES = 4
- };
- const flatbuffers::Vector<int32_t> *values() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_VALUES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
- verifier.VerifyVector(values()) && verifier.EndTable();
- }
-};
-
-struct Int32VectorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values(flatbuffers::Offset<flatbuffers::Vector<int32_t>> values)
- {
- fbb_.AddOffset(Int32Vector::VT_VALUES, values);
- }
- explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Int32VectorBuilder &operator=(const Int32VectorBuilder &);
- flatbuffers::Offset<Int32Vector> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Int32Vector>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Int32Vector>
-CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> values = 0)
-{
- Int32VectorBuilder builder_(_fbb);
- builder_.add_values(values);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Int32Vector>
-CreateInt32VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *values = nullptr)
-{
- return circle::CreateInt32Vector(_fbb, values ? _fbb.CreateVector<int32_t>(*values) : 0);
-}
-
-struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES = 4
- };
- const flatbuffers::Vector<uint16_t> *values() const
- {
- return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
- verifier.VerifyVector(values()) && verifier.EndTable();
- }
-};
-
-struct Uint16VectorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values)
- {
- fbb_.AddOffset(Uint16Vector::VT_VALUES, values);
- }
- explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Uint16VectorBuilder &operator=(const Uint16VectorBuilder &);
- flatbuffers::Offset<Uint16Vector> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Uint16Vector>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Uint16Vector>
-CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0)
-{
- Uint16VectorBuilder builder_(_fbb);
- builder_.add_values(values);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Uint16Vector>
-CreateUint16VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint16_t> *values = nullptr)
-{
- return circle::CreateUint16Vector(_fbb, values ? _fbb.CreateVector<uint16_t>(*values) : 0);
-}
-
-struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES = 4
- };
- const flatbuffers::Vector<uint8_t> *values() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
- verifier.VerifyVector(values()) && verifier.EndTable();
- }
-};
-
-struct Uint8VectorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values)
- {
- fbb_.AddOffset(Uint8Vector::VT_VALUES, values);
- }
- explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Uint8VectorBuilder &operator=(const Uint8VectorBuilder &);
- flatbuffers::Offset<Uint8Vector> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Uint8Vector>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Uint8Vector>
-CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0)
-{
- Uint8VectorBuilder builder_(_fbb);
- builder_.add_values(values);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Uint8Vector>
-CreateUint8VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *values = nullptr)
-{
- return circle::CreateUint8Vector(_fbb, values ? _fbb.CreateVector<uint8_t>(*values) : 0);
-}
-
-struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FORMAT = 4,
- VT_DENSE_SIZE = 6,
- VT_ARRAY_SEGMENTS_TYPE = 8,
- VT_ARRAY_SEGMENTS = 10,
- VT_ARRAY_INDICES_TYPE = 12,
- VT_ARRAY_INDICES = 14
- };
- DimensionType format() const
- {
- return static_cast<DimensionType>(GetField<int8_t>(VT_FORMAT, 0));
- }
- int32_t dense_size() const { return GetField<int32_t>(VT_DENSE_SIZE, 0); }
- SparseIndexVector array_segments_type() const
- {
- return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_SEGMENTS_TYPE, 0));
- }
- const void *array_segments() const { return GetPointer<const void *>(VT_ARRAY_SEGMENTS); }
- template <typename T> const T *array_segments_as() const;
- const Int32Vector *array_segments_as_Int32Vector() const
- {
- return array_segments_type() == SparseIndexVector_Int32Vector
- ? static_cast<const Int32Vector *>(array_segments())
- : nullptr;
- }
- const Uint16Vector *array_segments_as_Uint16Vector() const
- {
- return array_segments_type() == SparseIndexVector_Uint16Vector
- ? static_cast<const Uint16Vector *>(array_segments())
- : nullptr;
- }
- const Uint8Vector *array_segments_as_Uint8Vector() const
- {
- return array_segments_type() == SparseIndexVector_Uint8Vector
- ? static_cast<const Uint8Vector *>(array_segments())
- : nullptr;
- }
- SparseIndexVector array_indices_type() const
- {
- return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_INDICES_TYPE, 0));
- }
- const void *array_indices() const { return GetPointer<const void *>(VT_ARRAY_INDICES); }
- template <typename T> const T *array_indices_as() const;
- const Int32Vector *array_indices_as_Int32Vector() const
- {
- return array_indices_type() == SparseIndexVector_Int32Vector
- ? static_cast<const Int32Vector *>(array_indices())
- : nullptr;
- }
- const Uint16Vector *array_indices_as_Uint16Vector() const
- {
- return array_indices_type() == SparseIndexVector_Uint16Vector
- ? static_cast<const Uint16Vector *>(array_indices())
- : nullptr;
- }
- const Uint8Vector *array_indices_as_Uint8Vector() const
- {
- return array_indices_type() == SparseIndexVector_Uint8Vector
- ? static_cast<const Uint8Vector *>(array_indices())
- : nullptr;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_FORMAT) &&
- VerifyField<int32_t>(verifier, VT_DENSE_SIZE) &&
- VerifyField<uint8_t>(verifier, VT_ARRAY_SEGMENTS_TYPE) &&
- VerifyOffset(verifier, VT_ARRAY_SEGMENTS) &&
- VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) &&
- VerifyField<uint8_t>(verifier, VT_ARRAY_INDICES_TYPE) &&
- VerifyOffset(verifier, VT_ARRAY_INDICES) &&
- VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) &&
- verifier.EndTable();
- }
-};
-
-template <> inline const Int32Vector *DimensionMetadata::array_segments_as<Int32Vector>() const
-{
- return array_segments_as_Int32Vector();
-}
-
-template <> inline const Uint16Vector *DimensionMetadata::array_segments_as<Uint16Vector>() const
-{
- return array_segments_as_Uint16Vector();
-}
-
-template <> inline const Uint8Vector *DimensionMetadata::array_segments_as<Uint8Vector>() const
-{
- return array_segments_as_Uint8Vector();
-}
-
-template <> inline const Int32Vector *DimensionMetadata::array_indices_as<Int32Vector>() const
-{
- return array_indices_as_Int32Vector();
-}
-
-template <> inline const Uint16Vector *DimensionMetadata::array_indices_as<Uint16Vector>() const
-{
- return array_indices_as_Uint16Vector();
-}
-
-template <> inline const Uint8Vector *DimensionMetadata::array_indices_as<Uint8Vector>() const
-{
- return array_indices_as_Uint8Vector();
-}
-
-struct DimensionMetadataBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_format(DimensionType format)
- {
- fbb_.AddElement<int8_t>(DimensionMetadata::VT_FORMAT, static_cast<int8_t>(format), 0);
- }
- void add_dense_size(int32_t dense_size)
- {
- fbb_.AddElement<int32_t>(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0);
- }
- void add_array_segments_type(SparseIndexVector array_segments_type)
- {
- fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE,
- static_cast<uint8_t>(array_segments_type), 0);
- }
- void add_array_segments(flatbuffers::Offset<void> array_segments)
- {
- fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments);
- }
- void add_array_indices_type(SparseIndexVector array_indices_type)
- {
- fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_INDICES_TYPE,
- static_cast<uint8_t>(array_indices_type), 0);
- }
- void add_array_indices(flatbuffers::Offset<void> array_indices)
- {
- fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices);
- }
- explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DimensionMetadataBuilder &operator=(const DimensionMetadataBuilder &);
- flatbuffers::Offset<DimensionMetadata> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DimensionMetadata>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DimensionMetadata>
-CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb,
- DimensionType format = DimensionType_DENSE, int32_t dense_size = 0,
- SparseIndexVector array_segments_type = SparseIndexVector_NONE,
- flatbuffers::Offset<void> array_segments = 0,
- SparseIndexVector array_indices_type = SparseIndexVector_NONE,
- flatbuffers::Offset<void> array_indices = 0)
-{
- DimensionMetadataBuilder builder_(_fbb);
- builder_.add_array_indices(array_indices);
- builder_.add_array_segments(array_segments);
- builder_.add_dense_size(dense_size);
- builder_.add_array_indices_type(array_indices_type);
- builder_.add_array_segments_type(array_segments_type);
- builder_.add_format(format);
- return builder_.Finish();
-}
-
-struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TRAVERSAL_ORDER = 4,
- VT_BLOCK_MAP = 6,
- VT_DIM_METADATA = 8
- };
- const flatbuffers::Vector<int32_t> *traversal_order() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER);
- }
- const flatbuffers::Vector<int32_t> *block_map() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP);
- }
- const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *>(
- VT_DIM_METADATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TRAVERSAL_ORDER) &&
- verifier.VerifyVector(traversal_order()) && VerifyOffset(verifier, VT_BLOCK_MAP) &&
- verifier.VerifyVector(block_map()) && VerifyOffset(verifier, VT_DIM_METADATA) &&
- verifier.VerifyVector(dim_metadata()) && verifier.VerifyVectorOfTables(dim_metadata()) &&
- verifier.EndTable();
- }
-};
-
-struct SparsityParametersBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_traversal_order(flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order)
- {
- fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order);
- }
- void add_block_map(flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map)
- {
- fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map);
- }
- void add_dim_metadata(
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata)
- {
- fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata);
- }
- explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SparsityParametersBuilder &operator=(const SparsityParametersBuilder &);
- flatbuffers::Offset<SparsityParameters> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SparsityParameters>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata =
- 0)
-{
- SparsityParametersBuilder builder_(_fbb);
- builder_.add_dim_metadata(dim_metadata);
- builder_.add_block_map(block_map);
- builder_.add_traversal_order(traversal_order);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *traversal_order = nullptr,
- const std::vector<int32_t> *block_map = nullptr,
- const std::vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata = nullptr)
-{
- return circle::CreateSparsityParameters(
- _fbb, traversal_order ? _fbb.CreateVector<int32_t>(*traversal_order) : 0,
- block_map ? _fbb.CreateVector<int32_t>(*block_map) : 0,
- dim_metadata ? _fbb.CreateVector<flatbuffers::Offset<DimensionMetadata>>(*dim_metadata) : 0);
-}
-
-struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SHAPE = 4,
- VT_TYPE = 6,
- VT_BUFFER = 8,
- VT_NAME = 10,
- VT_QUANTIZATION = 12,
- VT_IS_VARIABLE = 14,
- VT_SPARSITY = 16,
- VT_SHAPE_SIGNATURE = 18
- };
- const flatbuffers::Vector<int32_t> *shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
- }
- TensorType type() const { return static_cast<TensorType>(GetField<int8_t>(VT_TYPE, 0)); }
- uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- const QuantizationParameters *quantization() const
- {
- return GetPointer<const QuantizationParameters *>(VT_QUANTIZATION);
- }
- bool is_variable() const { return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0; }
- const SparsityParameters *sparsity() const
- {
- return GetPointer<const SparsityParameters *>(VT_SPARSITY);
- }
- const flatbuffers::Vector<int32_t> *shape_signature() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) &&
- verifier.VerifyVector(shape()) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- VerifyField<uint32_t>(verifier, VT_BUFFER) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyOffset(verifier, VT_QUANTIZATION) &&
- verifier.VerifyTable(quantization()) && VerifyField<uint8_t>(verifier, VT_IS_VARIABLE) &&
- VerifyOffset(verifier, VT_SPARSITY) && verifier.VerifyTable(sparsity()) &&
- VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && verifier.VerifyVector(shape_signature()) &&
- verifier.EndTable();
- }
-};
-
-struct TensorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape)
- {
- fbb_.AddOffset(Tensor::VT_SHAPE, shape);
- }
- void add_type(TensorType type)
- {
- fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0); }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(Tensor::VT_NAME, name);
- }
- void add_quantization(flatbuffers::Offset<QuantizationParameters> quantization)
- {
- fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
- }
- void add_is_variable(bool is_variable)
- {
- fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0);
- }
- void add_sparsity(flatbuffers::Offset<SparsityParameters> sparsity)
- {
- fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity);
- }
- void add_shape_signature(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature)
- {
- fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature);
- }
- explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TensorBuilder &operator=(const TensorBuilder &);
- flatbuffers::Offset<Tensor> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Tensor>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Tensor>
-CreateTensor(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false,
- flatbuffers::Offset<SparsityParameters> sparsity = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature = 0)
-{
- TensorBuilder builder_(_fbb);
- builder_.add_shape_signature(shape_signature);
- builder_.add_sparsity(sparsity);
- builder_.add_quantization(quantization);
- builder_.add_name(name);
- builder_.add_buffer(buffer);
- builder_.add_shape(shape);
- builder_.add_is_variable(is_variable);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Tensor> CreateTensorDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *shape = nullptr,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, const char *name = nullptr,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false,
- flatbuffers::Offset<SparsityParameters> sparsity = 0,
- const std::vector<int32_t> *shape_signature = nullptr)
-{
- return circle::CreateTensor(_fbb, shape ? _fbb.CreateVector<int32_t>(*shape) : 0, type, buffer,
- name ? _fbb.CreateString(name) : 0, quantization, is_variable,
- sparsity,
- shape_signature ? _fbb.CreateVector<int32_t>(*shape_signature) : 0);
-}
-
-struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FUSED_ACTIVATION_FUNCTION = 10,
- VT_DILATION_W_FACTOR = 12,
- VT_DILATION_H_FACTOR = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct Conv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &);
- flatbuffers::Offset<Conv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Conv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Conv2DOptions>
-CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- Conv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FILTER_WIDTH = 10,
- VT_FILTER_HEIGHT = 12,
- VT_FUSED_ACTIVATION_FUNCTION = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t filter_width() const { return GetField<int32_t>(VT_FILTER_WIDTH, 0); }
- int32_t filter_height() const { return GetField<int32_t>(VT_FILTER_HEIGHT, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) &&
- VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct Pool2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_filter_width(int32_t filter_width)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0);
- }
- void add_filter_height(int32_t filter_height)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &);
- flatbuffers::Offset<Pool2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Pool2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Pool2DOptions>
-CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0, int32_t filter_width = 0,
- int32_t filter_height = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- Pool2DOptionsBuilder builder_(_fbb);
- builder_.add_filter_height(filter_height);
- builder_.add_filter_width(filter_width);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_DEPTH_MULTIPLIER = 10,
- VT_FUSED_ACTIVATION_FUNCTION = 12,
- VT_DILATION_W_FACTOR = 14,
- VT_DILATION_H_FACTOR = 16
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t depth_multiplier() const { return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct DepthwiseConv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_depth_multiplier(int32_t depth_multiplier)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &);
- flatbuffers::Offset<DepthwiseConv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
- flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, int32_t stride_w = 0,
- int32_t stride_h = 0, int32_t depth_multiplier = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- DepthwiseConv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_depth_multiplier(depth_multiplier);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_CHANNELS = 4,
- VT_NUM_COLUMNS_PER_CHANNEL = 6,
- VT_EMBEDDING_DIM_PER_CHANNEL = 8
- };
- int32_t num_channels() const { return GetField<int32_t>(VT_NUM_CHANNELS, 0); }
- const flatbuffers::Vector<int32_t> *num_columns_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
- }
- const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) &&
- VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
- verifier.VerifyVector(num_columns_per_channel()) &&
- VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
- verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable();
- }
-};
-
-struct ConcatEmbeddingsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_channels(int32_t num_channels)
- {
- fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
- }
- void add_num_columns_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
- }
- void add_embedding_dim_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL,
- embedding_dim_per_channel);
- }
- explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &);
- flatbuffers::Offset<ConcatEmbeddingsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0)
-{
- ConcatEmbeddingsOptionsBuilder builder_(_fbb);
- builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
- builder_.add_num_columns_per_channel(num_columns_per_channel);
- builder_.add_num_channels(num_channels);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions>
-CreateConcatEmbeddingsOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- const std::vector<int32_t> *num_columns_per_channel = nullptr,
- const std::vector<int32_t> *embedding_dim_per_channel = nullptr)
-{
- return circle::CreateConcatEmbeddingsOptions(
- _fbb, num_channels,
- num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0,
- embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0);
-}
-
-struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TYPE = 4
- };
- LSHProjectionType type() const
- {
- return static_cast<LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct LSHProjectionOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_type(LSHProjectionType type)
- {
- fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &);
- flatbuffers::Offset<LSHProjectionOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSHProjectionOptions>
-CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb,
- LSHProjectionType type = LSHProjectionType_UNKNOWN)
-{
- LSHProjectionOptionsBuilder builder_(_fbb);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RANK = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
- };
- int32_t rank() const { return GetField<int32_t>(VT_RANK, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RANK) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct SVDFOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_rank(int32_t rank) { fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &);
- flatbuffers::Offset<SVDFOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SVDFOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SVDFOptions>
-CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t rank = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool asymmetric_quantize_inputs = false)
-{
- SVDFOptionsBuilder builder_(_fbb);
- builder_.add_rank(rank);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 6
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct RNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RNNOptionsBuilder &operator=(const RNNOptionsBuilder &);
- flatbuffers::Offset<RNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RNNOptions>
-CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool asymmetric_quantize_inputs = false)
-{
- RNNOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct SequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major),
- 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &);
- flatbuffers::Offset<SequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool asymmetric_quantize_inputs = false)
-{
- SequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_MERGE_OUTPUTS = 8,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool merge_outputs = false, bool asymmetric_quantize_inputs = false)
-{
- BidirectionalSequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_WEIGHTS_FORMAT = 6,
- VT_KEEP_NUM_DIMS = 8,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- FullyConnectedOptionsWeightsFormat weights_format() const
- {
- return static_cast<FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0));
- }
- bool keep_num_dims() const { return GetField<uint8_t>(VT_KEEP_NUM_DIMS, 0) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) &&
- VerifyField<uint8_t>(verifier, VT_KEEP_NUM_DIMS) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct FullyConnectedOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT,
- static_cast<int8_t>(weights_format), 0);
- }
- void add_keep_num_dims(bool keep_num_dims)
- {
- fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_KEEP_NUM_DIMS,
- static_cast<uint8_t>(keep_num_dims), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &);
- flatbuffers::Offset<FullyConnectedOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT,
- bool keep_num_dims = false, bool asymmetric_quantize_inputs = false)
-{
- FullyConnectedOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_keep_num_dims(keep_num_dims);
- builder_.add_weights_format(weights_format);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BETA = 4
- };
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_BETA) &&
- verifier.EndTable();
- }
-};
-
-struct SoftmaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_beta(float beta) { fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f); }
- explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &);
- flatbuffers::Offset<SoftmaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SoftmaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SoftmaxOptions>
-CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, float beta = 0.0f)
-{
- SoftmaxOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- return builder_.Finish();
-}
-
-struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct ConcatenationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &);
- flatbuffers::Offset<ConcatenationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatenationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- ConcatenationOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct AddOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AddOptionsBuilder &operator=(const AddOptionsBuilder &);
- flatbuffers::Offset<AddOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AddOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AddOptions>
-CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- AddOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct MulOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MulOptionsBuilder &operator=(const MulOptionsBuilder &);
- flatbuffers::Offset<MulOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MulOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MulOptions>
-CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- MulOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct L2NormOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &);
- flatbuffers::Offset<L2NormOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<L2NormOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<L2NormOptions>
-CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- L2NormOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RADIUS = 4,
- VT_BIAS = 6,
- VT_ALPHA = 8,
- VT_BETA = 10
- };
- int32_t radius() const { return GetField<int32_t>(VT_RADIUS, 0); }
- float bias() const { return GetField<float>(VT_BIAS, 0.0f); }
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RADIUS) &&
- VerifyField<float>(verifier, VT_BIAS) && VerifyField<float>(verifier, VT_ALPHA) &&
- VerifyField<float>(verifier, VT_BETA) && verifier.EndTable();
- }
-};
-
-struct LocalResponseNormalizationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_radius(int32_t radius)
- {
- fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0);
- }
- void add_bias(float bias)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f);
- }
- void add_alpha(float alpha)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f);
- }
- void add_beta(float beta)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f);
- }
- explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LocalResponseNormalizationOptionsBuilder &
- operator=(const LocalResponseNormalizationOptionsBuilder &);
- flatbuffers::Offset<LocalResponseNormalizationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LocalResponseNormalizationOptions>
-CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t radius = 0,
- float bias = 0.0f, float alpha = 0.0f, float beta = 0.0f)
-{
- LocalResponseNormalizationOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- builder_.add_alpha(alpha);
- builder_.add_bias(bias);
- builder_.add_radius(radius);
- return builder_.Finish();
-}
-
-struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_KERNEL_TYPE = 10,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 12
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- LSTMKernelType kernel_type() const
- {
- return static_cast<LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<int8_t>(verifier, VT_KERNEL_TYPE) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct LSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_kernel_type(LSTMKernelType kernel_type)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &);
- flatbuffers::Offset<LSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSTMOptions>
-CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f,
- LSTMKernelType kernel_type = LSTMKernelType_FULL,
- bool asymmetric_quantize_inputs = false)
-{
- LSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_kernel_type(kernel_type);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_TIME_MAJOR = 10,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 12
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct UnidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnidirectionalSequenceLSTMOptionsBuilder &
- operator=(const UnidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
-CreateUnidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool time_major = false,
- bool asymmetric_quantize_inputs = false)
-{
- UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_time_major(time_major);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_MERGE_OUTPUTS = 10,
- VT_TIME_MAJOR = 12,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 14
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 1) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) &&
- VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 1);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceLSTMOptionsBuilder &
- operator=(const BidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool merge_outputs = false,
- bool time_major = true, bool asymmetric_quantize_inputs = false)
-{
- BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_time_major(time_major);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 8,
- VT_HALF_PIXEL_CENTERS = 10
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool half_pixel_centers() const { return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS) && verifier.EndTable();
- }
-};
-
-struct ResizeBilinearOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- void add_half_pixel_centers(bool half_pixel_centers)
- {
- fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS,
- static_cast<uint8_t>(half_pixel_centers), 0);
- }
- explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &);
- flatbuffers::Offset<ResizeBilinearOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeBilinearOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeBilinearOptions>
-CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false,
- bool half_pixel_centers = false)
-{
- ResizeBilinearOptionsBuilder builder_(_fbb);
- builder_.add_half_pixel_centers(half_pixel_centers);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 4
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- verifier.EndTable();
- }
-};
-
-struct ResizeNearestNeighborOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &);
- flatbuffers::Offset<ResizeNearestNeighborOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeNearestNeighborOptions>
-CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false)
-{
- ResizeNearestNeighborOptionsBuilder builder_(_fbb);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SUBGRAPH = 4
- };
- uint32_t subgraph() const { return GetField<uint32_t>(VT_SUBGRAPH, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
- verifier.EndTable();
- }
-};
-
-struct CallOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_subgraph(uint32_t subgraph)
- {
- fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
- }
- explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CallOptionsBuilder &operator=(const CallOptionsBuilder &);
- flatbuffers::Offset<CallOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CallOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb,
- uint32_t subgraph = 0)
-{
- CallOptionsBuilder builder_(_fbb);
- builder_.add_subgraph(subgraph);
- return builder_.Finish();
-}
-
-struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadOptionsBuilder &operator=(const PadOptionsBuilder &);
- flatbuffers::Offset<PadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &);
- flatbuffers::Offset<PadV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NEW_SHAPE = 4
- };
- const flatbuffers::Vector<int32_t> *new_shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) &&
- verifier.VerifyVector(new_shape()) && verifier.EndTable();
- }
-};
-
-struct ReshapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape)
- {
- fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape);
- }
- explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &);
- flatbuffers::Offset<ReshapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReshapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0)
-{
- ReshapeOptionsBuilder builder_(_fbb);
- builder_.add_new_shape(new_shape);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *new_shape = nullptr)
-{
- return circle::CreateReshapeOptions(_fbb, new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0);
-}
-
-struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SpaceToBatchNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &);
- flatbuffers::Offset<SpaceToBatchNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToBatchNDOptions>
-CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SpaceToBatchNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct BatchToSpaceNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &);
- flatbuffers::Offset<BatchToSpaceNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BatchToSpaceNDOptions>
-CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- BatchToSpaceNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NGRAM_SIZE = 4,
- VT_MAX_SKIP_SIZE = 6,
- VT_INCLUDE_ALL_NGRAMS = 8
- };
- int32_t ngram_size() const { return GetField<int32_t>(VT_NGRAM_SIZE, 0); }
- int32_t max_skip_size() const { return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); }
- bool include_all_ngrams() const { return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) &&
- VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) &&
- VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) && verifier.EndTable();
- }
-};
-
-struct SkipGramOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_ngram_size(int32_t ngram_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0);
- }
- void add_max_skip_size(int32_t max_skip_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0);
- }
- void add_include_all_ngrams(bool include_all_ngrams)
- {
- fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS,
- static_cast<uint8_t>(include_all_ngrams), 0);
- }
- explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &);
- flatbuffers::Offset<SkipGramOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SkipGramOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SkipGramOptions>
-CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t ngram_size = 0,
- int32_t max_skip_size = 0, bool include_all_ngrams = false)
-{
- SkipGramOptionsBuilder builder_(_fbb);
- builder_.add_max_skip_size(max_skip_size);
- builder_.add_ngram_size(ngram_size);
- builder_.add_include_all_ngrams(include_all_ngrams);
- return builder_.Finish();
-}
-
-struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BLOCK_SIZE = 4
- };
- int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
- verifier.EndTable();
- }
-};
-
-struct SpaceToDepthOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_block_size(int32_t block_size)
- {
- fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0);
- }
- explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &);
- flatbuffers::Offset<SpaceToDepthOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToDepthOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToDepthOptions>
-CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
-{
- SpaceToDepthOptionsBuilder builder_(_fbb);
- builder_.add_block_size(block_size);
- return builder_.Finish();
-}
-
-struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BLOCK_SIZE = 4
- };
- int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
- verifier.EndTable();
- }
-};
-
-struct DepthToSpaceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_block_size(int32_t block_size)
- {
- fbb_.AddElement<int32_t>(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0);
- }
- explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &);
- flatbuffers::Offset<DepthToSpaceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DepthToSpaceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DepthToSpaceOptions>
-CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
-{
- DepthToSpaceOptionsBuilder builder_(_fbb);
- builder_.add_block_size(block_size);
- return builder_.Finish();
-}
-
-struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct SubOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SubOptionsBuilder &operator=(const SubOptionsBuilder &);
- flatbuffers::Offset<SubOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SubOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SubOptions>
-CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- SubOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct DivOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DivOptionsBuilder &operator=(const DivOptionsBuilder &);
- flatbuffers::Offset<DivOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DivOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DivOptions>
-CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- DivOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TopKV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &);
- flatbuffers::Offset<TopKV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TopKV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TopKV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_COMBINER = 4
- };
- CombinerType combiner() const
- {
- return static_cast<CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_COMBINER) &&
- verifier.EndTable();
- }
-};
-
-struct EmbeddingLookupSparseOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_combiner(CombinerType combiner)
- {
- fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER,
- static_cast<int8_t>(combiner), 0);
- }
- explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &);
- flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
-CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
- CombinerType combiner = CombinerType_SUM)
-{
- EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
- builder_.add_combiner(combiner);
- return builder_.Finish();
-}
-
-struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct GatherOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); }
- explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GatherOptionsBuilder &operator=(const GatherOptionsBuilder &);
- flatbuffers::Offset<GatherOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GatherOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0)
-{
- GatherOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TransposeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &);
- flatbuffers::Offset<TransposeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TransposeOptions>
-CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TransposeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ExpOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ExpOptionsBuilder &operator=(const ExpOptionsBuilder &);
- flatbuffers::Offset<ExpOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ExpOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ExpOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct CosOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CosOptionsBuilder &operator=(const CosOptionsBuilder &);
- flatbuffers::Offset<CosOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CosOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- CosOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_KEEP_DIMS = 4
- };
- bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) &&
- verifier.EndTable();
- }
-};
-
-struct ReducerOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_keep_dims(bool keep_dims)
- {
- fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0);
- }
- explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &);
- flatbuffers::Offset<ReducerOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReducerOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReducerOptions>
-CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, bool keep_dims = false)
-{
- ReducerOptionsBuilder builder_(_fbb);
- builder_.add_keep_dims(keep_dims);
- return builder_.Finish();
-}
-
-struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SQUEEZE_DIMS = 4
- };
- const flatbuffers::Vector<int32_t> *squeeze_dims() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) &&
- verifier.VerifyVector(squeeze_dims()) && verifier.EndTable();
- }
-};
-
-struct SqueezeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims)
- {
- fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims);
- }
- explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &);
- flatbuffers::Offset<SqueezeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SqueezeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SqueezeOptions>
-CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0)
-{
- SqueezeOptionsBuilder builder_(_fbb);
- builder_.add_squeeze_dims(squeeze_dims);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SqueezeOptions>
-CreateSqueezeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *squeeze_dims = nullptr)
-{
- return circle::CreateSqueezeOptions(_fbb,
- squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0);
-}
-
-struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_SPLITS = 4
- };
- int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
- verifier.EndTable();
- }
-};
-
-struct SplitOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_splits(int32_t num_splits)
- {
- fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0);
- }
- explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SplitOptionsBuilder &operator=(const SplitOptionsBuilder &);
- flatbuffers::Offset<SplitOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SplitOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num_splits = 0)
-{
- SplitOptionsBuilder builder_(_fbb);
- builder_.add_num_splits(num_splits);
- return builder_.Finish();
-}
-
-struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_SPLITS = 4
- };
- int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
- verifier.EndTable();
- }
-};
-
-struct SplitVOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_splits(int32_t num_splits)
- {
- fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0);
- }
- explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &);
- flatbuffers::Offset<SplitVOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SplitVOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num_splits = 0)
-{
- SplitVOptionsBuilder builder_(_fbb);
- builder_.add_num_splits(num_splits);
- return builder_.Finish();
-}
-
-struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BEGIN_MASK = 4,
- VT_END_MASK = 6,
- VT_ELLIPSIS_MASK = 8,
- VT_NEW_AXIS_MASK = 10,
- VT_SHRINK_AXIS_MASK = 12
- };
- int32_t begin_mask() const { return GetField<int32_t>(VT_BEGIN_MASK, 0); }
- int32_t end_mask() const { return GetField<int32_t>(VT_END_MASK, 0); }
- int32_t ellipsis_mask() const { return GetField<int32_t>(VT_ELLIPSIS_MASK, 0); }
- int32_t new_axis_mask() const { return GetField<int32_t>(VT_NEW_AXIS_MASK, 0); }
- int32_t shrink_axis_mask() const { return GetField<int32_t>(VT_SHRINK_AXIS_MASK, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BEGIN_MASK) &&
- VerifyField<int32_t>(verifier, VT_END_MASK) &&
- VerifyField<int32_t>(verifier, VT_ELLIPSIS_MASK) &&
- VerifyField<int32_t>(verifier, VT_NEW_AXIS_MASK) &&
- VerifyField<int32_t>(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable();
- }
-};
-
-struct StridedSliceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_begin_mask(int32_t begin_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0);
- }
- void add_end_mask(int32_t end_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_END_MASK, end_mask, 0);
- }
- void add_ellipsis_mask(int32_t ellipsis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0);
- }
- void add_new_axis_mask(int32_t new_axis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0);
- }
- void add_shrink_axis_mask(int32_t shrink_axis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0);
- }
- explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &);
- flatbuffers::Offset<StridedSliceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<StridedSliceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<StridedSliceOptions>
-CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t begin_mask = 0,
- int32_t end_mask = 0, int32_t ellipsis_mask = 0,
- int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0)
-{
- StridedSliceOptionsBuilder builder_(_fbb);
- builder_.add_shrink_axis_mask(shrink_axis_mask);
- builder_.add_new_axis_mask(new_axis_mask);
- builder_.add_ellipsis_mask(ellipsis_mask);
- builder_.add_end_mask(end_mask);
- builder_.add_begin_mask(begin_mask);
- return builder_.Finish();
-}
-
-struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogSoftmaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &);
- flatbuffers::Offset<LogSoftmaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogSoftmaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogSoftmaxOptions>
-CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogSoftmaxOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_IN_DATA_TYPE = 4,
- VT_OUT_DATA_TYPE = 6
- };
- TensorType in_data_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_IN_DATA_TYPE, 0));
- }
- TensorType out_data_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE) &&
- VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable();
- }
-};
-
-struct CastOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_in_data_type(TensorType in_data_type)
- {
- fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0);
- }
- void add_out_data_type(TensorType out_data_type)
- {
- fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0);
- }
- explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CastOptionsBuilder &operator=(const CastOptionsBuilder &);
- flatbuffers::Offset<CastOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CastOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CastOptions>
-CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType in_data_type = TensorType_FLOAT32,
- TensorType out_data_type = TensorType_FLOAT32)
-{
- CastOptionsBuilder builder_(_fbb);
- builder_.add_out_data_type(out_data_type);
- builder_.add_in_data_type(in_data_type);
- return builder_.Finish();
-}
-
-struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct DequantizeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &);
- flatbuffers::Offset<DequantizeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DequantizeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DequantizeOptions>
-CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- DequantizeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct MaximumMinimumOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &);
- flatbuffers::Offset<MaximumMinimumOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MaximumMinimumOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MaximumMinimumOptions>
-CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- MaximumMinimumOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TileOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TileOptionsBuilder &operator=(const TileOptionsBuilder &);
- flatbuffers::Offset<TileOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TileOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TileOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUTPUT_TYPE = 4
- };
- TensorType output_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ArgMaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_type(TensorType output_type)
- {
- fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
- }
- explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &);
- flatbuffers::Offset<ArgMaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArgMaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ArgMaxOptions>
-CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType output_type = TensorType_FLOAT32)
-{
- ArgMaxOptionsBuilder builder_(_fbb);
- builder_.add_output_type(output_type);
- return builder_.Finish();
-}
-
-struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUTPUT_TYPE = 4
- };
- TensorType output_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ArgMinOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_type(TensorType output_type)
- {
- fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
- }
- explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &);
- flatbuffers::Offset<ArgMinOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArgMinOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ArgMinOptions>
-CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType output_type = TensorType_FLOAT32)
-{
- ArgMinOptionsBuilder builder_(_fbb);
- builder_.add_output_type(output_type);
- return builder_.Finish();
-}
-
-struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GreaterOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GreaterOptionsBuilder &operator=(const GreaterOptionsBuilder &);
- flatbuffers::Offset<GreaterOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GreaterOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GreaterOptions>
-CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GreaterOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GreaterEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &);
- flatbuffers::Offset<GreaterEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GreaterEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GreaterEqualOptions>
-CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GreaterEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LessOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LessOptionsBuilder &operator=(const LessOptionsBuilder &);
- flatbuffers::Offset<LessOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LessOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LessOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LessEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &);
- flatbuffers::Offset<LessEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LessEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LessEqualOptions>
-CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LessEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NegOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NegOptionsBuilder &operator=(const NegOptionsBuilder &);
- flatbuffers::Offset<NegOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NegOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NegOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SelectOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SelectOptionsBuilder &operator=(const SelectOptionsBuilder &);
- flatbuffers::Offset<SelectOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SelectOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SelectOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SliceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SliceOptionsBuilder &operator=(const SliceOptionsBuilder &);
- flatbuffers::Offset<SliceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SliceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SliceOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) && verifier.EndTable();
- }
-};
-
-struct TransposeConvOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_H, stride_h, 0);
- }
- explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &);
- flatbuffers::Offset<TransposeConvOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeConvOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TransposeConvOptions>
-CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0)
-{
- TransposeConvOptionsBuilder builder_(_fbb);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ExpandDimsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &);
- flatbuffers::Offset<ExpandDimsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ExpandDimsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ExpandDimsOptions>
-CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ExpandDimsOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALIDATE_INDICES = 4
- };
- bool validate_indices() const { return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) &&
- verifier.EndTable();
- }
-};
-
-struct SparseToDenseOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_validate_indices(bool validate_indices)
- {
- fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES,
- static_cast<uint8_t>(validate_indices), 0);
- }
- explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &);
- flatbuffers::Offset<SparseToDenseOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SparseToDenseOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SparseToDenseOptions>
-CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, bool validate_indices = false)
-{
- SparseToDenseOptionsBuilder builder_(_fbb);
- builder_.add_validate_indices(validate_indices);
- return builder_.Finish();
-}
-
-struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct EqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- EqualOptionsBuilder &operator=(const EqualOptionsBuilder &);
- flatbuffers::Offset<EqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<EqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- EqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NotEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &);
- flatbuffers::Offset<NotEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NotEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NotEqualOptions>
-CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NotEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUT_TYPE = 4
- };
- TensorType out_type() const { return static_cast<TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ShapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_out_type(TensorType out_type)
- {
- fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0);
- }
- explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &);
- flatbuffers::Offset<ShapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ShapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ShapeOptions>
-CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, TensorType out_type = TensorType_FLOAT32)
-{
- ShapeOptionsBuilder builder_(_fbb);
- builder_.add_out_type(out_type);
- return builder_.Finish();
-}
-
-struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct RankOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RankOptionsBuilder &operator=(const RankOptionsBuilder &);
- flatbuffers::Offset<RankOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RankOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- RankOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PowOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PowOptionsBuilder &operator=(const PowOptionsBuilder &);
- flatbuffers::Offset<PowOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PowOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PowOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_NUM_BITS = 8,
- VT_NARROW_RANGE = 10
- };
- float min() const { return GetField<float>(VT_MIN, 0.0f); }
- float max() const { return GetField<float>(VT_MAX, 0.0f); }
- int32_t num_bits() const { return GetField<int32_t>(VT_NUM_BITS, 0); }
- bool narrow_range() const { return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_MIN) &&
- VerifyField<float>(verifier, VT_MAX) && VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
- VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) && verifier.EndTable();
- }
-};
-
-struct FakeQuantOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(float min) { fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f); }
- void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); }
- void add_num_bits(int32_t num_bits)
- {
- fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
- }
- void add_narrow_range(bool narrow_range)
- {
- fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range),
- 0);
- }
- explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &);
- flatbuffers::Offset<FakeQuantOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FakeQuantOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FakeQuantOptions>
-CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, float min = 0.0f, float max = 0.0f,
- int32_t num_bits = 0, bool narrow_range = false)
-{
- FakeQuantOptionsBuilder builder_(_fbb);
- builder_.add_num_bits(num_bits);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_narrow_range(narrow_range);
- return builder_.Finish();
-}
-
-struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES_COUNT = 4,
- VT_AXIS = 6
- };
- int32_t values_count() const { return GetField<int32_t>(VT_VALUES_COUNT, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_VALUES_COUNT) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct PackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values_count(int32_t values_count)
- {
- fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0);
- }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0); }
- explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PackOptionsBuilder &operator=(const PackOptionsBuilder &);
- flatbuffers::Offset<PackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PackOptions>
-CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t values_count = 0, int32_t axis = 0)
-{
- PackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_values_count(values_count);
- return builder_.Finish();
-}
-
-struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalOrOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &);
- flatbuffers::Offset<LogicalOrOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalOrOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalOrOptions>
-CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalOrOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct OneHotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0); }
- explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &);
- flatbuffers::Offset<OneHotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OneHotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0)
-{
- OneHotOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct AbsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AbsOptionsBuilder &operator=(const AbsOptionsBuilder &);
- flatbuffers::Offset<AbsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AbsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- AbsOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct HardSwishOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- HardSwishOptionsBuilder &operator=(const HardSwishOptionsBuilder &);
- flatbuffers::Offset<HardSwishOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<HardSwishOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<HardSwishOptions>
-CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- HardSwishOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalAndOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalAndOptionsBuilder &operator=(const LogicalAndOptionsBuilder &);
- flatbuffers::Offset<LogicalAndOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalAndOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalAndOptions>
-CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalAndOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalNotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &);
- flatbuffers::Offset<LogicalNotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalNotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalNotOptions>
-CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalNotOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM = 4,
- VT_AXIS = 6
- };
- int32_t num() const { return GetField<int32_t>(VT_NUM, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct UnpackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num(int32_t num) { fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0); }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0); }
- explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &);
- flatbuffers::Offset<UnpackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnpackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num = 0, int32_t axis = 0)
-{
- UnpackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_num(num);
- return builder_.Finish();
-}
-
-struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorDivOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &);
- flatbuffers::Offset<FloorDivOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorDivOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorDivOptions>
-CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorDivOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquareOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquareOptionsBuilder &operator=(const SquareOptionsBuilder &);
- flatbuffers::Offset<SquareOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquareOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquareOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ZerosLikeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &);
- flatbuffers::Offset<ZerosLikeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ZerosLikeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ZerosLikeOptions>
-CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ZerosLikeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FillOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FillOptionsBuilder &operator=(const FillOptionsBuilder &);
- flatbuffers::Offset<FillOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FillOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FillOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorModOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &);
- flatbuffers::Offset<FloorModOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorModOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorModOptions>
-CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorModOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct RangeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RangeOptionsBuilder &operator=(const RangeOptionsBuilder &);
- flatbuffers::Offset<RangeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RangeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- RangeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALPHA = 4
- };
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_ALPHA) &&
- verifier.EndTable();
- }
-};
-
-struct LeakyReluOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_alpha(float alpha) { fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); }
- explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &);
- flatbuffers::Offset<LeakyReluOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LeakyReluOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LeakyReluOptions>
-CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, float alpha = 0.0f)
-{
- LeakyReluOptionsBuilder builder_(_fbb);
- builder_.add_alpha(alpha);
- return builder_.Finish();
-}
-
-struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquaredDifferenceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &);
- flatbuffers::Offset<SquaredDifferenceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquaredDifferenceOptions>
-CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquaredDifferenceOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MODE = 4
- };
- MirrorPadMode mode() const { return static_cast<MirrorPadMode>(GetField<int8_t>(VT_MODE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_MODE) &&
- verifier.EndTable();
- }
-};
-
-struct MirrorPadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_mode(MirrorPadMode mode)
- {
- fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
- }
- explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &);
- flatbuffers::Offset<MirrorPadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MirrorPadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MirrorPadOptions>
-CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb,
- MirrorPadMode mode = MirrorPadMode_REFLECT)
-{
- MirrorPadOptionsBuilder builder_(_fbb);
- builder_.add_mode(mode);
- return builder_.Finish();
-}
-
-struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_IDX_OUT_TYPE = 4
- };
- TensorType idx_out_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_IDX_OUT_TYPE, 2));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IDX_OUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct UniqueOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_idx_out_type(TensorType idx_out_type)
- {
- fbb_.AddElement<int8_t>(UniqueOptions::VT_IDX_OUT_TYPE, static_cast<int8_t>(idx_out_type), 2);
- }
- explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UniqueOptionsBuilder &operator=(const UniqueOptionsBuilder &);
- flatbuffers::Offset<UniqueOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UniqueOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UniqueOptions>
-CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType idx_out_type = TensorType_INT32)
-{
- UniqueOptionsBuilder builder_(_fbb);
- builder_.add_idx_out_type(idx_out_type);
- return builder_.Finish();
-}
-
-struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ReverseV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReverseV2OptionsBuilder &operator=(const ReverseV2OptionsBuilder &);
- flatbuffers::Offset<ReverseV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReverseV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReverseV2Options>
-CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ReverseV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct AddNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AddNOptionsBuilder &operator=(const AddNOptionsBuilder &);
- flatbuffers::Offset<AddNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AddNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AddNOptions> CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- AddNOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GatherNdOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GatherNdOptionsBuilder &operator=(const GatherNdOptionsBuilder &);
- flatbuffers::Offset<GatherNdOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GatherNdOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GatherNdOptions>
-CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GatherNdOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct WhereOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- WhereOptionsBuilder &operator=(const WhereOptionsBuilder &);
- flatbuffers::Offset<WhereOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<WhereOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- WhereOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SEQ_DIM = 4,
- VT_BATCH_DIM = 6
- };
- int32_t seq_dim() const { return GetField<int32_t>(VT_SEQ_DIM, 0); }
- int32_t batch_dim() const { return GetField<int32_t>(VT_BATCH_DIM, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_SEQ_DIM) &&
- VerifyField<int32_t>(verifier, VT_BATCH_DIM) && verifier.EndTable();
- }
-};
-
-struct ReverseSequenceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_seq_dim(int32_t seq_dim)
- {
- fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0);
- }
- void add_batch_dim(int32_t batch_dim)
- {
- fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0);
- }
- explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReverseSequenceOptionsBuilder &operator=(const ReverseSequenceOptionsBuilder &);
- flatbuffers::Offset<ReverseSequenceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReverseSequenceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReverseSequenceOptions>
-CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t seq_dim = 0,
- int32_t batch_dim = 0)
-{
- ReverseSequenceOptionsBuilder builder_(_fbb);
- builder_.add_batch_dim(batch_dim);
- builder_.add_seq_dim(seq_dim);
- return builder_.Finish();
-}
-
-struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct MatrixDiagOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MatrixDiagOptionsBuilder &operator=(const MatrixDiagOptionsBuilder &);
- flatbuffers::Offset<MatrixDiagOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MatrixDiagOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MatrixDiagOptions>
-CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- MatrixDiagOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct QuantizeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- QuantizeOptionsBuilder &operator=(const QuantizeOptionsBuilder &);
- flatbuffers::Offset<QuantizeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<QuantizeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<QuantizeOptions>
-CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- QuantizeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct MatrixSetDiagOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MatrixSetDiagOptionsBuilder &operator=(const MatrixSetDiagOptionsBuilder &);
- flatbuffers::Offset<MatrixSetDiagOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MatrixSetDiagOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MatrixSetDiagOptions>
-CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- MatrixSetDiagOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_THEN_SUBGRAPH_INDEX = 4,
- VT_ELSE_SUBGRAPH_INDEX = 6
- };
- int32_t then_subgraph_index() const { return GetField<int32_t>(VT_THEN_SUBGRAPH_INDEX, 0); }
- int32_t else_subgraph_index() const { return GetField<int32_t>(VT_ELSE_SUBGRAPH_INDEX, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_THEN_SUBGRAPH_INDEX) &&
- VerifyField<int32_t>(verifier, VT_ELSE_SUBGRAPH_INDEX) && verifier.EndTable();
- }
-};
-
-struct IfOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_then_subgraph_index(int32_t then_subgraph_index)
- {
- fbb_.AddElement<int32_t>(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0);
- }
- void add_else_subgraph_index(int32_t else_subgraph_index)
- {
- fbb_.AddElement<int32_t>(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0);
- }
- explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- IfOptionsBuilder &operator=(const IfOptionsBuilder &);
- flatbuffers::Offset<IfOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<IfOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t then_subgraph_index = 0,
- int32_t else_subgraph_index = 0)
-{
- IfOptionsBuilder builder_(_fbb);
- builder_.add_else_subgraph_index(else_subgraph_index);
- builder_.add_then_subgraph_index(then_subgraph_index);
- return builder_.Finish();
-}
-
-struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_COND_SUBGRAPH_INDEX = 4,
- VT_BODY_SUBGRAPH_INDEX = 6
- };
- int32_t cond_subgraph_index() const { return GetField<int32_t>(VT_COND_SUBGRAPH_INDEX, 0); }
- int32_t body_subgraph_index() const { return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX) &&
- VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX) && verifier.EndTable();
- }
-};
-
-struct WhileOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_cond_subgraph_index(int32_t cond_subgraph_index)
- {
- fbb_.AddElement<int32_t>(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
- }
- void add_body_subgraph_index(int32_t body_subgraph_index)
- {
- fbb_.AddElement<int32_t>(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
- }
- explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- WhileOptionsBuilder &operator=(const WhileOptionsBuilder &);
- flatbuffers::Offset<WhileOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<WhileOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t cond_subgraph_index = 0,
- int32_t body_subgraph_index = 0)
-{
- WhileOptionsBuilder builder_(_fbb);
- builder_.add_body_subgraph_index(body_subgraph_index);
- builder_.add_cond_subgraph_index(cond_subgraph_index);
- return builder_.Finish();
-}
-
-struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NonMaxSuppressionV4OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NonMaxSuppressionV4OptionsBuilder &operator=(const NonMaxSuppressionV4OptionsBuilder &);
- flatbuffers::Offset<NonMaxSuppressionV4Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NonMaxSuppressionV4Options>
-CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NonMaxSuppressionV4OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NonMaxSuppressionV5OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NonMaxSuppressionV5OptionsBuilder &operator=(const NonMaxSuppressionV5OptionsBuilder &);
- flatbuffers::Offset<NonMaxSuppressionV5Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NonMaxSuppressionV5Options>
-CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NonMaxSuppressionV5OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ScatterNdOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &);
- flatbuffers::Offset<ScatterNdOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ScatterNdOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ScatterNdOptions>
-CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ScatterNdOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SelectV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SelectV2OptionsBuilder &operator=(const SelectV2OptionsBuilder &);
- flatbuffers::Offset<SelectV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SelectV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SelectV2Options>
-CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SelectV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct DensifyOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DensifyOptionsBuilder &operator=(const DensifyOptionsBuilder &);
- flatbuffers::Offset<DensifyOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DensifyOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DensifyOptions>
-CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- DensifyOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SegmentSumOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SegmentSumOptionsBuilder &operator=(const SegmentSumOptionsBuilder &);
- flatbuffers::Offset<SegmentSumOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SegmentSumOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SegmentSumOptions>
-CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SegmentSumOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ADJOINT_LHS = 4,
- VT_ADJOINT_RHS = 6
- };
- bool adjoint_lhs() const { return GetField<uint8_t>(VT_ADJOINT_LHS, 0) != 0; }
- bool adjoint_rhs() const { return GetField<uint8_t>(VT_ADJOINT_RHS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ADJOINT_LHS) &&
- VerifyField<uint8_t>(verifier, VT_ADJOINT_RHS) && verifier.EndTable();
- }
-};
-
-struct BatchMatMulOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_adjoint_lhs(bool adjoint_lhs)
- {
- fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_LHS, static_cast<uint8_t>(adjoint_lhs),
- 0);
- }
- void add_adjoint_rhs(bool adjoint_rhs)
- {
- fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_RHS, static_cast<uint8_t>(adjoint_rhs),
- 0);
- }
- explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BatchMatMulOptionsBuilder &operator=(const BatchMatMulOptionsBuilder &);
- flatbuffers::Offset<BatchMatMulOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BatchMatMulOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BatchMatMulOptions>
-CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, bool adjoint_lhs = false,
- bool adjoint_rhs = false)
-{
- BatchMatMulOptionsBuilder builder_(_fbb);
- builder_.add_adjoint_rhs(adjoint_rhs);
- builder_.add_adjoint_lhs(adjoint_lhs);
- return builder_.Finish();
-}
-
-struct BCQGatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_INPUT_HIDDEN_SIZE = 4,
- VT_AXIS = 6
- };
- int32_t input_hidden_size() const { return GetField<int32_t>(VT_INPUT_HIDDEN_SIZE, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_INPUT_HIDDEN_SIZE) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct BCQGatherOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_input_hidden_size(int32_t input_hidden_size)
- {
- fbb_.AddElement<int32_t>(BCQGatherOptions::VT_INPUT_HIDDEN_SIZE, input_hidden_size, 0);
- }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(BCQGatherOptions::VT_AXIS, axis, 0); }
- explicit BCQGatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BCQGatherOptionsBuilder &operator=(const BCQGatherOptionsBuilder &);
- flatbuffers::Offset<BCQGatherOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BCQGatherOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BCQGatherOptions>
-CreateBCQGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t input_hidden_size = 0,
- int32_t axis = 0)
-{
- BCQGatherOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_input_hidden_size(input_hidden_size);
- return builder_.Finish();
-}
-
-struct BCQFullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_WEIGHTS_HIDDEN_SIZE = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- int32_t weights_hidden_size() const { return GetField<int32_t>(VT_WEIGHTS_HIDDEN_SIZE, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_WEIGHTS_HIDDEN_SIZE) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct BCQFullyConnectedOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_weights_hidden_size(int32_t weights_hidden_size)
- {
- fbb_.AddElement<int32_t>(BCQFullyConnectedOptions::VT_WEIGHTS_HIDDEN_SIZE, weights_hidden_size,
- 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BCQFullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit BCQFullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BCQFullyConnectedOptionsBuilder &operator=(const BCQFullyConnectedOptionsBuilder &);
- flatbuffers::Offset<BCQFullyConnectedOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BCQFullyConnectedOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BCQFullyConnectedOptions> CreateBCQFullyConnectedOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t weights_hidden_size = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- BCQFullyConnectedOptionsBuilder builder_(_fbb);
- builder_.add_weights_hidden_size(weights_hidden_size);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct InstanceNormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_EPSILON = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- float epsilon() const { return GetField<float>(VT_EPSILON, 0.0f); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_EPSILON) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct InstanceNormOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_epsilon(float epsilon)
- {
- fbb_.AddElement<float>(InstanceNormOptions::VT_EPSILON, epsilon, 0.0f);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(InstanceNormOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit InstanceNormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- InstanceNormOptionsBuilder &operator=(const InstanceNormOptionsBuilder &);
- flatbuffers::Offset<InstanceNormOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<InstanceNormOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<InstanceNormOptions> CreateInstanceNormOptions(
- flatbuffers::FlatBufferBuilder &_fbb, float epsilon = 0.0f,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- InstanceNormOptionsBuilder builder_(_fbb);
- builder_.add_epsilon(epsilon);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BUILTIN_CODE = 4,
- VT_CUSTOM_CODE = 6,
- VT_VERSION = 8
- };
- BuiltinOperator builtin_code() const
- {
- return static_cast<BuiltinOperator>(GetField<uint8_t>(VT_BUILTIN_CODE, 0));
- }
- const flatbuffers::String *custom_code() const
- {
- return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
- }
- int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_BUILTIN_CODE) &&
- VerifyOffset(verifier, VT_CUSTOM_CODE) && verifier.VerifyString(custom_code()) &&
- VerifyField<int32_t>(verifier, VT_VERSION) && verifier.EndTable();
- }
-};
-
-struct OperatorCodeBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_builtin_code(BuiltinOperator builtin_code)
- {
- fbb_.AddElement<uint8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<uint8_t>(builtin_code), 0);
- }
- void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code)
- {
- fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
- }
- void add_version(int32_t version)
- {
- fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
- }
- explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorCodeBuilder &operator=(const OperatorCodeBuilder &);
- flatbuffers::Offset<OperatorCode> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OperatorCode>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- flatbuffers::Offset<flatbuffers::String> custom_code = 0, int32_t version = 1)
-{
- OperatorCodeBuilder builder_(_fbb);
- builder_.add_version(version);
- builder_.add_custom_code(custom_code);
- builder_.add_builtin_code(builtin_code);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCodeDirect(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- const char *custom_code = nullptr, int32_t version = 1)
-{
- return circle::CreateOperatorCode(_fbb, builtin_code,
- custom_code ? _fbb.CreateString(custom_code) : 0, version);
-}
-
-struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OPCODE_INDEX = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_BUILTIN_OPTIONS_TYPE = 10,
- VT_BUILTIN_OPTIONS = 12,
- VT_CUSTOM_OPTIONS = 14,
- VT_CUSTOM_OPTIONS_FORMAT = 16,
- VT_MUTATING_VARIABLE_INPUTS = 18,
- VT_INTERMEDIATES = 20
- };
- uint32_t opcode_index() const { return GetField<uint32_t>(VT_OPCODE_INDEX, 0); }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- BuiltinOptions builtin_options_type() const
- {
- return static_cast<BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
- }
- const void *builtin_options() const { return GetPointer<const void *>(VT_BUILTIN_OPTIONS); }
- template <typename T> const T *builtin_options_as() const;
- const Conv2DOptions *builtin_options_as_Conv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Conv2DOptions
- ? static_cast<const Conv2DOptions *>(builtin_options())
- : nullptr;
- }
- const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions
- ? static_cast<const DepthwiseConv2DOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions
- ? static_cast<const ConcatEmbeddingsOptions *>(builtin_options())
- : nullptr;
- }
- const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSHProjectionOptions
- ? static_cast<const LSHProjectionOptions *>(builtin_options())
- : nullptr;
- }
- const Pool2DOptions *builtin_options_as_Pool2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Pool2DOptions
- ? static_cast<const Pool2DOptions *>(builtin_options())
- : nullptr;
- }
- const SVDFOptions *builtin_options_as_SVDFOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SVDFOptions
- ? static_cast<const SVDFOptions *>(builtin_options())
- : nullptr;
- }
- const RNNOptions *builtin_options_as_RNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RNNOptions
- ? static_cast<const RNNOptions *>(builtin_options())
- : nullptr;
- }
- const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FullyConnectedOptions
- ? static_cast<const FullyConnectedOptions *>(builtin_options())
- : nullptr;
- }
- const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SoftmaxOptions
- ? static_cast<const SoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatenationOptions
- ? static_cast<const ConcatenationOptions *>(builtin_options())
- : nullptr;
- }
- const AddOptions *builtin_options_as_AddOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AddOptions
- ? static_cast<const AddOptions *>(builtin_options())
- : nullptr;
- }
- const L2NormOptions *builtin_options_as_L2NormOptions() const
- {
- return builtin_options_type() == BuiltinOptions_L2NormOptions
- ? static_cast<const L2NormOptions *>(builtin_options())
- : nullptr;
- }
- const LocalResponseNormalizationOptions *
- builtin_options_as_LocalResponseNormalizationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions
- ? static_cast<const LocalResponseNormalizationOptions *>(builtin_options())
- : nullptr;
- }
- const LSTMOptions *builtin_options_as_LSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSTMOptions
- ? static_cast<const LSTMOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions
- ? static_cast<const ResizeBilinearOptions *>(builtin_options())
- : nullptr;
- }
- const CallOptions *builtin_options_as_CallOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CallOptions
- ? static_cast<const CallOptions *>(builtin_options())
- : nullptr;
- }
- const ReshapeOptions *builtin_options_as_ReshapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReshapeOptions
- ? static_cast<const ReshapeOptions *>(builtin_options())
- : nullptr;
- }
- const SkipGramOptions *builtin_options_as_SkipGramOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SkipGramOptions
- ? static_cast<const SkipGramOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions
- ? static_cast<const SpaceToDepthOptions *>(builtin_options())
- : nullptr;
- }
- const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions
- ? static_cast<const EmbeddingLookupSparseOptions *>(builtin_options())
- : nullptr;
- }
- const MulOptions *builtin_options_as_MulOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MulOptions
- ? static_cast<const MulOptions *>(builtin_options())
- : nullptr;
- }
- const PadOptions *builtin_options_as_PadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PadOptions
- ? static_cast<const PadOptions *>(builtin_options())
- : nullptr;
- }
- const GatherOptions *builtin_options_as_GatherOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GatherOptions
- ? static_cast<const GatherOptions *>(builtin_options())
- : nullptr;
- }
- const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions
- ? static_cast<const BatchToSpaceNDOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions
- ? static_cast<const SpaceToBatchNDOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeOptions *builtin_options_as_TransposeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeOptions
- ? static_cast<const TransposeOptions *>(builtin_options())
- : nullptr;
- }
- const ReducerOptions *builtin_options_as_ReducerOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReducerOptions
- ? static_cast<const ReducerOptions *>(builtin_options())
- : nullptr;
- }
- const SubOptions *builtin_options_as_SubOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SubOptions
- ? static_cast<const SubOptions *>(builtin_options())
- : nullptr;
- }
- const DivOptions *builtin_options_as_DivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DivOptions
- ? static_cast<const DivOptions *>(builtin_options())
- : nullptr;
- }
- const SqueezeOptions *builtin_options_as_SqueezeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SqueezeOptions
- ? static_cast<const SqueezeOptions *>(builtin_options())
- : nullptr;
- }
- const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SequenceRNNOptions
- ? static_cast<const SequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_StridedSliceOptions
- ? static_cast<const StridedSliceOptions *>(builtin_options())
- : nullptr;
- }
- const ExpOptions *builtin_options_as_ExpOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpOptions
- ? static_cast<const ExpOptions *>(builtin_options())
- : nullptr;
- }
- const TopKV2Options *builtin_options_as_TopKV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_TopKV2Options
- ? static_cast<const TopKV2Options *>(builtin_options())
- : nullptr;
- }
- const SplitOptions *builtin_options_as_SplitOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitOptions
- ? static_cast<const SplitOptions *>(builtin_options())
- : nullptr;
- }
- const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions
- ? static_cast<const LogSoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const CastOptions *builtin_options_as_CastOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CastOptions
- ? static_cast<const CastOptions *>(builtin_options())
- : nullptr;
- }
- const DequantizeOptions *builtin_options_as_DequantizeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DequantizeOptions
- ? static_cast<const DequantizeOptions *>(builtin_options())
- : nullptr;
- }
- const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions
- ? static_cast<const MaximumMinimumOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMaxOptions
- ? static_cast<const ArgMaxOptions *>(builtin_options())
- : nullptr;
- }
- const LessOptions *builtin_options_as_LessOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessOptions
- ? static_cast<const LessOptions *>(builtin_options())
- : nullptr;
- }
- const NegOptions *builtin_options_as_NegOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NegOptions
- ? static_cast<const NegOptions *>(builtin_options())
- : nullptr;
- }
- const PadV2Options *builtin_options_as_PadV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_PadV2Options
- ? static_cast<const PadV2Options *>(builtin_options())
- : nullptr;
- }
- const GreaterOptions *builtin_options_as_GreaterOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterOptions
- ? static_cast<const GreaterOptions *>(builtin_options())
- : nullptr;
- }
- const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterEqualOptions
- ? static_cast<const GreaterEqualOptions *>(builtin_options())
- : nullptr;
- }
- const LessEqualOptions *builtin_options_as_LessEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessEqualOptions
- ? static_cast<const LessEqualOptions *>(builtin_options())
- : nullptr;
- }
- const SelectOptions *builtin_options_as_SelectOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SelectOptions
- ? static_cast<const SelectOptions *>(builtin_options())
- : nullptr;
- }
- const SliceOptions *builtin_options_as_SliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SliceOptions
- ? static_cast<const SliceOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeConvOptions
- ? static_cast<const TransposeConvOptions *>(builtin_options())
- : nullptr;
- }
- const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SparseToDenseOptions
- ? static_cast<const SparseToDenseOptions *>(builtin_options())
- : nullptr;
- }
- const TileOptions *builtin_options_as_TileOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TileOptions
- ? static_cast<const TileOptions *>(builtin_options())
- : nullptr;
- }
- const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpandDimsOptions
- ? static_cast<const ExpandDimsOptions *>(builtin_options())
- : nullptr;
- }
- const EqualOptions *builtin_options_as_EqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EqualOptions
- ? static_cast<const EqualOptions *>(builtin_options())
- : nullptr;
- }
- const NotEqualOptions *builtin_options_as_NotEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NotEqualOptions
- ? static_cast<const NotEqualOptions *>(builtin_options())
- : nullptr;
- }
- const ShapeOptions *builtin_options_as_ShapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ShapeOptions
- ? static_cast<const ShapeOptions *>(builtin_options())
- : nullptr;
- }
- const PowOptions *builtin_options_as_PowOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PowOptions
- ? static_cast<const PowOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMinOptions *builtin_options_as_ArgMinOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMinOptions
- ? static_cast<const ArgMinOptions *>(builtin_options())
- : nullptr;
- }
- const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FakeQuantOptions
- ? static_cast<const FakeQuantOptions *>(builtin_options())
- : nullptr;
- }
- const PackOptions *builtin_options_as_PackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PackOptions
- ? static_cast<const PackOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalOrOptions
- ? static_cast<const LogicalOrOptions *>(builtin_options())
- : nullptr;
- }
- const OneHotOptions *builtin_options_as_OneHotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_OneHotOptions
- ? static_cast<const OneHotOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalAndOptions
- ? static_cast<const LogicalAndOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalNotOptions
- ? static_cast<const LogicalNotOptions *>(builtin_options())
- : nullptr;
- }
- const UnpackOptions *builtin_options_as_UnpackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnpackOptions
- ? static_cast<const UnpackOptions *>(builtin_options())
- : nullptr;
- }
- const FloorDivOptions *builtin_options_as_FloorDivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorDivOptions
- ? static_cast<const FloorDivOptions *>(builtin_options())
- : nullptr;
- }
- const SquareOptions *builtin_options_as_SquareOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquareOptions
- ? static_cast<const SquareOptions *>(builtin_options())
- : nullptr;
- }
- const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ZerosLikeOptions
- ? static_cast<const ZerosLikeOptions *>(builtin_options())
- : nullptr;
- }
- const FillOptions *builtin_options_as_FillOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FillOptions
- ? static_cast<const FillOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceLSTMOptions *
- builtin_options_as_BidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceLSTMOptions
- ? static_cast<const BidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceRNNOptions
- ? static_cast<const BidirectionalSequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const UnidirectionalSequenceLSTMOptions *
- builtin_options_as_UnidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnidirectionalSequenceLSTMOptions
- ? static_cast<const UnidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const FloorModOptions *builtin_options_as_FloorModOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorModOptions
- ? static_cast<const FloorModOptions *>(builtin_options())
- : nullptr;
- }
- const RangeOptions *builtin_options_as_RangeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RangeOptions
- ? static_cast<const RangeOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeNearestNeighborOptions
- ? static_cast<const ResizeNearestNeighborOptions *>(builtin_options())
- : nullptr;
- }
- const LeakyReluOptions *builtin_options_as_LeakyReluOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LeakyReluOptions
- ? static_cast<const LeakyReluOptions *>(builtin_options())
- : nullptr;
- }
- const SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquaredDifferenceOptions
- ? static_cast<const SquaredDifferenceOptions *>(builtin_options())
- : nullptr;
- }
- const MirrorPadOptions *builtin_options_as_MirrorPadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MirrorPadOptions
- ? static_cast<const MirrorPadOptions *>(builtin_options())
- : nullptr;
- }
- const AbsOptions *builtin_options_as_AbsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AbsOptions
- ? static_cast<const AbsOptions *>(builtin_options())
- : nullptr;
- }
- const SplitVOptions *builtin_options_as_SplitVOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitVOptions
- ? static_cast<const SplitVOptions *>(builtin_options())
- : nullptr;
- }
- const UniqueOptions *builtin_options_as_UniqueOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UniqueOptions
- ? static_cast<const UniqueOptions *>(builtin_options())
- : nullptr;
- }
- const ReverseV2Options *builtin_options_as_ReverseV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_ReverseV2Options
- ? static_cast<const ReverseV2Options *>(builtin_options())
- : nullptr;
- }
- const AddNOptions *builtin_options_as_AddNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AddNOptions
- ? static_cast<const AddNOptions *>(builtin_options())
- : nullptr;
- }
- const GatherNdOptions *builtin_options_as_GatherNdOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GatherNdOptions
- ? static_cast<const GatherNdOptions *>(builtin_options())
- : nullptr;
- }
- const CosOptions *builtin_options_as_CosOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CosOptions
- ? static_cast<const CosOptions *>(builtin_options())
- : nullptr;
- }
- const WhereOptions *builtin_options_as_WhereOptions() const
- {
- return builtin_options_type() == BuiltinOptions_WhereOptions
- ? static_cast<const WhereOptions *>(builtin_options())
- : nullptr;
- }
- const RankOptions *builtin_options_as_RankOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RankOptions
- ? static_cast<const RankOptions *>(builtin_options())
- : nullptr;
- }
- const ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReverseSequenceOptions
- ? static_cast<const ReverseSequenceOptions *>(builtin_options())
- : nullptr;
- }
- const MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MatrixDiagOptions
- ? static_cast<const MatrixDiagOptions *>(builtin_options())
- : nullptr;
- }
- const QuantizeOptions *builtin_options_as_QuantizeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_QuantizeOptions
- ? static_cast<const QuantizeOptions *>(builtin_options())
- : nullptr;
- }
- const MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MatrixSetDiagOptions
- ? static_cast<const MatrixSetDiagOptions *>(builtin_options())
- : nullptr;
- }
- const HardSwishOptions *builtin_options_as_HardSwishOptions() const
- {
- return builtin_options_type() == BuiltinOptions_HardSwishOptions
- ? static_cast<const HardSwishOptions *>(builtin_options())
- : nullptr;
- }
- const IfOptions *builtin_options_as_IfOptions() const
- {
- return builtin_options_type() == BuiltinOptions_IfOptions
- ? static_cast<const IfOptions *>(builtin_options())
- : nullptr;
- }
- const WhileOptions *builtin_options_as_WhileOptions() const
- {
- return builtin_options_type() == BuiltinOptions_WhileOptions
- ? static_cast<const WhileOptions *>(builtin_options())
- : nullptr;
- }
- const DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DepthToSpaceOptions
- ? static_cast<const DepthToSpaceOptions *>(builtin_options())
- : nullptr;
- }
- const NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const
- {
- return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV4Options
- ? static_cast<const NonMaxSuppressionV4Options *>(builtin_options())
- : nullptr;
- }
- const NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const
- {
- return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV5Options
- ? static_cast<const NonMaxSuppressionV5Options *>(builtin_options())
- : nullptr;
- }
- const ScatterNdOptions *builtin_options_as_ScatterNdOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ScatterNdOptions
- ? static_cast<const ScatterNdOptions *>(builtin_options())
- : nullptr;
- }
- const SelectV2Options *builtin_options_as_SelectV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_SelectV2Options
- ? static_cast<const SelectV2Options *>(builtin_options())
- : nullptr;
- }
- const DensifyOptions *builtin_options_as_DensifyOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DensifyOptions
- ? static_cast<const DensifyOptions *>(builtin_options())
- : nullptr;
- }
- const SegmentSumOptions *builtin_options_as_SegmentSumOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SegmentSumOptions
- ? static_cast<const SegmentSumOptions *>(builtin_options())
- : nullptr;
- }
- const BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BatchMatMulOptions
- ? static_cast<const BatchMatMulOptions *>(builtin_options())
- : nullptr;
- }
- const BCQGatherOptions *builtin_options_as_BCQGatherOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BCQGatherOptions
- ? static_cast<const BCQGatherOptions *>(builtin_options())
- : nullptr;
- }
- const BCQFullyConnectedOptions *builtin_options_as_BCQFullyConnectedOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BCQFullyConnectedOptions
- ? static_cast<const BCQFullyConnectedOptions *>(builtin_options())
- : nullptr;
- }
- const InstanceNormOptions *builtin_options_as_InstanceNormOptions() const
- {
- return builtin_options_type() == BuiltinOptions_InstanceNormOptions
- ? static_cast<const InstanceNormOptions *>(builtin_options())
- : nullptr;
- }
- const flatbuffers::Vector<uint8_t> *custom_options() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
- }
- CustomOptionsFormat custom_options_format() const
- {
- return static_cast<CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
- }
- const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *intermediates() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INTERMEDIATES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) &&
- VerifyOffset(verifier, VT_BUILTIN_OPTIONS) &&
- VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) &&
- VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && verifier.VerifyVector(custom_options()) &&
- VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) &&
- VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) &&
- verifier.VerifyVector(mutating_variable_inputs()) &&
- VerifyOffset(verifier, VT_INTERMEDIATES) && verifier.VerifyVector(intermediates()) &&
- verifier.EndTable();
- }
-};
-
-template <> inline const Conv2DOptions *Operator::builtin_options_as<Conv2DOptions>() const
-{
- return builtin_options_as_Conv2DOptions();
-}
-
-template <>
-inline const DepthwiseConv2DOptions *Operator::builtin_options_as<DepthwiseConv2DOptions>() const
-{
- return builtin_options_as_DepthwiseConv2DOptions();
-}
-
-template <>
-inline const ConcatEmbeddingsOptions *Operator::builtin_options_as<ConcatEmbeddingsOptions>() const
-{
- return builtin_options_as_ConcatEmbeddingsOptions();
-}
-
-template <>
-inline const LSHProjectionOptions *Operator::builtin_options_as<LSHProjectionOptions>() const
-{
- return builtin_options_as_LSHProjectionOptions();
-}
-
-template <> inline const Pool2DOptions *Operator::builtin_options_as<Pool2DOptions>() const
-{
- return builtin_options_as_Pool2DOptions();
-}
-
-template <> inline const SVDFOptions *Operator::builtin_options_as<SVDFOptions>() const
-{
- return builtin_options_as_SVDFOptions();
-}
-
-template <> inline const RNNOptions *Operator::builtin_options_as<RNNOptions>() const
-{
- return builtin_options_as_RNNOptions();
-}
-
-template <>
-inline const FullyConnectedOptions *Operator::builtin_options_as<FullyConnectedOptions>() const
-{
- return builtin_options_as_FullyConnectedOptions();
-}
-
-template <> inline const SoftmaxOptions *Operator::builtin_options_as<SoftmaxOptions>() const
-{
- return builtin_options_as_SoftmaxOptions();
-}
-
-template <>
-inline const ConcatenationOptions *Operator::builtin_options_as<ConcatenationOptions>() const
-{
- return builtin_options_as_ConcatenationOptions();
-}
-
-template <> inline const AddOptions *Operator::builtin_options_as<AddOptions>() const
-{
- return builtin_options_as_AddOptions();
-}
-
-template <> inline const L2NormOptions *Operator::builtin_options_as<L2NormOptions>() const
-{
- return builtin_options_as_L2NormOptions();
-}
-
-template <>
-inline const LocalResponseNormalizationOptions *
-Operator::builtin_options_as<LocalResponseNormalizationOptions>() const
-{
- return builtin_options_as_LocalResponseNormalizationOptions();
-}
-
-template <> inline const LSTMOptions *Operator::builtin_options_as<LSTMOptions>() const
-{
- return builtin_options_as_LSTMOptions();
-}
-
-template <>
-inline const ResizeBilinearOptions *Operator::builtin_options_as<ResizeBilinearOptions>() const
-{
- return builtin_options_as_ResizeBilinearOptions();
-}
-
-template <> inline const CallOptions *Operator::builtin_options_as<CallOptions>() const
-{
- return builtin_options_as_CallOptions();
-}
-
-template <> inline const ReshapeOptions *Operator::builtin_options_as<ReshapeOptions>() const
-{
- return builtin_options_as_ReshapeOptions();
-}
-
-template <> inline const SkipGramOptions *Operator::builtin_options_as<SkipGramOptions>() const
-{
- return builtin_options_as_SkipGramOptions();
-}
-
-template <>
-inline const SpaceToDepthOptions *Operator::builtin_options_as<SpaceToDepthOptions>() const
-{
- return builtin_options_as_SpaceToDepthOptions();
-}
-
-template <>
-inline const EmbeddingLookupSparseOptions *
-Operator::builtin_options_as<EmbeddingLookupSparseOptions>() const
-{
- return builtin_options_as_EmbeddingLookupSparseOptions();
-}
-
-template <> inline const MulOptions *Operator::builtin_options_as<MulOptions>() const
-{
- return builtin_options_as_MulOptions();
-}
-
-template <> inline const PadOptions *Operator::builtin_options_as<PadOptions>() const
-{
- return builtin_options_as_PadOptions();
-}
-
-template <> inline const GatherOptions *Operator::builtin_options_as<GatherOptions>() const
-{
- return builtin_options_as_GatherOptions();
-}
-
-template <>
-inline const BatchToSpaceNDOptions *Operator::builtin_options_as<BatchToSpaceNDOptions>() const
-{
- return builtin_options_as_BatchToSpaceNDOptions();
-}
-
-template <>
-inline const SpaceToBatchNDOptions *Operator::builtin_options_as<SpaceToBatchNDOptions>() const
-{
- return builtin_options_as_SpaceToBatchNDOptions();
-}
-
-template <> inline const TransposeOptions *Operator::builtin_options_as<TransposeOptions>() const
-{
- return builtin_options_as_TransposeOptions();
-}
-
-template <> inline const ReducerOptions *Operator::builtin_options_as<ReducerOptions>() const
-{
- return builtin_options_as_ReducerOptions();
-}
-
-template <> inline const SubOptions *Operator::builtin_options_as<SubOptions>() const
-{
- return builtin_options_as_SubOptions();
-}
-
-template <> inline const DivOptions *Operator::builtin_options_as<DivOptions>() const
-{
- return builtin_options_as_DivOptions();
-}
-
-template <> inline const SqueezeOptions *Operator::builtin_options_as<SqueezeOptions>() const
-{
- return builtin_options_as_SqueezeOptions();
-}
-
-template <>
-inline const SequenceRNNOptions *Operator::builtin_options_as<SequenceRNNOptions>() const
-{
- return builtin_options_as_SequenceRNNOptions();
-}
-
-template <>
-inline const StridedSliceOptions *Operator::builtin_options_as<StridedSliceOptions>() const
-{
- return builtin_options_as_StridedSliceOptions();
-}
-
-template <> inline const ExpOptions *Operator::builtin_options_as<ExpOptions>() const
-{
- return builtin_options_as_ExpOptions();
-}
-
-template <> inline const TopKV2Options *Operator::builtin_options_as<TopKV2Options>() const
-{
- return builtin_options_as_TopKV2Options();
-}
-
-template <> inline const SplitOptions *Operator::builtin_options_as<SplitOptions>() const
-{
- return builtin_options_as_SplitOptions();
-}
-
-template <> inline const LogSoftmaxOptions *Operator::builtin_options_as<LogSoftmaxOptions>() const
-{
- return builtin_options_as_LogSoftmaxOptions();
-}
-
-template <> inline const CastOptions *Operator::builtin_options_as<CastOptions>() const
-{
- return builtin_options_as_CastOptions();
-}
-
-template <> inline const DequantizeOptions *Operator::builtin_options_as<DequantizeOptions>() const
-{
- return builtin_options_as_DequantizeOptions();
-}
-
-template <>
-inline const MaximumMinimumOptions *Operator::builtin_options_as<MaximumMinimumOptions>() const
-{
- return builtin_options_as_MaximumMinimumOptions();
-}
-
-template <> inline const ArgMaxOptions *Operator::builtin_options_as<ArgMaxOptions>() const
-{
- return builtin_options_as_ArgMaxOptions();
-}
-
-template <> inline const LessOptions *Operator::builtin_options_as<LessOptions>() const
-{
- return builtin_options_as_LessOptions();
-}
-
-template <> inline const NegOptions *Operator::builtin_options_as<NegOptions>() const
-{
- return builtin_options_as_NegOptions();
-}
-
-template <> inline const PadV2Options *Operator::builtin_options_as<PadV2Options>() const
-{
- return builtin_options_as_PadV2Options();
-}
-
-template <> inline const GreaterOptions *Operator::builtin_options_as<GreaterOptions>() const
-{
- return builtin_options_as_GreaterOptions();
-}
-
-template <>
-inline const GreaterEqualOptions *Operator::builtin_options_as<GreaterEqualOptions>() const
-{
- return builtin_options_as_GreaterEqualOptions();
-}
-
-template <> inline const LessEqualOptions *Operator::builtin_options_as<LessEqualOptions>() const
-{
- return builtin_options_as_LessEqualOptions();
-}
-
-template <> inline const SelectOptions *Operator::builtin_options_as<SelectOptions>() const
-{
- return builtin_options_as_SelectOptions();
-}
-
-template <> inline const SliceOptions *Operator::builtin_options_as<SliceOptions>() const
-{
- return builtin_options_as_SliceOptions();
-}
-
-template <>
-inline const TransposeConvOptions *Operator::builtin_options_as<TransposeConvOptions>() const
-{
- return builtin_options_as_TransposeConvOptions();
-}
-
-template <>
-inline const SparseToDenseOptions *Operator::builtin_options_as<SparseToDenseOptions>() const
-{
- return builtin_options_as_SparseToDenseOptions();
-}
-
-template <> inline const TileOptions *Operator::builtin_options_as<TileOptions>() const
-{
- return builtin_options_as_TileOptions();
-}
-
-template <> inline const ExpandDimsOptions *Operator::builtin_options_as<ExpandDimsOptions>() const
-{
- return builtin_options_as_ExpandDimsOptions();
-}
-
-template <> inline const EqualOptions *Operator::builtin_options_as<EqualOptions>() const
-{
- return builtin_options_as_EqualOptions();
-}
-
-template <> inline const NotEqualOptions *Operator::builtin_options_as<NotEqualOptions>() const
-{
- return builtin_options_as_NotEqualOptions();
-}
-
-template <> inline const ShapeOptions *Operator::builtin_options_as<ShapeOptions>() const
-{
- return builtin_options_as_ShapeOptions();
-}
-
-template <> inline const PowOptions *Operator::builtin_options_as<PowOptions>() const
-{
- return builtin_options_as_PowOptions();
-}
-
-template <> inline const ArgMinOptions *Operator::builtin_options_as<ArgMinOptions>() const
-{
- return builtin_options_as_ArgMinOptions();
-}
-
-template <> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuantOptions>() const
-{
- return builtin_options_as_FakeQuantOptions();
-}
-
-template <> inline const PackOptions *Operator::builtin_options_as<PackOptions>() const
-{
- return builtin_options_as_PackOptions();
-}
-
-template <> inline const LogicalOrOptions *Operator::builtin_options_as<LogicalOrOptions>() const
-{
- return builtin_options_as_LogicalOrOptions();
-}
-
-template <> inline const OneHotOptions *Operator::builtin_options_as<OneHotOptions>() const
-{
- return builtin_options_as_OneHotOptions();
-}
-
-template <> inline const LogicalAndOptions *Operator::builtin_options_as<LogicalAndOptions>() const
-{
- return builtin_options_as_LogicalAndOptions();
-}
-
-template <> inline const LogicalNotOptions *Operator::builtin_options_as<LogicalNotOptions>() const
-{
- return builtin_options_as_LogicalNotOptions();
-}
-
-template <> inline const UnpackOptions *Operator::builtin_options_as<UnpackOptions>() const
-{
- return builtin_options_as_UnpackOptions();
-}
-
-template <> inline const FloorDivOptions *Operator::builtin_options_as<FloorDivOptions>() const
-{
- return builtin_options_as_FloorDivOptions();
-}
-
-template <> inline const SquareOptions *Operator::builtin_options_as<SquareOptions>() const
-{
- return builtin_options_as_SquareOptions();
-}
-
-template <> inline const ZerosLikeOptions *Operator::builtin_options_as<ZerosLikeOptions>() const
-{
- return builtin_options_as_ZerosLikeOptions();
-}
-
-template <> inline const FillOptions *Operator::builtin_options_as<FillOptions>() const
-{
- return builtin_options_as_FillOptions();
-}
-
-template <>
-inline const BidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<BidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceLSTMOptions();
-}
-
-template <>
-inline const BidirectionalSequenceRNNOptions *
-Operator::builtin_options_as<BidirectionalSequenceRNNOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceRNNOptions();
-}
-
-template <>
-inline const UnidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<UnidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_UnidirectionalSequenceLSTMOptions();
-}
-
-template <> inline const FloorModOptions *Operator::builtin_options_as<FloorModOptions>() const
-{
- return builtin_options_as_FloorModOptions();
-}
-
-template <> inline const RangeOptions *Operator::builtin_options_as<RangeOptions>() const
-{
- return builtin_options_as_RangeOptions();
-}
-
-template <>
-inline const ResizeNearestNeighborOptions *
-Operator::builtin_options_as<ResizeNearestNeighborOptions>() const
-{
- return builtin_options_as_ResizeNearestNeighborOptions();
-}
-
-template <> inline const LeakyReluOptions *Operator::builtin_options_as<LeakyReluOptions>() const
-{
- return builtin_options_as_LeakyReluOptions();
-}
-
-template <>
-inline const SquaredDifferenceOptions *
-Operator::builtin_options_as<SquaredDifferenceOptions>() const
-{
- return builtin_options_as_SquaredDifferenceOptions();
-}
-
-template <> inline const MirrorPadOptions *Operator::builtin_options_as<MirrorPadOptions>() const
-{
- return builtin_options_as_MirrorPadOptions();
-}
-
-template <> inline const AbsOptions *Operator::builtin_options_as<AbsOptions>() const
-{
- return builtin_options_as_AbsOptions();
-}
-
-template <> inline const SplitVOptions *Operator::builtin_options_as<SplitVOptions>() const
-{
- return builtin_options_as_SplitVOptions();
-}
-
-template <> inline const UniqueOptions *Operator::builtin_options_as<UniqueOptions>() const
-{
- return builtin_options_as_UniqueOptions();
-}
-
-template <> inline const ReverseV2Options *Operator::builtin_options_as<ReverseV2Options>() const
-{
- return builtin_options_as_ReverseV2Options();
-}
-
-template <> inline const AddNOptions *Operator::builtin_options_as<AddNOptions>() const
-{
- return builtin_options_as_AddNOptions();
-}
-
-template <> inline const GatherNdOptions *Operator::builtin_options_as<GatherNdOptions>() const
-{
- return builtin_options_as_GatherNdOptions();
-}
-
-template <> inline const CosOptions *Operator::builtin_options_as<CosOptions>() const
-{
- return builtin_options_as_CosOptions();
-}
-
-template <> inline const WhereOptions *Operator::builtin_options_as<WhereOptions>() const
-{
- return builtin_options_as_WhereOptions();
-}
-
-template <> inline const RankOptions *Operator::builtin_options_as<RankOptions>() const
-{
- return builtin_options_as_RankOptions();
-}
-
-template <>
-inline const ReverseSequenceOptions *Operator::builtin_options_as<ReverseSequenceOptions>() const
-{
- return builtin_options_as_ReverseSequenceOptions();
-}
-
-template <> inline const MatrixDiagOptions *Operator::builtin_options_as<MatrixDiagOptions>() const
-{
- return builtin_options_as_MatrixDiagOptions();
-}
-
-template <> inline const QuantizeOptions *Operator::builtin_options_as<QuantizeOptions>() const
-{
- return builtin_options_as_QuantizeOptions();
-}
-
-template <>
-inline const MatrixSetDiagOptions *Operator::builtin_options_as<MatrixSetDiagOptions>() const
-{
- return builtin_options_as_MatrixSetDiagOptions();
-}
-
-template <> inline const HardSwishOptions *Operator::builtin_options_as<HardSwishOptions>() const
-{
- return builtin_options_as_HardSwishOptions();
-}
-
-template <> inline const IfOptions *Operator::builtin_options_as<IfOptions>() const
-{
- return builtin_options_as_IfOptions();
-}
-
-template <> inline const WhileOptions *Operator::builtin_options_as<WhileOptions>() const
-{
- return builtin_options_as_WhileOptions();
-}
-
-template <>
-inline const DepthToSpaceOptions *Operator::builtin_options_as<DepthToSpaceOptions>() const
-{
- return builtin_options_as_DepthToSpaceOptions();
-}
-
-template <>
-inline const NonMaxSuppressionV4Options *
-Operator::builtin_options_as<NonMaxSuppressionV4Options>() const
-{
- return builtin_options_as_NonMaxSuppressionV4Options();
-}
-
-template <>
-inline const NonMaxSuppressionV5Options *
-Operator::builtin_options_as<NonMaxSuppressionV5Options>() const
-{
- return builtin_options_as_NonMaxSuppressionV5Options();
-}
-
-template <> inline const ScatterNdOptions *Operator::builtin_options_as<ScatterNdOptions>() const
-{
- return builtin_options_as_ScatterNdOptions();
-}
-
-template <> inline const SelectV2Options *Operator::builtin_options_as<SelectV2Options>() const
-{
- return builtin_options_as_SelectV2Options();
-}
-
-template <> inline const DensifyOptions *Operator::builtin_options_as<DensifyOptions>() const
-{
- return builtin_options_as_DensifyOptions();
-}
-
-template <> inline const SegmentSumOptions *Operator::builtin_options_as<SegmentSumOptions>() const
-{
- return builtin_options_as_SegmentSumOptions();
-}
-
-template <>
-inline const BatchMatMulOptions *Operator::builtin_options_as<BatchMatMulOptions>() const
-{
- return builtin_options_as_BatchMatMulOptions();
-}
-
-template <> inline const BCQGatherOptions *Operator::builtin_options_as<BCQGatherOptions>() const
-{
- return builtin_options_as_BCQGatherOptions();
-}
-
-template <>
-inline const BCQFullyConnectedOptions *
-Operator::builtin_options_as<BCQFullyConnectedOptions>() const
-{
- return builtin_options_as_BCQFullyConnectedOptions();
-}
-
-template <>
-inline const InstanceNormOptions *Operator::builtin_options_as<InstanceNormOptions>() const
-{
- return builtin_options_as_InstanceNormOptions();
-}
-
-struct OperatorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_opcode_index(uint32_t opcode_index)
- {
- fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(Operator::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
- }
- void add_builtin_options_type(BuiltinOptions builtin_options_type)
- {
- fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE,
- static_cast<uint8_t>(builtin_options_type), 0);
- }
- void add_builtin_options(flatbuffers::Offset<void> builtin_options)
- {
- fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
- }
- void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options)
- {
- fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
- }
- void add_custom_options_format(CustomOptionsFormat custom_options_format)
- {
- fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT,
- static_cast<int8_t>(custom_options_format), 0);
- }
- void add_mutating_variable_inputs(
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs)
- {
- fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs);
- }
- void add_intermediates(flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates)
- {
- fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates);
- }
- explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorBuilder &operator=(const OperatorBuilder &);
- flatbuffers::Offset<Operator> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Operator>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Operator>
-CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates = 0)
-{
- OperatorBuilder builder_(_fbb);
- builder_.add_intermediates(intermediates);
- builder_.add_mutating_variable_inputs(mutating_variable_inputs);
- builder_.add_custom_options(custom_options);
- builder_.add_builtin_options(builtin_options);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_opcode_index(opcode_index);
- builder_.add_custom_options_format(custom_options_format);
- builder_.add_builtin_options_type(builtin_options_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Operator>
-CreateOperatorDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- const std::vector<uint8_t> *custom_options = nullptr,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- const std::vector<uint8_t> *mutating_variable_inputs = nullptr,
- const std::vector<int32_t> *intermediates = nullptr)
-{
- return circle::CreateOperator(
- _fbb, opcode_index, inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0, builtin_options_type, builtin_options,
- custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0, custom_options_format,
- mutating_variable_inputs ? _fbb.CreateVector<uint8_t>(*mutating_variable_inputs) : 0,
- intermediates ? _fbb.CreateVector<int32_t>(*intermediates) : 0);
-}
-
-struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TENSORS = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_OPERATORS = 10,
- VT_NAME = 12,
- VT_DATA_FORMAT = 14
- };
- const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *>(VT_TENSORS);
- }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Operator>> *operators() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Operator>> *>(VT_OPERATORS);
- }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- DataFormat data_format() const
- {
- return static_cast<DataFormat>(GetField<int8_t>(VT_DATA_FORMAT, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) &&
- verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) &&
- verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyField<int8_t>(verifier, VT_DATA_FORMAT) &&
- verifier.EndTable();
- }
-};
-
-struct SubGraphBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors)
- {
- fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
- }
- void
- add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators)
- {
- fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
- }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(SubGraph::VT_NAME, name);
- }
- void add_data_format(DataFormat data_format)
- {
- fbb_.AddElement<int8_t>(SubGraph::VT_DATA_FORMAT, static_cast<int8_t>(data_format), 0);
- }
- explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SubGraphBuilder &operator=(const SubGraphBuilder &);
- flatbuffers::Offset<SubGraph> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SubGraph>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SubGraph> CreateSubGraph(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- DataFormat data_format = DataFormat_CHANNELS_LAST)
-{
- SubGraphBuilder builder_(_fbb);
- builder_.add_name(name);
- builder_.add_operators(operators);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_tensors(tensors);
- builder_.add_data_format(data_format);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SubGraph>
-CreateSubGraphDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<flatbuffers::Offset<Tensor>> *tensors = nullptr,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- const std::vector<flatbuffers::Offset<Operator>> *operators = nullptr,
- const char *name = nullptr, DataFormat data_format = DataFormat_CHANNELS_LAST)
-{
- return circle::CreateSubGraph(
- _fbb, tensors ? _fbb.CreateVector<flatbuffers::Offset<Tensor>>(*tensors) : 0,
- inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0,
- operators ? _fbb.CreateVector<flatbuffers::Offset<Operator>>(*operators) : 0,
- name ? _fbb.CreateString(name) : 0, data_format);
-}
-
-struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_DATA = 4
- };
- const flatbuffers::Vector<uint8_t> *data() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) &&
- verifier.VerifyVector(data()) && verifier.EndTable();
- }
-};
-
-struct BufferBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data)
- {
- fbb_.AddOffset(Buffer::VT_DATA, data);
- }
- explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BufferBuilder &operator=(const BufferBuilder &);
- flatbuffers::Offset<Buffer> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Buffer>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Buffer>
-CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0)
-{
- BufferBuilder builder_(_fbb);
- builder_.add_data(data);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Buffer> CreateBufferDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *data = nullptr)
-{
- return circle::CreateBuffer(_fbb, data ? _fbb.CreateVector<uint8_t>(*data) : 0);
-}
-
-struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NAME = 4,
- VT_BUFFER = 6
- };
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyField<uint32_t>(verifier, VT_BUFFER) &&
- verifier.EndTable();
- }
-};
-
-struct MetadataBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(Metadata::VT_NAME, name);
- }
- void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0); }
- explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MetadataBuilder &operator=(const MetadataBuilder &);
- flatbuffers::Offset<Metadata> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Metadata>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Metadata>
-CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> name = 0, uint32_t buffer = 0)
-{
- MetadataBuilder builder_(_fbb);
- builder_.add_buffer(buffer);
- builder_.add_name(name);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Metadata> CreateMetadataDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const char *name = nullptr,
- uint32_t buffer = 0)
-{
- return circle::CreateMetadata(_fbb, name ? _fbb.CreateString(name) : 0, buffer);
-}
-
-struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VERSION = 4,
- VT_OPERATOR_CODES = 6,
- VT_SUBGRAPHS = 8,
- VT_DESCRIPTION = 10,
- VT_BUFFERS = 12,
- VT_METADATA_BUFFER = 14,
- VT_METADATA = 16
- };
- uint32_t version() const { return GetField<uint32_t>(VT_VERSION, 0); }
- const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *operator_codes() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *>(
- VT_OPERATOR_CODES);
- }
- const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *subgraphs() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *>(VT_SUBGRAPHS);
- }
- const flatbuffers::String *description() const
- {
- return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *>(VT_BUFFERS);
- }
- const flatbuffers::Vector<int32_t> *metadata_buffer() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *metadata() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *>(VT_METADATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_VERSION) &&
- VerifyOffset(verifier, VT_OPERATOR_CODES) && verifier.VerifyVector(operator_codes()) &&
- verifier.VerifyVectorOfTables(operator_codes()) &&
- VerifyOffset(verifier, VT_SUBGRAPHS) && verifier.VerifyVector(subgraphs()) &&
- verifier.VerifyVectorOfTables(subgraphs()) && VerifyOffset(verifier, VT_DESCRIPTION) &&
- verifier.VerifyString(description()) && VerifyOffset(verifier, VT_BUFFERS) &&
- verifier.VerifyVector(buffers()) && verifier.VerifyVectorOfTables(buffers()) &&
- VerifyOffset(verifier, VT_METADATA_BUFFER) && verifier.VerifyVector(metadata_buffer()) &&
- VerifyOffset(verifier, VT_METADATA) && verifier.VerifyVector(metadata()) &&
- verifier.VerifyVectorOfTables(metadata()) && verifier.EndTable();
- }
-};
-
-struct ModelBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_version(uint32_t version) { fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0); }
- void add_operator_codes(
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes)
- {
- fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
- }
- void
- add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs)
- {
- fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
- }
- void add_description(flatbuffers::Offset<flatbuffers::String> description)
- {
- fbb_.AddOffset(Model::VT_DESCRIPTION, description);
- }
- void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers)
- {
- fbb_.AddOffset(Model::VT_BUFFERS, buffers);
- }
- void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer)
- {
- fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
- }
- void
- add_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata)
- {
- fbb_.AddOffset(Model::VT_METADATA, metadata);
- }
- explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ModelBuilder &operator=(const ModelBuilder &);
- flatbuffers::Offset<Model> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Model>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Model> CreateModel(
- flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs = 0,
- flatbuffers::Offset<flatbuffers::String> description = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata = 0)
-{
- ModelBuilder builder_(_fbb);
- builder_.add_metadata(metadata);
- builder_.add_metadata_buffer(metadata_buffer);
- builder_.add_buffers(buffers);
- builder_.add_description(description);
- builder_.add_subgraphs(subgraphs);
- builder_.add_operator_codes(operator_codes);
- builder_.add_version(version);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Model>
-CreateModelDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- const std::vector<flatbuffers::Offset<OperatorCode>> *operator_codes = nullptr,
- const std::vector<flatbuffers::Offset<SubGraph>> *subgraphs = nullptr,
- const char *description = nullptr,
- const std::vector<flatbuffers::Offset<Buffer>> *buffers = nullptr,
- const std::vector<int32_t> *metadata_buffer = nullptr,
- const std::vector<flatbuffers::Offset<Metadata>> *metadata = nullptr)
-{
- return circle::CreateModel(
- _fbb, version,
- operator_codes ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>>(*operator_codes) : 0,
- subgraphs ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>>(*subgraphs) : 0,
- description ? _fbb.CreateString(description) : 0,
- buffers ? _fbb.CreateVector<flatbuffers::Offset<Buffer>>(*buffers) : 0,
- metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0,
- metadata ? _fbb.CreateVector<flatbuffers::Offset<Metadata>>(*metadata) : 0);
-}
-
-inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type)
-{
- switch (type)
- {
- case QuantizationDetails_NONE:
- {
- return true;
- }
- case QuantizationDetails_CustomQuantization:
- {
- auto ptr = reinterpret_cast<const CustomQuantization *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool
-VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyQuantizationDetails(verifier, values->Get(i),
- types->GetEnum<QuantizationDetails>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj,
- SparseIndexVector type)
-{
- switch (type)
- {
- case SparseIndexVector_NONE:
- {
- return true;
- }
- case SparseIndexVector_Int32Vector:
- {
- auto ptr = reinterpret_cast<const Int32Vector *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case SparseIndexVector_Uint16Vector:
- {
- auto ptr = reinterpret_cast<const Uint16Vector *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case SparseIndexVector_Uint8Vector:
- {
- auto ptr = reinterpret_cast<const Uint8Vector *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool
-VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifySparseIndexVector(verifier, values->Get(i), types->GetEnum<SparseIndexVector>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj,
- BuiltinOptions type)
-{
- switch (type)
- {
- case BuiltinOptions_NONE:
- {
- return true;
- }
- case BuiltinOptions_Conv2DOptions:
- {
- auto ptr = reinterpret_cast<const Conv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DepthwiseConv2DOptions:
- {
- auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatEmbeddingsOptions:
- {
- auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSHProjectionOptions:
- {
- auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_Pool2DOptions:
- {
- auto ptr = reinterpret_cast<const Pool2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SVDFOptions:
- {
- auto ptr = reinterpret_cast<const SVDFOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RNNOptions:
- {
- auto ptr = reinterpret_cast<const RNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FullyConnectedOptions:
- {
- auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatenationOptions:
- {
- auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AddOptions:
- {
- auto ptr = reinterpret_cast<const AddOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_L2NormOptions:
- {
- auto ptr = reinterpret_cast<const L2NormOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LocalResponseNormalizationOptions:
- {
- auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSTMOptions:
- {
- auto ptr = reinterpret_cast<const LSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeBilinearOptions:
- {
- auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CallOptions:
- {
- auto ptr = reinterpret_cast<const CallOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReshapeOptions:
- {
- auto ptr = reinterpret_cast<const ReshapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SkipGramOptions:
- {
- auto ptr = reinterpret_cast<const SkipGramOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToDepthOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EmbeddingLookupSparseOptions:
- {
- auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MulOptions:
- {
- auto ptr = reinterpret_cast<const MulOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadOptions:
- {
- auto ptr = reinterpret_cast<const PadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GatherOptions:
- {
- auto ptr = reinterpret_cast<const GatherOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BatchToSpaceNDOptions:
- {
- auto ptr = reinterpret_cast<const BatchToSpaceNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToBatchNDOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToBatchNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeOptions:
- {
- auto ptr = reinterpret_cast<const TransposeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReducerOptions:
- {
- auto ptr = reinterpret_cast<const ReducerOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SubOptions:
- {
- auto ptr = reinterpret_cast<const SubOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DivOptions:
- {
- auto ptr = reinterpret_cast<const DivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SqueezeOptions:
- {
- auto ptr = reinterpret_cast<const SqueezeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const SequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_StridedSliceOptions:
- {
- auto ptr = reinterpret_cast<const StridedSliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpOptions:
- {
- auto ptr = reinterpret_cast<const ExpOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TopKV2Options:
- {
- auto ptr = reinterpret_cast<const TopKV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitOptions:
- {
- auto ptr = reinterpret_cast<const SplitOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogSoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const LogSoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CastOptions:
- {
- auto ptr = reinterpret_cast<const CastOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DequantizeOptions:
- {
- auto ptr = reinterpret_cast<const DequantizeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MaximumMinimumOptions:
- {
- auto ptr = reinterpret_cast<const MaximumMinimumOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMaxOptions:
- {
- auto ptr = reinterpret_cast<const ArgMaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessOptions:
- {
- auto ptr = reinterpret_cast<const LessOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NegOptions:
- {
- auto ptr = reinterpret_cast<const NegOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadV2Options:
- {
- auto ptr = reinterpret_cast<const PadV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterOptions:
- {
- auto ptr = reinterpret_cast<const GreaterOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterEqualOptions:
- {
- auto ptr = reinterpret_cast<const GreaterEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessEqualOptions:
- {
- auto ptr = reinterpret_cast<const LessEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SelectOptions:
- {
- auto ptr = reinterpret_cast<const SelectOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SliceOptions:
- {
- auto ptr = reinterpret_cast<const SliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeConvOptions:
- {
- auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SparseToDenseOptions:
- {
- auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TileOptions:
- {
- auto ptr = reinterpret_cast<const TileOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpandDimsOptions:
- {
- auto ptr = reinterpret_cast<const ExpandDimsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EqualOptions:
- {
- auto ptr = reinterpret_cast<const EqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NotEqualOptions:
- {
- auto ptr = reinterpret_cast<const NotEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ShapeOptions:
- {
- auto ptr = reinterpret_cast<const ShapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PowOptions:
- {
- auto ptr = reinterpret_cast<const PowOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMinOptions:
- {
- auto ptr = reinterpret_cast<const ArgMinOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FakeQuantOptions:
- {
- auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PackOptions:
- {
- auto ptr = reinterpret_cast<const PackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalOrOptions:
- {
- auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_OneHotOptions:
- {
- auto ptr = reinterpret_cast<const OneHotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalAndOptions:
- {
- auto ptr = reinterpret_cast<const LogicalAndOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalNotOptions:
- {
- auto ptr = reinterpret_cast<const LogicalNotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnpackOptions:
- {
- auto ptr = reinterpret_cast<const UnpackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorDivOptions:
- {
- auto ptr = reinterpret_cast<const FloorDivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquareOptions:
- {
- auto ptr = reinterpret_cast<const SquareOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ZerosLikeOptions:
- {
- auto ptr = reinterpret_cast<const ZerosLikeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FillOptions:
- {
- auto ptr = reinterpret_cast<const FillOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const UnidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorModOptions:
- {
- auto ptr = reinterpret_cast<const FloorModOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RangeOptions:
- {
- auto ptr = reinterpret_cast<const RangeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeNearestNeighborOptions:
- {
- auto ptr = reinterpret_cast<const ResizeNearestNeighborOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LeakyReluOptions:
- {
- auto ptr = reinterpret_cast<const LeakyReluOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquaredDifferenceOptions:
- {
- auto ptr = reinterpret_cast<const SquaredDifferenceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MirrorPadOptions:
- {
- auto ptr = reinterpret_cast<const MirrorPadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AbsOptions:
- {
- auto ptr = reinterpret_cast<const AbsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitVOptions:
- {
- auto ptr = reinterpret_cast<const SplitVOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UniqueOptions:
- {
- auto ptr = reinterpret_cast<const UniqueOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReverseV2Options:
- {
- auto ptr = reinterpret_cast<const ReverseV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AddNOptions:
- {
- auto ptr = reinterpret_cast<const AddNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GatherNdOptions:
- {
- auto ptr = reinterpret_cast<const GatherNdOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CosOptions:
- {
- auto ptr = reinterpret_cast<const CosOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_WhereOptions:
- {
- auto ptr = reinterpret_cast<const WhereOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RankOptions:
- {
- auto ptr = reinterpret_cast<const RankOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReverseSequenceOptions:
- {
- auto ptr = reinterpret_cast<const ReverseSequenceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MatrixDiagOptions:
- {
- auto ptr = reinterpret_cast<const MatrixDiagOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_QuantizeOptions:
- {
- auto ptr = reinterpret_cast<const QuantizeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MatrixSetDiagOptions:
- {
- auto ptr = reinterpret_cast<const MatrixSetDiagOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_HardSwishOptions:
- {
- auto ptr = reinterpret_cast<const HardSwishOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_IfOptions:
- {
- auto ptr = reinterpret_cast<const IfOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_WhileOptions:
- {
- auto ptr = reinterpret_cast<const WhileOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DepthToSpaceOptions:
- {
- auto ptr = reinterpret_cast<const DepthToSpaceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NonMaxSuppressionV4Options:
- {
- auto ptr = reinterpret_cast<const NonMaxSuppressionV4Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NonMaxSuppressionV5Options:
- {
- auto ptr = reinterpret_cast<const NonMaxSuppressionV5Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ScatterNdOptions:
- {
- auto ptr = reinterpret_cast<const ScatterNdOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SelectV2Options:
- {
- auto ptr = reinterpret_cast<const SelectV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DensifyOptions:
- {
- auto ptr = reinterpret_cast<const DensifyOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SegmentSumOptions:
- {
- auto ptr = reinterpret_cast<const SegmentSumOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BatchMatMulOptions:
- {
- auto ptr = reinterpret_cast<const BatchMatMulOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BCQGatherOptions:
- {
- auto ptr = reinterpret_cast<const BCQGatherOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BCQFullyConnectedOptions:
- {
- auto ptr = reinterpret_cast<const BCQFullyConnectedOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_InstanceNormOptions:
- {
- auto ptr = reinterpret_cast<const InstanceNormOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyBuiltinOptions(verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline const circle::Model *GetModel(const void *buf)
-{
- return flatbuffers::GetRoot<circle::Model>(buf);
-}
-
-inline const circle::Model *GetSizePrefixedModel(const void *buf)
-{
- return flatbuffers::GetSizePrefixedRoot<circle::Model>(buf);
-}
-
-inline const char *ModelIdentifier() { return "CIR0"; }
-
-inline bool ModelBufferHasIdentifier(const void *buf)
-{
- return flatbuffers::BufferHasIdentifier(buf, ModelIdentifier());
-}
-
-inline bool VerifyModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifyBuffer<circle::Model>(ModelIdentifier());
-}
-
-inline bool VerifySizePrefixedModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifySizePrefixedBuffer<circle::Model>(ModelIdentifier());
-}
-
-inline const char *ModelExtension() { return "circle"; }
-
-inline void FinishModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<circle::Model> root)
-{
- fbb.Finish(root, ModelIdentifier());
-}
-
-inline void FinishSizePrefixedModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<circle::Model> root)
-{
- fbb.FinishSizePrefixed(root, ModelIdentifier());
-}
-
-} // namespace circle
-
-#endif // FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_
diff --git a/runtime/onert/frontend/nnapi/CMakeLists.txt b/runtime/onert/frontend/nnapi/CMakeLists.txt
deleted file mode 100644
index dafd84ccf..000000000
--- a/runtime/onert/frontend/nnapi/CMakeLists.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-file(GLOB_RECURSE SOURCES_FRONTEND "*.cc")
-file(GLOB_RECURSE TESTS_FRONTEND "*.test.cc")
-list(REMOVE_ITEM SOURCES_FRONTEND ${TESTS_FRONTEND})
-
-set(LIB_ONERT onert)
-
-add_library(${LIB_ONERT} SHARED ${SOURCES_FRONTEND})
-target_link_libraries(${LIB_ONERT} PUBLIC nnfw-nnapi-header)
-target_link_libraries(${LIB_ONERT} PUBLIC onert_core) # TODO Link PRIVATE onert_core
-target_link_libraries(${LIB_ONERT} PRIVATE nnfw_common)
-target_link_libraries(${LIB_ONERT} PRIVATE nnfw_coverage)
-
-set_target_properties(${LIB_ONERT} PROPERTIES OUTPUT_NAME neuralnetworks)
-
-install(TARGETS ${LIB_ONERT} DESTINATION lib)
-
-if(NOT ENABLE_TEST)
- return()
-endif(NOT ENABLE_TEST)
-
-add_executable(test_onert_frontend_nnapi ${TESTS_FRONTEND})
-
-target_link_libraries(test_onert_frontend_nnapi PRIVATE ${LIB_ONERT} dl)
-target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest)
-target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest_main)
-
-install(TARGETS test_onert_frontend_nnapi DESTINATION unittest_standalone)
diff --git a/runtime/onert/frontend/nnapi/compilation.cc b/runtime/onert/frontend/nnapi/compilation.cc
deleted file mode 100644
index 871c040ef..000000000
--- a/runtime/onert/frontend/nnapi/compilation.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-
-#include <new>
-
-#include "wrapper/ANeuralNetworksModel.h"
-#include "wrapper/ANeuralNetworksCompilation.h"
-#include "util/logging.h"
-
-//
-// NNAPI Implementation
-//
-int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
- ANeuralNetworksCompilation **compilation)
-{
- if ((model == nullptr) || (compilation == nullptr))
- {
- VERBOSE(NNAPI::Compilation) << "create: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (!model->isFinished())
- {
- VERBOSE(NNAPI::Compilation) << "create: Model define is not finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- *compilation = new (std::nothrow) ANeuralNetworksCompilation(model);
- if (*compilation == nullptr)
- {
- VERBOSE(NNAPI::Compilation) << "create: ail to create compilation object" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
-{
- if (compilation == nullptr)
- {
- VERBOSE(NNAPI::Compilation) << "finish: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (compilation->state() != ::onert::compiler::State::CREATED)
- {
- VERBOSE(NNAPI::Compilation) << "finish: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- if (!compilation->finish())
- {
- VERBOSE(NNAPI::Compilation) << "finish: Fail to compile" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation)
-{
- delete compilation;
-}
-
-int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
- int32_t preference)
-{
- if (compilation == nullptr)
- {
- VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (compilation->state() != ::onert::compiler::State::CREATED)
- {
- VERBOSE(NNAPI::Compilation) << "setPreference: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- const PreferenceCode FIRST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_LOW_POWER;
- const PreferenceCode LAST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED;
- if ((preference < FIRST_PREFERENCE_CODE) || (preference > LAST_PREFERENCE_CODE))
- {
- VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect preference code" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // NYI: nothing to set
- return ANEURALNETWORKS_NO_ERROR;
-}
diff --git a/runtime/onert/frontend/nnapi/event.cc b/runtime/onert/frontend/nnapi/event.cc
deleted file mode 100644
index 593b74e90..000000000
--- a/runtime/onert/frontend/nnapi/event.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-
-#include "wrapper/ANeuralNetworksEvent.h"
-
-int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
-{
- if (event == nullptr)
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (!event->waitFinish())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) { delete event; }
diff --git a/runtime/onert/frontend/nnapi/execution.cc b/runtime/onert/frontend/nnapi/execution.cc
deleted file mode 100644
index 56ca5ef00..000000000
--- a/runtime/onert/frontend/nnapi/execution.cc
+++ /dev/null
@@ -1,504 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-
-#include <new>
-
-#include "wrapper/ANeuralNetworksCompilation.h"
-#include "wrapper/ANeuralNetworksExecution.h"
-#include "wrapper/ANeuralNetworksMemory.h"
-#include "wrapper/ANeuralNetworksEvent.h"
-#include "wrapper/NNAPIConvert.h"
-#include "util/logging.h"
-
-//
-// NNAPI Implementation
-//
-int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
- ANeuralNetworksExecution **execution)
-{
- if ((compilation == nullptr) || (execution == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "create: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- std::shared_ptr<onert::exec::ExecutorMap> executors;
-
- compilation->publish(executors);
-
- if (executors == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "create: Never compiled yet" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- *execution = new (std::nothrow) ANeuralNetworksExecution{executors};
- if (*execution == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "create: Fail to create execution object" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-// NOTE Handle optional input
-// Unspecified shape on model build
-// Optional and omitted input on execution: skip input setting (workaround for LSTM)
-// Optional but not omitted input on execution: cannot handle
-// Normal input on execution: cannot handle
-// Fully specified shape on model build
-// Optional input on execution: cannot handle
-// Normal input: handle normally
-int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type, const void *buffer,
- size_t length)
-{
- // Don't check type
- // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
- // If the input or output is optional and omitted then it need not have a fully specified tensor
- // operand type
- if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if ((buffer != nullptr) && (length == 0))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Zero length input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- const auto operand_index = execution->getInputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setInput: Invalid input index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // Omitted optional input
- // LSTM operation's some inputs can be optional input
- // Transpose operation's permutation input can be optional input
- if ((buffer == nullptr) && (length == 0))
- {
- uint32_t dims[1] = {0};
- ANeuralNetworksOperandType compared_shape;
- compared_shape.dimensionCount = 1;
- compared_shape.dimensions = dims;
- if (execution->hasUnspecifiedDims(operand_index))
- {
- return ANEURALNETWORKS_NO_ERROR;
- }
- else if (type == nullptr && execution->IsOptionalInput(operand_index))
- {
- if (!execution->setOptionalInput(index, type, buffer, length))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Fail to set optional input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- return ANEURALNETWORKS_NO_ERROR;
- }
- // TODO Changes the condition to check zero sized
- else if (execution->compareShape(&compared_shape, operand_index))
- {
- if (!execution->setInput(index, type, buffer, length))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Fail to set input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- return ANEURALNETWORKS_NO_ERROR;
- }
- else
- {
- VERBOSE(NNAPI::Execution) << "setInput: Cannot handle fully-specified shape on model build "
- "but omitted input on execution"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->hasUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!execution->setInput(index, type, buffer, length))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Fail to set input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type, void *buffer,
- size_t length)
-{
- // Don't check type
- // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
- // If the input or output is optional and omitted then it need not have a fully specified tensor
- // operand type
- if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if ((buffer != nullptr) && (length == 0))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Zero length output" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // Handle optional output
- if (buffer == nullptr)
- {
- return ANEURALNETWORKS_NO_ERROR;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->hasUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!execution->setOutput(index, type, buffer, length))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Fail to set output" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
- ANeuralNetworksEvent **event)
-{
- if ((execution == nullptr) || (event == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "startCompute: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // TODO: Handle event
- auto instance = execution->instance();
- *event = new (std::nothrow) ANeuralNetworksEvent{instance};
- if (*event == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "startCompute: Fail to create event" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- if (!execution->startExecute())
- {
- VERBOSE(NNAPI::Execution) << "startCompute: Fail to start execution" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_compute(ANeuralNetworksExecution *execution)
-{
- if (execution == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "Compute: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (!execution->execute())
- {
- VERBOSE(NNAPI::Execution) << "Compute: Fail to execution" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution) { delete execution; }
-
-int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset,
- size_t length)
-{
- if ((execution == nullptr) || (memory == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (length == 0)
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Zero length input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- const auto operand_index = execution->getInputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid input index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->hasUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!memory->vaildAccess(offset, length))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid memory access" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->setInput(index, type, reinterpret_cast<const void *>(memory->base() + offset),
- length))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Fail to set input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset,
- size_t length)
-{
- if ((execution == nullptr) || (memory == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (length == 0)
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Zero length input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->hasUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!memory->vaildAccess(offset, length))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid memory access" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->setOutput(index, type, reinterpret_cast<void *>(memory->base() + offset), length))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Fail to set input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution *execution,
- int32_t index, uint32_t *rank)
-{
- if ((execution == nullptr) || (rank == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->getOutputOperandRank(index, rank))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Fail to get rank" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution *execution,
- int32_t index, uint32_t *dimensions)
-{
- if ((execution == nullptr) || (dimensions == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->getOutputOperandDimensions(index, dimensions))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Fail to get rank" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
diff --git a/runtime/onert/frontend/nnapi/memory.cc b/runtime/onert/frontend/nnapi/memory.cc
deleted file mode 100644
index 6e568a926..000000000
--- a/runtime/onert/frontend/nnapi/memory.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-#include <sys/mman.h>
-#include <new>
-#include <memory>
-
-#include <memory>
-#include "wrapper/ANeuralNetworksMemory.h"
-
-int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
- ANeuralNetworksMemory **memory)
-{
- if (memory == nullptr)
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- *memory = new (std::nothrow) ANeuralNetworksMemory{size, protect, fd, offset};
- if (*memory == nullptr)
- {
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; }
diff --git a/runtime/onert/frontend/nnapi/model.cc b/runtime/onert/frontend/nnapi/model.cc
deleted file mode 100644
index 8c7bd1789..000000000
--- a/runtime/onert/frontend/nnapi/model.cc
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-#include <NeuralNetworksEx.h>
-
-#include <new>
-
-#include "wrapper/ANeuralNetworksModel.h"
-#include "wrapper/ANeuralNetworksMemory.h"
-#include "util/logging.h"
-
-int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
-{
- if (model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "create: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- *model = new (std::nothrow) ANeuralNetworksModel{};
- if (*model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "create: Fail to create model object" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
-
-int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
- const ANeuralNetworksOperandType *type)
-{
- if ((model == nullptr) || (type == nullptr))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "addOperand: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // scale and zeroPoint should be zero for scalars and non-fixed point tensors
- // Quantized:
- // scale: a 32 bit floating point value greater than zero
- // zeroPoint: a 32 bit integer, in range [0, 255]
- if (type->type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)
- {
- if (!(type->scale > 0.0f))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect scale value for quantization" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if ((type->zeroPoint < 0) || (type->zeroPoint > 255))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect zeroPoint value for quantization"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- // NOTE Validation of scale and zeroPoint would be skipped for a while.
- // We do not know whether scalar type can have scale and zeroPoint.
- // To pass ValidationTest and GeneratedTest, this validation code
- // would not be implemented until we can define this issue clearly.
- //
- // scale and zeroPoint should be zero for scalars and non-fixed point tensors
- // else if ((type->scale != 0.0f) || (type->zeroPoint != 0))
- // {
- // return ANEURALNETWORKS_BAD_DATA;
- // }
-
- // dimensionCount should be zero for scalars
- if ((type->dimensionCount != 0) &&
- ((type->type == ANEURALNETWORKS_FLOAT32) || (type->type == ANEURALNETWORKS_INT32) ||
- (type->type == ANEURALNETWORKS_UINT32)))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect data type" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->addOperand(type))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Fail to add operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
- const void *buffer, size_t length)
-{
- const bool optional_operand = ((buffer == nullptr) && (length == 0));
-
- if ((model == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // Negative index value is not allowed
- if (index < 0)
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (negative)" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
- // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
- // index
- // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
- //
- // Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning.
- uint32_t ind = static_cast<uint32_t>(index);
-
- if (!model->isExistOperand(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (not exist)" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!optional_operand && (model->operandSize(ind) != length))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Invalid data length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (model->isUsageSet(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Already set operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // NNAPI spec in NeuralNetworks.h
- // For values of length greater than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES,
- // the application is responsible for not changing the content of this region
- // until all executions using this model have completed
- bool copy_value = false;
- if (length <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES)
- {
- copy_value = true;
- }
-
- if (!model->setOperandValue(ind, buffer, length, optional_operand, copy_value))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Fail to set operand value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
- const ANeuralNetworksMemory *memory,
- size_t offset, size_t length)
-{
- if ((model == nullptr) || (memory == nullptr))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // Negative index value is not allowed
- if (index < 0)
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (negative)"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
- // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
- // index
- // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
- //
- // Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning.
- uint32_t ind = static_cast<uint32_t>(index);
-
- if (!model->isExistOperand(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (not exist)"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if ((model->operandSize(ind) != length) || (memory->size() < (offset + length)))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid data length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (model->isUsageSet(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already set operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->setOperandValue(ind, memory->base() + offset, length))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Fail to set operand value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
- ANeuralNetworksOperationType type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- const ANeuralNetworksOperationType FIRST_OPERATION = ANEURALNETWORKS_ADD;
- const ANeuralNetworksOperationType LAST_OPERATION = ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR;
- if ((type < FIRST_OPERATION) || (type > LAST_OPERATION))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- for (uint32_t i = 0; i < outputCount; i++)
- {
- if (model->isUsageSet(outputs[i]))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already set output operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!model->addOperation(type, inputCount, inputs, outputCount, outputs))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- const ANeuralNetworksOperationTypeEx FIRST_OPERATION = ANEURALNETWORKS_CAST_EX;
- const ANeuralNetworksOperationTypeEx LAST_OPERATION = ANEURALNETWORKS_SPLIT_V_EX;
- if ((type < FIRST_OPERATION) || (type > LAST_OPERATION))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Invalid operation type" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- for (uint32_t i = 0; i < outputCount; i++)
- {
- if (model->isUsageSet(outputs[i]))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already set output operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!model->addOperationEx(type, inputCount, inputs, outputCount, outputs))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- for (uint32_t n = 0; n < inputCount; ++n)
- {
- uint32_t ind = inputs[n];
- if (model->isUsageSet(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already set input operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->addModelInput(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- for (uint32_t n = 0; n < outputCount; ++n)
- {
- uint32_t ind = outputs[n];
-
- if (!model->isOperationOutput(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Need to set output operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->addModelOutput(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add output" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
-{
- if (model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "finish: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "finish: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- if (!model->finish())
- {
- VERBOSE(NNAPI::Model) << "finish: Fail to generate internal graph" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, bool allow)
-{
- if (model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Incorrect null pointer parameter"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- model->allowFloat32toFloat16(allow);
-
- return ANEURALNETWORKS_NO_ERROR;
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc
deleted file mode 100644
index 81cd38f4f..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksCompilation.h"
-
-#include "util/logging.h"
-
-// TODO Support multiple subgraphs
-ANeuralNetworksCompilation::ANeuralNetworksCompilation(const ANeuralNetworksModel *model) noexcept
- : _subgraphs{model->getSubGraphs()}, _compiler{new onert::compiler::Compiler{_subgraphs}}
-{
- if (model->allowedToFp16())
- {
- _compiler->enableToFp16();
- }
-}
-
-bool ANeuralNetworksCompilation::finish() noexcept
-{
- try
- {
- _executors = _compiler->compile();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h
deleted file mode 100644
index 5f0650b9a..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __COMPILATION_H__
-#define __COMPILATION_H__
-
-#include "ANeuralNetworksModel.h"
-
-#include "compiler/Compiler.h"
-#include "ir/Graph.h"
-#include "ir/Subgraphs.h"
-#include "exec/IExecutor.h"
-
-struct ANeuralNetworksCompilation
-{
-public:
- ANeuralNetworksCompilation(const ANeuralNetworksModel *model) noexcept;
-
-public:
- bool finish() noexcept;
-
- onert::compiler::State state(void) noexcept { return _compiler->state(); }
- void publish(std::shared_ptr<onert::exec::ExecutorMap> &executors) noexcept
- {
- executors = _executors;
- }
-
-private:
- std::shared_ptr<onert::ir::Subgraphs> _subgraphs;
- std::shared_ptr<onert::compiler::Compiler> _compiler;
- std::shared_ptr<onert::exec::ExecutorMap> _executors;
-};
-
-#endif
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc
deleted file mode 100644
index 2bea729be..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksEvent.h"
-
-#include "exec/Execution.h"
-#include "util/logging.h"
-
-ANeuralNetworksEvent::ANeuralNetworksEvent(const std::shared_ptr<onert::exec::Execution> &execution)
- : _execution{execution}
-{
- // DO NOTHING
-}
-
-bool ANeuralNetworksEvent::waitFinish(void) noexcept
-{
- try
- {
- _execution->waitFinish();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h
deleted file mode 100644
index 7b462d3d6..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __EVENT_H__
-#define __EVENT_H__
-
-#include <NeuralNetworks.h>
-
-#include <memory>
-
-namespace onert
-{
-namespace exec
-{
-class Execution;
-} // namespace exec
-} // namespace onert
-
-struct ANeuralNetworksEvent
-{
-public:
- ANeuralNetworksEvent(const std::shared_ptr<onert::exec::Execution> &execution);
-
-public:
- bool waitFinish(void) noexcept;
-
-private:
- const std::shared_ptr<onert::exec::Execution> _execution;
-};
-
-#endif
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
deleted file mode 100644
index 6114b74b0..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksExecution.h"
-#include "NNAPIConvert.h"
-#include "util/logging.h"
-
-const onert::ir::OperandIndex ANeuralNetworksExecution::getInputOperandIndex(int32_t index) noexcept
-{
- if (index < 0)
- {
- // Negative index: return invalid index
- return onert::ir::OperandIndex{};
- }
-
- uint32_t cast_index = static_cast<uint32_t>(index);
- if (cast_index >= _execution->primary_subgraph().getInputs().size())
- {
- // Return invalid index
- return onert::ir::OperandIndex{};
- }
-
- onert::ir::IOIndex input_index{cast_index};
- const auto operand_index = _execution->primary_subgraph().getInputs().at(input_index);
- return operand_index;
-}
-
-const onert::ir::OperandIndex
-ANeuralNetworksExecution::getOutputOperandIndex(int32_t index) noexcept
-{
- if (index < 0)
- {
- // Negative index: return invalid index
- return onert::ir::OperandIndex{};
- }
-
- uint32_t cast_index = static_cast<uint32_t>(index);
- if (cast_index >= _execution->primary_subgraph().getOutputs().size())
- {
- // Return invalid index
- return onert::ir::OperandIndex{};
- }
-
- onert::ir::IOIndex output_index{cast_index};
- const auto operand_index = _execution->primary_subgraph().getOutputs().at(output_index);
- return operand_index;
-}
-
-bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type,
- const onert::ir::OperandIndex index) noexcept
-{
- try
- {
- const auto operand_type = _execution->primary_subgraph().operands().at(index).typeInfo();
- const auto typeInfo = NNAPIConvert::getTypeInfo(type);
-
- if (operand_type != typeInfo)
- {
- // Data type mismatch
- return false;
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type,
- const onert::ir::OperandIndex index) noexcept
-{
- // Passed shape should be specified
- if (hasUnspecifiedDims(index))
- {
- return false;
- }
-
- const auto &operand_shape = _execution->primary_subgraph().operands().at(index).shape();
- const auto &shape_from_type = NNAPIConvert::getShape(type);
-
- return operand_shape == shape_from_type;
-}
-
-bool ANeuralNetworksExecution::IsOptionalInput(const onert::ir::OperandIndex index) noexcept
-{
- const auto &operand_shape = _execution->primary_subgraph().operands().at(index).shape();
- for (int32_t i = 0; i < operand_shape.rank(); ++i)
- {
- if (operand_shape.dim(i) != 0)
- return false;
- }
- return true;
-}
-
-bool ANeuralNetworksExecution::hasUnspecifiedDims(const onert::ir::OperandIndex index) noexcept
-{
- const auto operand_shape = _execution->primary_subgraph().operands().at(index).shape();
-
- return operand_shape.hasUnspecifiedDims();
-}
-
-size_t ANeuralNetworksExecution::getOperandSize(const onert::ir::OperandIndex index) noexcept
-{
- try
- {
- return _execution->primary_subgraph().operands().at(index).operandSize();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return 0;
- }
-}
-
-bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOperandType *type,
- const void *buffer, size_t length) noexcept
-{
- try
- {
- onert::ir::IOIndex input_index{index};
- const auto operand_index = getInputOperandIndex(index);
-
- const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
- const auto shape = (type != nullptr)
- ? NNAPIConvert::getShape(type)
- : _execution->primary_subgraph().operands().at(operand_index).shape();
-
- // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other
- // words, we can assume that io_layout from nnapi always is the same as layout of the used
- // model.
- // TODO Set layout of model
- _execution->setInput(input_index, type_info, shape, buffer, length, onert::ir::Layout::NHWC);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::setOptionalInput(uint32_t index,
- const ANeuralNetworksOperandType *type,
- const void *buffer, size_t length) noexcept
-{
- assert(type == nullptr);
- assert(buffer == nullptr);
- assert(length == 0);
- try
- {
- onert::ir::IOIndex input_index{index};
- const auto operand_index = getInputOperandIndex(index);
-
- const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
- const auto shape = (type != nullptr)
- ? NNAPIConvert::getShape(type)
- : _execution->primary_subgraph().operands().at(operand_index).shape();
-
- // ANeuralNetworksExecution::setInput() uses only shape information
- ANeuralNetworksOperandType optional_input_type;
- optional_input_type.dimensionCount = shape.rank();
- std::vector<uint32_t> dims(optional_input_type.dimensionCount);
- for (uint32_t i = 0; i < optional_input_type.dimensionCount; ++i)
- {
- dims.at(i) = shape.dim(i);
- }
- optional_input_type.dimensions = dims.data();
-
- return setInput(index, &optional_input_type, buffer, length);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOperandType *type,
- void *buffer, size_t length) noexcept
-{
- try
- {
- onert::ir::IOIndex output_index{index};
- const auto operand_index = getOutputOperandIndex(index);
-
- const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
- const auto shape = (type != nullptr)
- ? NNAPIConvert::getShape(type)
- : _execution->primary_subgraph().operands().at(operand_index).shape();
-
- // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other
- // words, we can assume that io_layout from nnapi always is the same as layout of the used
- // model.
- // TODO Set layout of model
- _execution->setOutput(output_index, type_info, shape, buffer, length, onert::ir::Layout::NHWC);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::startExecute(void) noexcept
-{
- try
- {
- _execution->startExecute();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::execute(void) noexcept
-{
- try
- {
- _execution->execute();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-const std::shared_ptr<onert::exec::Execution> ANeuralNetworksExecution::instance(void) noexcept
-{
- return _execution;
-}
-
-bool ANeuralNetworksExecution::getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept
-{
- try
- {
- onert::ir::IOIndex output_index{index};
-
- // Check execution is finished
- if (!_execution->isFinished())
- {
- return false;
- }
-
- const auto shape = _execution->getOutputShape(output_index);
- if (shape.hasUnspecifiedDims())
- {
- throw std::runtime_error{"Internal error: Output tensor has unspecified dims"};
- }
-
- *rank = shape.rank();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::getOutputOperandDimensions(uint32_t index, uint32_t *dimensions)
-{
- try
- {
- onert::ir::IOIndex output_index{index};
-
- // Check execution is finished
- if (!_execution->isFinished())
- {
- return false;
- }
-
- const auto shape = _execution->getOutputShape(output_index);
- if (shape.hasUnspecifiedDims())
- {
- throw std::runtime_error{"Internal error: Output tensor has unspecified dims"};
- }
-
- for (int i = 0; i < shape.rank(); i++)
- {
- auto dim = shape.dim(i);
-
- if (dim <= 0)
- {
- throw std::runtime_error{"Invalid dimension value"};
- }
-
- dimensions[i] = static_cast<uint32_t>(dim);
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
deleted file mode 100644
index 1f4b868f6..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __EXECUTION_H__
-#define __EXECUTION_H__
-
-#include <NeuralNetworks.h>
-
-#include <memory>
-
-#include "exec/Execution.h"
-
-struct ANeuralNetworksExecution
-{
-public:
- ANeuralNetworksExecution(const std::shared_ptr<onert::exec::ExecutorMap> &executors)
- : _execution{std::make_shared<onert::exec::Execution>(executors)}
- {
- // DO NOTHING
- }
-
-public:
- bool setInput(uint32_t index, const ANeuralNetworksOperandType *type, const void *buffer,
- size_t length) noexcept;
- bool setOptionalInput(uint32_t index, const ANeuralNetworksOperandType *type, const void *buffer,
- size_t length) noexcept;
- bool setOutput(uint32_t index, const ANeuralNetworksOperandType *type, void *buffer,
- size_t length) noexcept;
- bool startExecute(void) noexcept;
- bool execute(void) noexcept;
-
- const onert::ir::OperandIndex getInputOperandIndex(int32_t index) noexcept;
- const onert::ir::OperandIndex getOutputOperandIndex(int32_t index) noexcept;
- bool compareDataType(const ANeuralNetworksOperandType *type,
- const onert::ir::OperandIndex index) noexcept;
- bool compareShape(const ANeuralNetworksOperandType *type,
- const onert::ir::OperandIndex index) noexcept;
- bool IsOptionalInput(const onert::ir::OperandIndex index) noexcept;
- bool hasUnspecifiedDims(const onert::ir::OperandIndex index) noexcept;
- size_t getOperandSize(const onert::ir::OperandIndex index) noexcept;
- const std::shared_ptr<onert::exec::Execution> instance(void) noexcept;
-
- /**
- * @brief Get output operand's rank
- * @param[in] index Output index
- * @param[out] rank Output operand's rank
- * @return @c true if success to get rank, otherwise @c false
- */
- bool getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept;
- /**
- * @brief Get dimensions of the output operand
- * @param[in] index Output index
- * @param[out] dimensions Output operand's dimensions
- * @return @c true if success to get rank, otherwise @c false
- * @note This must be called after execution is finished to get resolved output shape
- * unspecified in model
- */
- bool getOutputOperandDimensions(uint32_t index, uint32_t *dimensions);
-
-private:
- std::shared_ptr<onert::exec::Execution> _execution;
-};
-
-#endif
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc
deleted file mode 100644
index 9cc100585..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-#include <sys/mman.h>
-
-#include "ANeuralNetworksMemory.h"
-
-//
-// ANeuralNetworksMemory
-//
-ANeuralNetworksMemory::ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset)
-{
- _base = reinterpret_cast<uint8_t *>(mmap(nullptr, size, protect, MAP_PRIVATE, fd, offset));
- _size = size;
-}
-
-ANeuralNetworksMemory::~ANeuralNetworksMemory() { munmap(reinterpret_cast<void *>(_base), _size); }
-
-bool ANeuralNetworksMemory::vaildAccess(size_t offset, size_t length) const
-{
- if ((offset >= _size) || (length > _size))
- {
- return false;
- }
-
- if ((offset + length) >= _size)
- {
- return false;
- }
-
- return true;
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h
deleted file mode 100644
index 48a1bc5fc..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MEMORY_H__
-#define __MEMORY_H__
-
-#include <cstdint>
-
-struct ANeuralNetworksMemory
-{
-public:
- ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset);
- ~ANeuralNetworksMemory();
-
-public:
- size_t size(void) const { return _size; }
- uint8_t *base(void) { return _base; }
- uint8_t *base(void) const { return _base; }
- bool vaildAccess(size_t offset, size_t length) const;
-
-private:
- size_t _size;
- uint8_t *_base;
-};
-
-#endif // __MEMORY_H__
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
deleted file mode 100644
index 97b820aea..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksModel.h"
-#include "OperationFactory.h"
-#include "NNAPIConvert.h"
-
-#include "ir/Operations.Include.h"
-#include "util/logging.h"
-
-#include <memory>
-
-//
-// ANeuralNetworksModel
-//
-ANeuralNetworksModel::ANeuralNetworksModel() noexcept
- : _optional_operands{}, _operand_usages{}, _allowFloat32toFloat16{false}
-{
- _graph = std::make_shared<onert::ir::Graph>();
-}
-
-bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) noexcept
-{
- try
- {
- const auto shape = NNAPIConvert::getShape(type);
- const auto typeInfo = NNAPIConvert::getTypeInfo(type);
- _graph->addOperand(shape, typeInfo);
- _operand_usages.emplace_back(OperandUsage::NOT_DEFINED);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length,
- bool optional, bool copy) noexcept
-{
- const onert::ir::OperandIndex ind{index};
-
- try
- {
- _operand_usages[index] = OperandUsage::CONSTANT;
-
- // Remain operands.at(ind).data()->base() as nullptr for optional operand
- // This will be filled when model finished
- if (optional)
- {
- setOptionalOperand(ind);
- }
-
- using onert::ir::CachedData;
- using onert::ir::ExternalData;
- if (copy)
- {
- _graph->operands().at(ind).data(
- std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length));
- }
- else
- {
- _graph->operands().at(ind).data(
- std::make_unique<ExternalData>(reinterpret_cast<const uint8_t *>(buffer), length));
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs) noexcept
-{
- try
- {
- for (uint32_t i = 0; i < outputCount; i++)
- {
- _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
- }
-
- auto &factory = OperationFactory::get();
- OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
-
- auto node = factory.create(type, param, _graph->operands());
- _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node});
-
- // TODO Move these codes to delegate.cpp
- if (type == ANEURALNETWORKS_FULLY_CONNECTED)
- {
- const auto &input_operand =
- _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::INPUT));
- auto &weights_operand =
- _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::WEIGHT));
- if (input_operand.typeInfo().type() == onert::ir::DataType::FLOAT32 &&
- weights_operand.typeInfo().type() == onert::ir::DataType::QUANT_UINT8_ASYMM)
- {
- weights_operand.type(onert::ir::DataType::QUANT_INT8_SYMM);
- }
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs) noexcept
-{
- try
- {
- for (uint32_t i = 0; i < outputCount; i++)
- {
- _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
- }
-
- auto &factory = OperationFactory::get();
- OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
-
- auto node = factory.create(type, param, _graph->operands());
- _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node});
- }
- catch (const std::exception &e)
- {
- return false;
- }
- return true;
-}
-
-bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept
-{
- try
- {
- _operand_usages[index] = OperandUsage::MODEL_INPUT;
-
- const onert::ir::OperandIndex ind{index};
- _graph->addInput(ind);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept
-{
- try
- {
- const onert::ir::OperandIndex ind{index};
-
- // Duplicated output is not allowed
- if (_graph->getOutputs().contains(ind))
- {
- return false;
- }
-
- _graph->addOutput(ind);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-void ANeuralNetworksModel::allowFloat32toFloat16(bool allow) noexcept
-{
- _allowFloat32toFloat16 = allow;
-}
-
-bool ANeuralNetworksModel::finish() noexcept
-{
- try
- {
- fillOptionalOperand();
-
- _graph->finishBuilding();
-
- _operand_usages.clear();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << '\n';
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::isFinished() noexcept { return !_graph->isBuildingPhase(); }
-
-bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept
-{
- return _graph->operands().exist(onert::ir::OperandIndex{index});
-}
-
-size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept
-{
- try
- {
- return _graph->operands().at(onert::ir::OperandIndex{index}).operandSize();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << '\n';
-
- return 0;
- }
-}
-
-bool ANeuralNetworksModel::isUsageSet(uint32_t index) noexcept
-{
- return (_operand_usages[index] != OperandUsage::NOT_DEFINED);
-}
-
-bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept
-{
- return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT);
-}
-
-void ANeuralNetworksModel::setOptionalOperand(const onert::ir::OperandIndex idx)
-{
- _optional_operands.insert(idx);
-}
-
-void ANeuralNetworksModel::fillOptionalOperand(void)
-{
- _graph->operations().iterate([&](const onert::ir::OperationIndex &, onert::ir::Operation &node) {
- for (auto input : node.getInputs())
- {
- // TODO fill default value for optional operands
- if (_optional_operands.find(input) != _optional_operands.end())
- {
- throw std::runtime_error{"Optional operand is not supported yet"};
- }
- }
- });
-}
-
-std::shared_ptr<onert::ir::Subgraphs> ANeuralNetworksModel::getSubGraphs() const
-{
- auto all_subgs = std::make_shared<onert::ir::Subgraphs>();
-
- all_subgs->push(onert::ir::SubgraphIndex{0}, _graph);
- // TODO Find all child subgraphs and copy them to all_subgs
- // Must find the same subgraph by using to compare pointer of subgraphs and set subgraph's index
- // to operands of control flow operations
- // Must clean all child subgraphs's pointer to prevent memory leak in case of that graph has
- // subgraph itself recursively
-
- return all_subgs;
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h
deleted file mode 100644
index df6c97c44..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MODEL_H__
-#define __MODEL_H__
-
-#include <unordered_set>
-#include <NeuralNetworks.h>
-#include <NeuralNetworksEx.h>
-
-#include "ir/Graph.h"
-#include "ir/Subgraphs.h"
-
-struct ANeuralNetworksModel
-{
-public:
- enum class OperandUsage
- {
- NOT_DEFINED = 0,
- MODEL_INPUT,
- CONSTANT,
- OPERATION_OUTPUT,
- };
-
-public:
- ANeuralNetworksModel() noexcept;
-
-public:
- bool addOperand(const ANeuralNetworksOperandType *type) noexcept;
- bool setOperandValue(uint32_t index, const void *buffer, size_t length, bool optional = false,
- bool copy = false) noexcept;
- bool addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t *inputs,
- uint32_t outputCount, const uint32_t *outputs) noexcept;
- bool addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs) noexcept;
- bool addModelInput(uint32_t index) noexcept;
- bool addModelOutput(uint32_t index) noexcept;
- void allowFloat32toFloat16(bool allow) noexcept;
- bool allowedToFp16() const noexcept { return _allowFloat32toFloat16; }
- bool finish() noexcept;
-
- onert::ir::Graph &deref(void) { return *_graph; }
- bool isFinished() noexcept;
- bool isExistOperand(uint32_t index) noexcept;
- size_t operandSize(uint32_t index) noexcept;
- bool isUsageSet(uint32_t index) noexcept;
- bool isOperationOutput(uint32_t index) noexcept;
- std::shared_ptr<onert::ir::Subgraphs> getSubGraphs() const;
-
-private:
- void setOptionalOperand(const onert::ir::OperandIndex idx);
- void fillOptionalOperand(void);
-
-private:
- std::shared_ptr<onert::ir::Graph> _graph;
- std::unordered_set<onert::ir::OperandIndex> _optional_operands;
- std::vector<OperandUsage> _operand_usages;
- bool _allowFloat32toFloat16;
-};
-
-#endif // __MODEL_H__
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.test.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.test.cc
deleted file mode 100644
index bb42f2b08..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.test.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ANeuralNetworksModel.h"
-
-TEST(MODEL, neg_model_build)
-{
- ANeuralNetworksModel model;
- ASSERT_FALSE(model.isFinished());
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc
deleted file mode 100644
index 63d4e3c09..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "NNAPIConvert.h"
-
-#include <numeric>
-
-using namespace onert::ir;
-
-DataType NNAPIConvert::getDataType(OperandCode type)
-{
- switch (type)
- {
- case ANEURALNETWORKS_FLOAT32:
- case ANEURALNETWORKS_TENSOR_FLOAT32:
- return DataType::FLOAT32;
- case ANEURALNETWORKS_INT32:
- case ANEURALNETWORKS_TENSOR_INT32:
- return DataType::INT32;
- case ANEURALNETWORKS_UINT32:
- return DataType::UINT32;
- case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
- return DataType::QUANT_UINT8_ASYMM;
- case ANEURALNETWORKS_TENSOR_QUANT8_SYMM:
- return DataType::QUANT_INT8_SYMM;
- case ANEURALNETWORKS_BOOL:
- case ANEURALNETWORKS_TENSOR_BOOL8:
- return DataType::BOOL8;
- default:
- throw std::runtime_error("Unsupported type");
- }
-}
-
-TypeInfo NNAPIConvert::getTypeInfo(const ANeuralNetworksOperandType *type)
-{
- return TypeInfo(getDataType((OperandCode)(type->type)), type->scale, type->zeroPoint);
-}
-
-Shape NNAPIConvert::getShape(const ANeuralNetworksOperandType *type)
-{
- Shape shape(type->dimensionCount);
-
- for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
- {
- shape.dim(axis) = type->dimensions[axis];
- }
-
- return shape;
-}
-
-size_t NNAPIConvert::calculateSizeFromType(const ANeuralNetworksOperandType *type)
-{
- auto shape = getShape(type);
- auto data_type = getDataType((OperandCode)(type->type));
-
- return shape.num_elements() * sizeOfDataType(data_type);
-}
-
-Activation NNAPIConvert::getFusedActivation(FuseCode act)
-{
- switch (act)
- {
- case ANEURALNETWORKS_FUSED_NONE:
- return Activation::NONE;
- case ANEURALNETWORKS_FUSED_RELU:
- return Activation::RELU;
- case ANEURALNETWORKS_FUSED_RELU1:
- return Activation::RELU1;
- case ANEURALNETWORKS_FUSED_RELU6:
- return Activation::RELU6;
- default:
- throw std::runtime_error("Unsupported activation type");
- }
-}
-
-PaddingType NNAPIConvert::getPaddingType(PaddingCode type)
-{
- switch (type)
- {
- case ANEURALNETWORKS_PADDING_SAME:
- return PaddingType::SAME;
- case ANEURALNETWORKS_PADDING_VALID:
- return PaddingType::VALID;
- default:
- throw std::runtime_error("Unsupported type");
- }
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h
deleted file mode 100644
index 4fd985e6e..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file NNAPIConvert.h
- * @brief This file contains convereter(s)\n
- * from NNAPI frontend's struct to onert's internal struct
- */
-#ifndef __ONERT_NNAPI_CONVERT_H__
-#define __ONERT_NNAPI_CONVERT_H__
-
-#include <NeuralNetworks.h>
-
-#include <ir/TypeInfo.h>
-#include <ir/Shape.h>
-#include <ir/Padding.h>
-#include <ir/InternalType.h>
-
-class NNAPIConvert
-{
-
-public:
- /**
- * @brief Convert data type from NNAPI to internal data type
- * @param[in] type NNAPI's data type
- * @return onert's internal data type
- */
- static onert::ir::DataType getDataType(OperandCode type);
-
- /**
- * @brief Convert operand type info from NNAPI to interanl operand type info
- * @param[in] type NNAPI's operand type
- * @return onert's internal operand type info
- */
- static onert::ir::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type);
-
- /**
- * @brief Convert operand shape info from NNAPI to internal operand shape
- * @param[in] type NNAPI's operand type
- * @return onert's internal operand shape
- */
- static onert::ir::Shape getShape(const ANeuralNetworksOperandType *type);
-
- /**
- * @brief Calcaulate operand size from NNAPI type
- * @param[in] type NNAPI's operand type
- * @return Operand size
- */
- static size_t calculateSizeFromType(const ANeuralNetworksOperandType *type);
-
- /**
- * @brief Convert NNAPI FuseCode to internal activation type
- * @param[in] act NNAPI's FuseCode type
- * @return onert's internal activation type
- */
- static onert::ir::Activation getFusedActivation(FuseCode act);
-
- /**
- * @brief Convert NNAPI PaddingCode to internal padding type
- * @param[in] type NNAPI's PaddingCode type
- * @return onert's internal padding type
- */
- static onert::ir::PaddingType getPaddingType(PaddingCode type);
-};
-
-#endif // __ONERT_NNAPI_CONVERT_H__
diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
deleted file mode 100644
index e6c38f5f8..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
+++ /dev/null
@@ -1,1914 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationFactory.h"
-#include "NNAPIConvert.h"
-
-#include <ir/Operations.Include.h>
-#include <string.h>
-
-namespace
-{
-using namespace onert::ir;
-
-void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type)
-{
- assert(operands.exist(index));
- operands.at(index).type(type);
-}
-
-ExplicitPadding makeExplicitPadding(Operands &operands, const OperandIndex &left_index,
- const OperandIndex &right_index, const OperandIndex &top_index,
- const OperandIndex &bottom_index)
-{
- auto left = operands.at(left_index).asScalar<int32_t>();
- auto right = operands.at(right_index).asScalar<int32_t>();
- auto top = operands.at(top_index).asScalar<int32_t>();
- auto bottom = operands.at(bottom_index).asScalar<int32_t>();
-
- if (left < 0 || right < 0 || top < 0 || bottom < 0)
- {
- throw std::runtime_error{"Cannot handle negative explicit padding value"};
- }
-
- ExplicitPadding param;
- param.left = static_cast<uint32_t>(left);
- param.right = static_cast<uint32_t>(right);
- param.top = static_cast<uint32_t>(top);
- param.bottom = static_cast<uint32_t>(bottom);
-
- return param;
-}
-
-Stride makeStride(Operands &operands, const OperandIndex &horizontal_index,
- const OperandIndex &vertical_index)
-{
- auto horizontal = operands.at(horizontal_index).asScalar<int32_t>();
- auto vertical = operands.at(vertical_index).asScalar<int32_t>();
-
- if (vertical < 0 || horizontal < 0)
- {
- throw std::runtime_error{"Cannot handle negative stride value"};
- }
-
- Stride stride;
- stride.horizontal = static_cast<uint32_t>(horizontal);
- stride.vertical = static_cast<uint32_t>(vertical);
-
- return stride;
-}
-
-uint32_t getUint32Scalar(Operands &operands, const OperandIndex index)
-{
- auto int32_value = operands.at(index).asScalar<int32_t>();
- if (int32_value < 0)
- {
- throw std::runtime_error{"Cannot handle negative value"};
- }
-
- return static_cast<uint32_t>(int32_value);
-}
-
-OperationFactory::Generator
-getElementwiseActivationGenerator(const onert::ir::operation::ElementwiseActivation::Type op_type,
- float alpha = 0.f, float beta = 0.f)
-{
- return [op_type, alpha, beta](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::ElementwiseActivation::Param param;
- param.op_type = op_type;
- param.alpha = alpha;
- param.beta = beta;
-
- return new operation::ElementwiseActivation{inputs, outputs, param};
- };
-}
-
-OperationFactory::Generator getElementwiseBinaryGenerator(
- const onert::ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type)
-{
- return [op_type](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lefthand side operand
- // 1 -> Righthand side operand
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::ElementwiseBinary::Param param;
- param.op_type = op_type;
-
- return new operation::ElementwiseBinary{inputs, outputs, param};
- };
-}
-
-OperationFactory::Generator
-getElementwiseUnaryGenerator(const onert::ir::operation::ElementwiseUnary::Type op_type)
-{
- return [op_type](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 1);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::ElementwiseUnary::Param param;
- param.op_type = op_type;
-
- if (op_type == operation::ElementwiseUnary::Type::CAST)
- {
- // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's
- // input/output
- if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
- {
- replaceDataType(operands, inputs.at(0), DataType::UINT8);
- }
- if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
- {
- replaceDataType(operands, outputs.at(0), DataType::UINT8);
- }
- }
-
- return new operation::ElementwiseUnary{inputs, outputs, param};
- };
-}
-
-OperationFactory::Generator
-getBinaryArithmeticGenerator(const onert::ir::operation::BinaryArithmetic::ArithmeticType op_type)
-{
- return [op_type](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lefthand side operand
- // 1 -> Righthand side operand
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::BinaryArithmetic::Param param;
- param.arithmetic_type = op_type;
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::BinaryArithmetic{inputs, outputs, param};
- };
-}
-
-OperationFactory::Generator
-getPool2DGenerator(const onert::ir::operation::Pool2D::PoolType pool_type)
-{
- return [pool_type](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
-
- // In common
- // 0 -> IFM Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Pool2D::Param param;
- param.op_type = pool_type;
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 2 -> Horizontal (over width) Stride Index
- // 3 -> Vertial (over height) Stride Index
- // 4 -> Filter Width Index
- // 5 -> Filter Height Index
- // 6 -> FuseCode (activation) Index
-
- const auto padding_index = OperandIndex{init_param.inputs[1]};
- const auto hstride_index = OperandIndex{init_param.inputs[2]};
- const auto vstride_index = OperandIndex{init_param.inputs[3]};
- const auto kw_index = OperandIndex{init_param.inputs[4]};
- const auto kh_index = OperandIndex{init_param.inputs[5]};
- const auto activation_index = OperandIndex{init_param.inputs[6]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = operands.at(kh_index).asScalar<uint32_t>();
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 1 -> Padding_left index
- // 2 -> Padding_right index
- // 3 -> Padding_top index
- // 4 -> Padding_bottom index
- // 5 -> Horizontal (over width) Stride Index
- // 6 -> Vertial (over height) Stride Index
- // 7 -> Filter Width Index
- // 8 -> Filter Height Index
- // 9 -> FuseCode (activation) Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[1]};
- const auto padding_right_index = OperandIndex{init_param.inputs[2]};
- const auto padding_top_index = OperandIndex{init_param.inputs[3]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
- const auto hstride_index = OperandIndex{init_param.inputs[5]};
- const auto vstride_index = OperandIndex{init_param.inputs[6]};
- const auto kw_index = OperandIndex{init_param.inputs[7]};
- const auto kh_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = getUint32Scalar(operands, kh_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- return new operation::Pool2D{inputs, outputs, param};
- };
-}
-
-OperationFactory::Generator
-getReduceGenerator(const onert::ir::operation::Reduce::ReduceType reduce_type)
-{
- return [reduce_type](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Reduced Axes Tensor Index
- // 2 -> keep_dims Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Reduce::Param param;
- param.reduce_type = reduce_type;
- param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
-
- return new operation::Reduce{inputs, outputs, param};
- };
-}
-
-template <typename T>
-Operation *CreateSimpleUnaryOp(const OperationFactory::Param &init_param, Operands &)
-{
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new T{inputs, outputs};
-}
-
-// A generator function for binary ops with no params
-template <typename T>
-Operation *createSimpleBinaryOp(const OperationFactory::Param &init_param, Operands &)
-{
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new T{inputs, outputs};
-}
-
-OperationFactory::Generator getComparisonGenerator(operation::Comparison::ComparisonType type)
-{
- return [type](const OperationFactory::Param &init_param, Operands &) -> Operation * {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = type;
-
- return new operation::Comparison{inputs, outputs, param};
- };
-}
-
-} // namespace
-
-OperationFactory &OperationFactory::get()
-{
- static OperationFactory factory;
- return factory;
-}
-
-OperationFactory::OperationFactory()
-{
- // Each input should be interpreted as follows:
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = createSimpleBinaryOp<operation::BatchToSpaceND>;
-
- _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert((init_param.input_count == 8 || init_param.input_count == 11) &&
- init_param.output_count == 1);
-
- // In common
- // 0 -> IFM Tensor Index
- // 1 -> Kernel Tensor Index
- // 2 -> Bias Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::DepthwiseConv2D::Param param;
- if (init_param.input_count == 8)
- {
- // Imlicit Padding case
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 4 -> Stride (width) Index
- // 5 -> Stride (height) INdex
- // 6 -> Depthwise multiplier
- // 7 -> Activation Index
-
- const auto padding_index = OperandIndex{init_param.inputs[3]};
- const auto hstride_index = OperandIndex{init_param.inputs[4]};
- const auto vstride_index = OperandIndex{init_param.inputs[5]};
- const auto multiplier_index = OperandIndex{init_param.inputs[6]};
- const auto activation_index = OperandIndex{init_param.inputs[7]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.multiplier = getUint32Scalar(operands, multiplier_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else
- {
- // Explicit Padding case
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding On the Left
- // 4 -> Padding On the Right
- // 5 -> Padding On the Top
- // 6 -> Padding On the Bottom
- // 7 -> Stride (width) Index
- // 8 -> Stride (height) Index
- // 9 -> Depthwise multiplier
- // 10-> Activation Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[3]};
- const auto padding_right_index = OperandIndex{init_param.inputs[4]};
- const auto padding_top_index = OperandIndex{init_param.inputs[5]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
- const auto hstride_index = OperandIndex{init_param.inputs[7]};
- const auto vstride_index = OperandIndex{init_param.inputs[8]};
- const auto multiplier_index = OperandIndex{init_param.inputs[9]};
- const auto activation_index = OperandIndex{init_param.inputs[10]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.multiplier = getUint32Scalar(operands, multiplier_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
-
- return new operation::DepthwiseConv2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_MAX_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::MAX);
-
- _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::AVG);
-
- _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count >= 2); // At least one one input tensor and axis
- assert(init_param.output_count == 1);
-
- // When there are N + 1 inputs, each input should be interpreted as follows:
- //
- // [0, N) -> Input tensors
- // N -> Axis
- //
-
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Concat::Param param;
- const OperandIndex axis_index{init_param.inputs[init_param.input_count - 1]};
- param.axis = operands.at(axis_index).asScalar<int32_t>();
-
- return new operation::Concat{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> A tensor, specifying the tensor to be reshaped.
- // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output
- // tensor
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Reshape::Param param{};
-
- return new operation::Reshape{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_FULLY_CONNECTED] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 4 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> A tensor, specifying the input.
- // 1 -> A 2-D tensor, specifying the weights
- // 2 -> A 1-D tensor, specifying the bias
- // 3 -> An INT32 value, and has to be one of the FuseCode values
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::FullyConnected::Param param;
- const auto activation_index = OperandIndex{init_param.inputs[3]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::FullyConnected{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SOFTMAX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> A 2-D or 4-D tensor, specifying the tensor to be reshaped.
- // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- const auto beta_index = OperandIndex{init_param.inputs[1]};
-
- operation::Softmax::Param param;
- param.beta = operands.at(beta_index).asScalar<float>();
-
- return new operation::Softmax{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_CAST] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::CAST);
-
- // ANEURALNETWORKS_CAST_EX is deprecated
- // TODO Remove ANEURALNETWORKS_CAST_EX
- _map[ANEURALNETWORKS_CAST_EX] = _map[ANEURALNETWORKS_CAST];
-
- _map[ANEURALNETWORKS_CONV_2D] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- using operation::Conv2D;
-
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(init_param.input_count == 7 || init_param.input_count == 10 ||
- init_param.input_count == 13);
- assert(init_param.output_count == 1);
-
- // 0 -> IFM Tensor Index
- // 1 -> Kernel Tensor Index
- // 2 -> Bias Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- Conv2D::Param param;
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 4 -> Stride (width) Index
- // 5 -> Stride (height) INdex
- // 6 -> Activation Index
-
- const auto padding_index = OperandIndex{init_param.inputs[3]};
- const auto hstride_index = OperandIndex{init_param.inputs[4]};
- const auto vstride_index = OperandIndex{init_param.inputs[5]};
- const auto activation_index = OperandIndex{init_param.inputs[6]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
-
- param.dilation.width_factor = 1;
- param.dilation.height_factor = 1;
-
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else if (init_param.input_count == 10) // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding_left index
- // 4 -> Padding_right index
- // 5 -> Padding_top index
- // 6 -> Padding_bottom index
- // 7 -> Stride (width) Index
- // 8 -> Stride (height) INdex
- // 9 -> Activation Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[3]};
- const auto padding_right_index = OperandIndex{init_param.inputs[4]};
- const auto padding_top_index = OperandIndex{init_param.inputs[5]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
- const auto hstride_index = OperandIndex{init_param.inputs[7]};
- const auto vstride_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
-
- param.dilation.width_factor = 1;
- param.dilation.height_factor = 1;
-
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else if (init_param.input_count == 13) // support dilation
- {
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding_left Index
- // 4 -> Padding_right Index
- // 5 -> Padding_top Index
- // 6 -> Padding_bottom Index
- // 7 -> Stride (width) Index
- // 8 -> Stride (height) Index
- // 9 -> Activation Index
- // 11 -> Dilation (width_factor) Index
- // 12 -> Dilation (height_factor) INdex
-
- const auto padding_left_index = OperandIndex{init_param.inputs[3]};
- const auto padding_right_index = OperandIndex{init_param.inputs[4]};
- const auto padding_top_index = OperandIndex{init_param.inputs[5]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
- const auto hstride_index = OperandIndex{init_param.inputs[7]};
- const auto vstride_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
- const auto width_factor_index = OperandIndex{init_param.inputs[11]};
- const auto height_factor_index = OperandIndex{init_param.inputs[12]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
-
- auto width_factor = operands.at(width_factor_index).asScalar<int32_t>();
- auto height_factor = operands.at(height_factor_index).asScalar<int32_t>();
-
- param.dilation.width_factor = width_factor;
- param.dilation.height_factor = height_factor;
-
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else
- {
- throw std::runtime_error{"Conv2D: unsupported input operand count"};
- }
-
- return new Conv2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_ADD] =
- getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::ADD);
-
- _map[ANEURALNETWORKS_ADDV2_EX] = _map[ANEURALNETWORKS_ADD];
-
- _map[ANEURALNETWORKS_REDUCE_SUM] =
- getReduceGenerator(onert::ir::operation::Reduce::ReduceType::SUM);
-
- // ANEURALNETWORKS_REDUCE_SUM_EX is deprecated
- // TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX
- _map[ANEURALNETWORKS_REDUCE_SUM_EX] = _map[ANEURALNETWORKS_REDUCE_SUM];
-
- _map[ANEURALNETWORKS_SUB] =
- getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::SUB);
-
- _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Begins Tensor Index
- // 2 -> Sizes Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- return new operation::Slice{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 7 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2],
- init_param.inputs[3]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 1 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of
- // the dimensions of the input tensor to be sliced. The length must be
- // of rank(input0).
- // 2 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of
- // the dimensions of the input tensor to be sliced. The length must be
- // of rank(input0).
- // 3 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of
- // the dimensions of the input tensor to be sliced. The length must be
- // of rank(input0).
- // 4 -> An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit
- // of begin_mask is set, begin[i] is ignored and the fullest possible
- // range in that dimension is used instead.
- // 5 -> An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of
- // end_mask is set, end[i] is ignored and the fullest possible range in
- // that dimension is used instead.
- // 6 -> An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32
- // mask. If the ith bit of shrink_axis_mask is set, it implies that the
- // ith specification shrinks the dimensionality by 1. A slice of size 1
- // starting from begin[i] in the dimension must be preserved.
-
- operation::StridedSlice::Param param;
-
- param.begin_mask = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
- param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>();
- param.shrink_axis_mask =
- operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>();
-
- return new operation::StridedSlice{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_TRANSPOSE] = createSimpleBinaryOp<operation::Transpose>;
-
- _map[ANEURALNETWORKS_MUL] =
- getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::MUL);
-
- _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 1 || init_param.input_count == 2);
- assert(init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> An n-D tensor, the tensor to be squeezed.
- // 1 -> An optional 1-D tensor of ANEURALNETWORKS_TENSOR_INT32. The dimensions to squeeze.
- // If specified only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
- // The dimension index starts at 0. An error must be reported if squeezing a dimension that
- // is not 1.
-
- // Add mandatory input index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- // Add dims index if specified
- operation::Squeeze::Param param{};
- if (init_param.input_count == 2)
- {
- auto squeeze_dims_idx = OperandIndex{init_param.inputs[1]};
- assert(operands.at(squeeze_dims_idx).shape().rank() == 1);
- assert(operands.at(squeeze_dims_idx).shape().dim(0) >= 0);
- assert(static_cast<uint32_t>(operands.at(squeeze_dims_idx).shape().dim(0)) <=
- sizeof(param.dims));
- param.ndim = operands.at(squeeze_dims_idx).shape().dim(0);
- if (param.ndim > 0)
- {
- assert(operands.at(squeeze_dims_idx).data());
- memcpy(param.dims, operands.at(squeeze_dims_idx).data()->base(),
- param.ndim * sizeof(param.dims[0]));
- }
- }
-
- return new operation::Squeeze{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_TANH] = getElementwiseActivationGenerator(
- onert::ir::operation::ElementwiseActivation::Type::TANH, 1.f, 1.f);
-
- _map[ANEURALNETWORKS_LOG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOG);
-
- _map[ANEURALNETWORKS_LOGISTIC] = getElementwiseActivationGenerator(
- onert::ir::operation::ElementwiseActivation::Type::LOGISTIC);
-
- _map[ANEURALNETWORKS_DIV] =
- getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::DIV);
-
- _map[ANEURALNETWORKS_EXP] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::EXP);
-
- // ANEURALNETWORKS_EXP_EX is deprecated
- // TODO Remove ANEURALNETWORKS_EXP_EX
- _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP];
-
- // Each input should be interpreted as follows:
- // 0 -> Input Tensor Index
- // 1 -> Axis Tensor Index
- _map[ANEURALNETWORKS_EXPAND_DIMS] = createSimpleBinaryOp<operation::ExpandDims>;
-
- _map[ANEURALNETWORKS_GREATER] =
- getComparisonGenerator(operation::Comparison::ComparisonType::Greater);
- _map[ANEURALNETWORKS_GREATER_EQUAL] =
- getComparisonGenerator(operation::Comparison::ComparisonType::GreaterEqual);
- _map[ANEURALNETWORKS_LESS] = getComparisonGenerator(operation::Comparison::ComparisonType::Less);
- _map[ANEURALNETWORKS_LESS_EQUAL] =
- getComparisonGenerator(operation::Comparison::ComparisonType::LessEqual);
- _map[ANEURALNETWORKS_NOT_EQUAL] =
- getComparisonGenerator(operation::Comparison::ComparisonType::NotEqual);
- _map[ANEURALNETWORKS_EQUAL] =
- getComparisonGenerator(operation::Comparison::ComparisonType::Equal);
-
- // ANEURALNETWORKS_GREATER_EQUAL_EX is deprecated
- // TODO Remove ANEURALNETWORKS_GREATER_EQUAL_EX
- _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_LESS_EX is deprecated
- // TODO Remove ANEURALNETWORKS_LESS_EX
- _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::Less;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_REDUCE_ALL] =
- getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ALL);
-
- _map[ANEURALNETWORKS_REDUCE_ANY] =
- getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ANY);
-
- _map[ANEURALNETWORKS_REDUCE_MAX] =
- getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MAX);
-
- // ANEURALNETWORKS_REDUCE_MAX_EX is deprecated
- // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX
- _map[ANEURALNETWORKS_REDUCE_MAX_EX] = _map[ANEURALNETWORKS_REDUCE_MAX];
-
- // ANEURALNETWORKS_NOT_EQUAL_EX is deprecated
- // TODO Remove ANEURALNETWORKS_NOT_EQUAL_EX
- _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input1 Tensor Index
- // 1 -> input2 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::NotEqual;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LOGICAL_AND] = getElementwiseBinaryGenerator(
- operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND);
-
- // ANEURALNETWORKS_LOGICAL_AND_EX is deprecated
- // TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX
- _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- // This operation's operands must be boolean type.
- replaceDataType(operands, inputs.at(0), DataType::BOOL8);
- replaceDataType(operands, inputs.at(1), DataType::BOOL8);
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- operation::ElementwiseBinary::Param param;
- param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND;
-
- return new operation::ElementwiseBinary{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_RSQRT] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::RSQRT);
-
- _map[ANEURALNETWORKS_SELECT] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Condition Tensor Index
- // 1 -> Input X(true) Tensor Index
- // 2 -> Input Y(false) Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- return new operation::Select{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_SELECT_V2_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Condition Tensor Index
- // 1 -> Input X(true) Tensor Index
- // 2 -> Input Y(false) Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- return new operation::Select{inputs, outputs};
- };
-
- // ANEURALNETWORKS_RSQRT_EX is deprecated
- // TODO Remove ANEURALNETWORKS_RSQRT_EX
- _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT];
-
- _map[ANEURALNETWORKS_RELU] =
- getElementwiseActivationGenerator(onert::ir::operation::ElementwiseActivation::Type::RELU,
- onert::ir::operation::ElementwiseActivation::infinity, 0);
-
- _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Index
- // 1 -> Height Index
- // 2 -> Width Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::ResizeBilinear::Param param;
- param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
- param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>();
- param.align_corners = false;
- param.half_pixel_centers = false;
- return new operation::ResizeBilinear{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert((init_param.input_count == 3 || init_param.input_count == 4) &&
- init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Index
- // 1 -> Height Index
- // 2 -> Width Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::ResizeNearestNeighbor::Param param;
- param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
- param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>();
- param.align_corners = false;
- // The layout input is not supported yet
- return new operation::ResizeNearestNeighbor{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_RELU1] = getElementwiseActivationGenerator(
- onert::ir::operation::ElementwiseActivation::Type::RELU, 1.f, -1.f);
-
- _map[ANEURALNETWORKS_RELU6] = getElementwiseActivationGenerator(
- onert::ir::operation::ElementwiseActivation::Type::RELU, 6.f, 0.f);
-
- _map[ANEURALNETWORKS_REVERSE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Axis Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Reverse{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 6 && init_param.output_count == 2);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Weights Tensor Index
- // 2 -> Recurrent Weights Tensor Index
- // 3 -> Bias Tensor Index
- // 4 -> Hidden state (in) Index
- // 5 -> Activation Index
-
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::RNN::Param param;
- const auto activation_index = OperandIndex{init_param.inputs[5]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::RNN{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_FLOOR] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::FLOOR);
-
- _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- // 2 -> Paddings Index
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
-
- return new operation::SpaceToBatchND{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::SpaceToDepth::Param param;
- param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-
- return new operation::SpaceToDepth{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_L2_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::L2);
-
- _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lookups Index
- // 1 -> Values Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::EmbeddingLookup{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::L2Normalization{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 2);
-
- // Each output should be interpreted as follows:
- //
- // 0 -> Output Index
- // 1 -> Hits Index
- OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lookups Index
- // 1 -> Keys Index
- // 2 -> Values Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- return new operation::HashtableLookup{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_PRELU] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- // 1 -> alpha Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::PReLU{inputs, outputs};
- };
-
- // ANEURALNETWORKS_PRELU_EX is deprecated
- // TODO Remove ANEURALNETWORKS_PRELU_EX
- _map[ANEURALNETWORKS_PRELU_EX] = _map[ANEURALNETWORKS_PRELU];
-
- _map[ANEURALNETWORKS_TRANSPOSE_CONV_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 6 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Output Shape Index
- // 1 -> Weights Index
- // 2 -> Input Tensor Index
- // 3 -> Padding Type
- // 4 -> Stride width
- // 5 -> Stride height
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- operation::TransposeConv::Param param;
-
- const auto padding_index = OperandIndex{init_param.inputs[3]};
- const auto hstride_index = OperandIndex{init_param.inputs[4]};
- const auto vstride_index = OperandIndex{init_param.inputs[5]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
-
- return new operation::TransposeConv{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SQRT] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SQRT);
-
- // ANEURALNETWORKS_SQRT_EX is deprecated
- // TODO Remove ANEURALNETWORKS_SQRT_EX
- _map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT];
-
- _map[ANEURALNETWORKS_LOGICAL_OR] = getElementwiseBinaryGenerator(
- operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
-
- // ANEURALNETWORKS_LOGICAL_OR_EX is deprecated
- // TODO Remove ANEURALNETWORKS_LOGICAL_OR_EX
- _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- // This operation's operands must be boolean type.
- replaceDataType(operands, inputs.at(0), DataType::BOOL8);
- replaceDataType(operands, inputs.at(1), DataType::BOOL8);
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- operation::ElementwiseBinary::Param param;
- param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR;
-
- return new operation::ElementwiseBinary{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LOGICAL_NOT] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOGICAL_NOT);
-
- // ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated
- // TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX
- _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- // This operation's operands must be boolean type.
- replaceDataType(operands, inputs.at(0), DataType::BOOL8);
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- operation::ElementwiseUnary::Param param;
- param.op_type = operation::ElementwiseUnary::Type::LOGICAL_NOT;
-
- return new operation::ElementwiseUnary{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 23 && init_param.output_count == 4);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Input to Input Tensor Index
- // 2 -> Input to Forget Tensor Index
- // 3 -> Input to Cell Tensor Index
- // 4 -> Input to Output Tensor Index
- // 5 -> Recurrent to Input Weights Tensor Index
- // 6 -> Recurrent to Forget Weights Tensor Index
- // 7 -> Recurrent to Cell Weights Tensor Index
- // 8 -> Recurrent to Output Weights Tensor Index
- // 9 -> Cell to Input Weights Tensor Index
- // 10 -> Cell to Forget Weights Tensor Index
- // 11 -> Cell to Output Weights Tensor Index
- // 12 -> Input Gate Bias Tensor Index
- // 13 -> Forget Gate Bias Tensor Index
- // 14 -> Cell Bias Tensor Index
- // 15 -> Output Gate Bias Tensor Index
- // 16 -> Projection Weights Tensor Index
- // 17 -> Projection Bias Tensor Index
- // 18 -> Output State In Tensor Index
- // 19 -> Cell State In Tensor Index
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
-
- // Each output should be interpreted as follows:
- //
- // 0 -> Scratch Buffer Tensor Index
- // 1 -> Output State Out Tensor Index
- // 2 -> Cell State Out Tensor Index
- // 3 -> Output Tensor Index
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::LSTM::Param param;
- const auto activation_index = OperandIndex{init_param.inputs[20]};
- switch (operands.at(activation_index).asScalar<int32_t>())
- {
- case 0:
- param.activation = Activation::NONE;
- break;
- case 1:
- param.activation = Activation::RELU;
- break;
- case 2:
- param.activation = Activation::RELU1;
- break;
- case 3:
- param.activation = Activation::RELU6;
- break;
- case 4:
- param.activation = Activation::TANH;
- break;
- case 6:
- param.activation = Activation::SIGMOID;
- break;
- default:
- throw std::runtime_error("Unsupported activation type");
- break;
- }
- param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>();
- param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>();
- // This is initialization to prevent warning or error by static code analyzer. LSTM operation
- // does not need time_major
- param.time_major = false;
-
- return new operation::LSTM{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert((init_param.input_count >= 24 || init_param.input_count <= 28) &&
- (init_param.output_count >= 1 && init_param.output_count <= 3));
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Input to Input Tensor Index
- // 2 -> Input to Forget Tensor Index
- // 3 -> Input to Cell Tensor Index
- // 4 -> Input to Output Tensor Index
- // 5 -> Recurrent to Input Weights Tensor Index
- // 6 -> Recurrent to Forget Weights Tensor Index
- // 7 -> Recurrent to Cell Weights Tensor Index
- // 8 -> Recurrent to Output Weights Tensor Index
- // 9 -> Cell to Input Weights Tensor Index
- // 10 -> Cell to Forget Weights Tensor Index
- // 11 -> Cell to Output Weights Tensor Index
- // 12 -> Input Gate Bias Tensor Index
- // 13 -> Forget Gate Bias Tensor Index
- // 14 -> Cell Bias Tensor Index
- // 15 -> Output Gate Bias Tensor Index
- // 16 -> Projection Weights Tensor Index
- // 17 -> Projection Bias Tensor Index
- // 18 -> Output State In Tensor Index
- // 19 -> Cell State In Tensor Index
- assert(init_param.input_count - 3 > 20);
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < 20; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
-
- // 24 -> Input Layer Normalization Weights Tensor Index
- // 25 -> Forget Layer Normalization Weights Tensor Index
- // 26 -> Cell Layer Normalization Weights Tensor Index
- // 27 -> Output Layer Normalization Weights Tensor Index
- if (init_param.input_count > 24)
- {
- for (uint32_t n = 24; n < 28; ++n)
- {
- if (init_param.input_count > n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- }
- }
-
- // Each output should be interpreted as follows:
- //
- // 0 -> Output Tensor Index (mapped to LSTM output position 3)
- // 1 -> Output State Out Tensor Index
- // 2 -> Cell State Out Tensor Index
- const OperandIndex scratch_buffer_index;
- OperandIndex output_state_index =
- init_param.output_count >= 2 ? OperandIndex{init_param.outputs[1]} : OperandIndex();
- OperandIndex cell_state_index =
- init_param.output_count >= 3 ? OperandIndex{init_param.outputs[2]} : OperandIndex();
- const OperandIndex output_index = OperandIndex{init_param.outputs[0]};
- OperandIndexSequence outputs{scratch_buffer_index, output_state_index, cell_state_index,
- output_index};
-
- operation::LSTM::Param param;
- const auto activation_index = OperandIndex{init_param.inputs[20]};
- switch (operands.at(activation_index).asScalar<int32_t>())
- {
- case 0:
- param.activation = Activation::NONE;
- break;
- case 1:
- param.activation = Activation::RELU;
- break;
- case 2:
- param.activation = Activation::RELU1;
- break;
- case 3:
- param.activation = Activation::RELU6;
- break;
- case 4:
- param.activation = Activation::TANH;
- break;
- case 6:
- param.activation = Activation::SIGMOID;
- break;
- default:
- throw std::runtime_error("Unsupported activation type");
- break;
- }
- param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>();
- param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>();
- param.time_major = operands.at(OperandIndex{init_param.inputs[23]}).asScalar<bool>();
-
- return new operation::LSTM{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_EQUAL_EX is deprecated
- // TODO Remove ANEURALNETWORKS_EQUAL_EX
- _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::Equal;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> LHS Tensor Index
- // 1 -> RHS Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::SquaredDifference{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_TOPK_V2] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 2);
-
- // Each output should be interpreted as follows:
- //
- // 0 -> Index for Output Values
- // 1 -> Index for Output Indices
- OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Index for Input Data
- // 1 -> Index for K
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::TopKV2::Param param;
- param.k = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-
- return new operation::TopKV2{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_TOPK_V2_EX is deprecated
- // TODO Remove ANEURALNETWORKS_TOPK_V2_EX
- _map[ANEURALNETWORKS_TOPK_V2_EX] = _map[ANEURALNETWORKS_TOPK_V2];
-
- _map[ANEURALNETWORKS_GATHER] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- // 1 -> axis Index
- // 2 -> indices Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[2]};
-
- operation::Gather::Param param;
- param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
-
- return new operation::Gather{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_GATHER_EX is deprecated
- // TODO Remove ANEURALNETWORKS_GATHER_EX
- _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER];
-
- _map[ANEURALNETWORKS_NEG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::NEG);
-
- // ANEURALNETWORKS_NEG_EX is deprecated
- // TODO Remove ANEURALNETWORKS_NEG_EX
- _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG];
-
- _map[ANEURALNETWORKS_ABS] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ABS);
-
- // ANEURALNETWORKS_ABS_EX is deprecated
- // TODO Remove ANEURALNETWORKS_ABS_EX
- _map[ANEURALNETWORKS_ABS_EX] = _map[ANEURALNETWORKS_ABS];
-
- _map[ANEURALNETWORKS_ARGMAX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Axis Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::ArgMax::Param param;
- // NNAPI ARGMAX output type is always int32
- param.output_type = DataType::INT32;
-
- return new operation::ArgMax{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_ARGMAX_EX is deprecated
- // TODO Remove ANEURALNETWORKS_ARGMAX_EX
- _map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX];
-
- _map[ANEURALNETWORKS_DEQUANTIZE] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::DEQUANTIZE);
-
- _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> ifm Tensor Index
- // 1 -> axis Tensor Index
- // 2 -> keep_dims Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Reduce::Param param;
- param.reduce_type = operation::Reduce::ReduceType::MEAN;
- param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
-
- return new operation::Reduce{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 5 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::LocalResponseNormalization::Param param;
- param.radius = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
- param.bias = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>();
- param.alpha = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>();
- param.beta = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<float>();
-
- return new operation::LocalResponseNormalization{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::DepthToSpace::Param param;
- param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-
- return new operation::DepthToSpace{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count >= 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 2; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
-
- operation::Pack::Param param;
- const auto num_index = OperandIndex{init_param.inputs[init_param.input_count - 2]};
- const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]};
- param.num = operands.at(num_index).asScalar<int32_t>();
- param.axis = operands.at(axis_index).asScalar<int32_t>();
-
- return new operation::Pack{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_REDUCE_MIN] =
- getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MIN);
-
- // ANEURALNETWORKS_REDUCE_MIN_EX is deprecated
- // TODO Remove ANEURALNETWORKS_REDUCE_MIN_EX
- _map[ANEURALNETWORKS_REDUCE_MIN_EX] = _map[ANEURALNETWORKS_REDUCE_MIN];
-
- _map[ANEURALNETWORKS_SPLIT] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count >= 1); // At least one output tensor and axis
-
- OperandIndexSequence inputs{init_param.inputs[1], init_param.inputs[0]};
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::Split::Param param;
- param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>();
-
- return new operation::Split{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SPLIT_V_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 4);
- assert(init_param.output_count >= 1); // At least one output tensor and axis
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::SplitV::Param param;
- param.num_splits = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<std::int32_t>();
- return new operation::SplitV{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_SPLIT_EX is deprecated
- // TODO Remove ANEURALNETWORKS_SPLIT_EX
- _map[ANEURALNETWORKS_SPLIT_EX] = _map[ANEURALNETWORKS_SPLIT];
-
- _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count >= 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::Unpack::Param param;
- const auto num_index = OperandIndex{init_param.inputs[1]};
- const auto axis_index = OperandIndex{init_param.inputs[2]};
- param.num = operands.at(num_index).asScalar<int32_t>();
- param.axis = operands.at(axis_index).asScalar<int32_t>();
-
- return new operation::Unpack{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count >= 2 && init_param.input_count <= 3 &&
- init_param.output_count >= 1);
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- if (init_param.input_count == 3)
- {
- inputs.append(OperandIndex{init_param.inputs[2]});
- }
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Pad{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_PAD_V2] = _map[ANEURALNETWORKS_PAD];
-
- _map[ANEURALNETWORKS_MINIMUM] =
- getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
-
- _map[ANEURALNETWORKS_MAXIMUM] =
- getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
-
- _map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 5);
- assert(init_param.output_count == 1);
- // Each input should be interpreted as follows:
- //
- // 0 -> indices tensor
- // 1 -> depth tensor
- // 2 -> on_value tensor
- // 3 -> off_value tensor
- // 4 -> axis scalar
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::OneHot::Param param;
- param.axis = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
-
- return new operation::OneHot{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_COS_EX] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::COS);
-
- _map[ANEURALNETWORKS_SIN] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SIN);
-
- _map[ANEURALNETWORKS_SHAPE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Shape{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_REDUCE_PROD] =
- getReduceGenerator(onert::ir::operation::Reduce::ReduceType::PROD);
-
- _map[ANEURALNETWORKS_ROUND_EX] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ROUND);
-
- _map[ANEURALNETWORKS_RANGE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> start Tensor Index
- // 1 -> limit Tensor Index
- // 2 -> delta Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- return new operation::Range{inputs, outputs};
- };
-
- // Each input should be interpreted as follows:
- // 0 -> LHS Tensor Index
- // 1 -> RHS Tensor Index
- _map[ANEURALNETWORKS_POW] = createSimpleBinaryOp<operation::Pow>;
-
- // Each input should be interpreted as follows:
- // 0 -> A tensor, specifying the input.
- // 1 -> A 1-D tensor, specifying the value
- _map[ANEURALNETWORKS_FILL_EX] = createSimpleBinaryOp<operation::Fill>;
-
- _map[ANEURALNETWORKS_ZEROS_LIKE_EX] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ZEROS_LIKE);
- // Each input should be interpreted as follows:
- // 0 -> Input Tensor Index
- // 1 -> Multiple Tensor Index
- _map[ANEURALNETWORKS_TILE] = createSimpleBinaryOp<operation::Tile>;
-
- _map[ANEURALNETWORKS_MATRIX_BAND_PART_EX] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
- // Each input should be interpreted as follows:
- //
- // 0 -> A tensor, input
- // 1 -> A 0-D tensor, number of lower diagnonals to keep
- // 2 -> A 0-D tensor, number of upper diagnonals to keep
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::MatrixBandPart{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_BATCH_MATMUL_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 4 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lhs Tensor Index
- // 1 -> Rhs Tensor Index
- // 2 -> adj_x boolean scalar Index
- // 3 -> adj_y boolean scalar Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::BatchMatMul::Param param;
- param.adj_x = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<bool>();
- param.adj_y = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<bool>();
-
- return new operation::BatchMatMul{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_EINSUM_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- // Each input should be interpreted as follows:
- //
- // 0....n - 1 -> n Input Tensors Index
- // n -> equation
- assert(init_param.input_count >= 1 && init_param.output_count == 1);
-
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Einsum::Param param;
- const OperandIndex equation_index{init_param.inputs[init_param.input_count - 1]};
- std::vector<char> equation_vector = operands.at(equation_index).asVector<char>();
- param.equation = std::string(equation_vector.begin(), equation_vector.end());
-
- return new operation::Einsum{inputs, outputs, param};
- };
-
- // 0 -> Input Tensor Index
- // 1 -> int32, int64, An 1-D int tensor Index
- _map[ANEURALNETWORKS_BROADCAST_TO_EX] = createSimpleBinaryOp<operation::BroadcastTo>;
-
- _map[ANEURALNETWORKS_STATELESS_RANDOM_UNIFORM_EX] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Shape Tensor Index
- // 1 -> int32, int64, An 1-D int tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::StatelessRandomUniform{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_FUSED_BATCH_NORM_V3_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- // Each input should be interpreted as follows:
- //
- // 0....4 -> 5 Input Tensors Index
- // n-2 -> is_training
- // n-1 -> data_format
- // n -> epsilon
-
- assert(init_param.input_count == 8 && init_param.output_count == 1);
-
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::FusedBatchNorm::Param param;
- const OperandIndex is_training_index{init_param.inputs[init_param.input_count - 3]};
- param.is_training = operands.at(is_training_index).asScalar<bool>();
-
- const OperandIndex data_format_index{init_param.inputs[init_param.input_count - 2]};
- std::vector<char> data_format_vector = operands.at(data_format_index).asVector<char>();
- param.data_format = std::string(data_format_vector.begin(), data_format_vector.end());
-
- const OperandIndex epsilon_index{init_param.inputs[init_param.input_count - 1]};
- param.epsilon = operands.at(epsilon_index).asScalar<float>();
- return new operation::FusedBatchNorm{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LOG_SOFTMAX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> A tensor specifying the input logits.
- // 1 -> A scalar, specifying the positive scaling factor for the exponent, beta.
- // 2 -> An scalar specifying the axis to reduce across.
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- const auto beta_index = OperandIndex{init_param.inputs[1]};
- const auto axis_index = OperandIndex{init_param.inputs[2]};
-
- operation::LogSoftmax::Param param;
- param.beta = operands.at(beta_index).asScalar<float>();
- param.axis = operands.at(axis_index).asScalar<int>();
-
- return new operation::LogSoftmax{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_QUANTIZE] =
- getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::QUANTIZE);
-}
-
-Operation *OperationFactory::create(ANeuralNetworksOperationType type,
- const OperationFactory::Param &param, Operands &operands)
-{
- auto it = _map.find(type);
- if (it == _map.end())
- {
- throw std::runtime_error("Unsupported operation type: " + std::to_string(type));
- }
- return it->second(param, operands);
-}
diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h
deleted file mode 100644
index 367cf74db..000000000
--- a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __OPERATION_FACTORY_H__
-#define __OPERATION_FACTORY_H__
-
-#include <unordered_map>
-
-#include "ir/Operands.h"
-#include "ir/Operation.h"
-#include "NeuralNetworks.h"
-#include "NeuralNetworksEx.h"
-
-/**
- * @brief A class to create a onert operation object from NN API input parameters
- */
-class OperationFactory
-{
-public:
- struct Param
- {
- uint32_t input_count;
- const uint32_t *inputs;
- uint32_t output_count;
- const uint32_t *outputs;
- };
-
-public:
- using Generator =
- std::function<onert::ir::Operation *(const OperationFactory::Param &, onert::ir::Operands &)>;
-
-public:
- static OperationFactory &get();
-
-private:
- OperationFactory();
-
-public:
- onert::ir::Operation *create(ANeuralNetworksOperationType, const OperationFactory::Param &param,
- onert::ir::Operands &operands);
- // TODO add "register" method for separating registration, possibly supporting custom-ops
-
-private:
- std::unordered_map<ANeuralNetworksOperationType, Generator> _map;
-};
-
-#endif // __OPERATION_FACTORY_H__
diff --git a/runtime/onert/frontend/tflite/CMakeLists.txt b/runtime/onert/frontend/tflite/CMakeLists.txt
deleted file mode 100644
index 604a9e4cb..000000000
--- a/runtime/onert/frontend/tflite/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-if(NOT BUILD_TFLITE_LOADER)
- return()
-endif(NOT BUILD_TFLITE_LOADER)
-
-set(TFLITE_LOADER_SOURCES src/tflite_loader.cc)
-
-add_library(tflite_loader SHARED ${TFLITE_LOADER_SOURCES})
-
-target_include_directories(tflite_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-
-target_link_libraries(tflite_loader PRIVATE onert_core)
-target_link_libraries(tflite_loader PRIVATE base_loader nnfw_common nnfw_coverage)
-
-install(TARGETS tflite_loader DESTINATION lib)
diff --git a/runtime/onert/frontend/tflite/include/tflite_loader.h b/runtime/onert/frontend/tflite/include/tflite_loader.h
deleted file mode 100644
index 743c05f9e..000000000
--- a/runtime/onert/frontend/tflite/include/tflite_loader.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __TFLITE_TFLITE_LOADER_H__
-#define __TFLITE_TFLITE_LOADER_H__
-
-#include "ir/Graph.h"
-
-#include <memory>
-
-namespace onert
-{
-namespace tflite_loader
-{
-
-std::unique_ptr<ir::Subgraphs> loadModel(const char *filename);
-
-} // namespace tflite_loader
-} // namespace onert
-
-#endif // __TFLITE_TFLITE_LOADER_H__
diff --git a/runtime/onert/frontend/tflite/src/tflite_loader.cc b/runtime/onert/frontend/tflite/src/tflite_loader.cc
deleted file mode 100644
index fe4295ada..000000000
--- a/runtime/onert/frontend/tflite/src/tflite_loader.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite_loader.h"
-#include "base_loader.h"
-#include "tflite_schema_generated.h"
-
-namespace onert
-{
-namespace tflite_loader
-{
-
-namespace
-{
-
-struct LoaderDomain
-{
- using Verifier = flatbuffers::Verifier;
- using ActivationFunctionType = onert_tflite::ActivationFunctionType;
- using Buffer = onert_tflite::Buffer;
- using BuiltinOperator = onert_tflite::BuiltinOperator;
- using CustomOptionsFormat = onert_tflite::CustomOptionsFormat;
- using Model = onert_tflite::Model;
- using Operator = onert_tflite::Operator;
- using Padding = onert_tflite::Padding;
- using Pool2DOptions = onert_tflite::Pool2DOptions;
- using Tensor = onert_tflite::Tensor;
- using TensorType = onert_tflite::TensorType;
- using SubGraph = onert_tflite::SubGraph;
- using DimensionType = onert_tflite::DimensionType;
- using SparseIndexVector = onert_tflite::SparseIndexVector;
-
- static const char *EnumNameBuiltinOperator(BuiltinOperator e)
- {
- return onert_tflite::EnumNameBuiltinOperator(e);
- }
- static const char *EnumNameActivationFunctionType(ActivationFunctionType e)
- {
- return onert_tflite::EnumNameActivationFunctionType(e);
- }
- static const char *EnumNameTensorType(TensorType e)
- {
- return onert_tflite::EnumNameTensorType(e);
- }
- static const Model *GetModel(const void *buf) { return onert_tflite::GetModel(buf); }
- static bool VerifyModelBuffer(Verifier &verifier)
- {
- return onert_tflite::VerifyModelBuffer(verifier);
- }
-};
-
-class TFLiteLoader final : public base_loader::BaseLoader<LoaderDomain>
-{
-public:
- using BaseLoader::BaseLoader;
-
- bool allowOptionalInputTensor(BuiltinOperator op) override
- {
- switch (op)
- {
- case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
- return true;
- default:
- return false;
- }
- }
-
-private:
- std::unique_ptr<ir::Graph> loadSubgraph(const onert_tflite::SubGraph *tflite_subg) override
- {
- auto subg = std::make_unique<ir::Graph>();
- // Load tensors
- _tensor_to_operand.resize(tflite_subg->tensors()->size());
- for (flatbuffers::uoffset_t i = 0; i < tflite_subg->tensors()->size(); ++i)
- {
- _tensor_to_operand[i] = loadOperand(tflite_subg->tensors()->Get(i), *subg);
- }
- // Set inputs
- for (const std::int32_t input_ind : *tflite_subg->inputs())
- {
- subg->addInput(tensorIdxToOperandIdx(input_ind),
- _tensor_names.at(_tensor_to_operand[input_ind]));
- }
- // Set outputs
- for (const std::int32_t output_ind : *tflite_subg->outputs())
- {
- subg->addOutput(tensorIdxToOperandIdx(output_ind),
- _tensor_names.at(_tensor_to_operand[output_ind]));
- }
- // Create operations
- for (const auto *op : *tflite_subg->operators())
- {
- loadOperation(op, *subg);
- }
-
- subg->finishBuilding();
-
- return subg;
- }
-};
-
-} // namespace
-
-std::unique_ptr<ir::Subgraphs> loadModel(const char *filename)
-{
- auto subgraphs = std::make_unique<ir::Subgraphs>();
- TFLiteLoader loader(subgraphs);
- loader.loadFromFile(filename);
- return subgraphs;
-}
-
-} // namespace tflite_loader
-} // namespace onert
diff --git a/runtime/onert/frontend/tflite/src/tflite_schema_generated.h b/runtime/onert/frontend/tflite/src/tflite_schema_generated.h
deleted file mode 100644
index c6e9147cd..000000000
--- a/runtime/onert/frontend/tflite/src/tflite_schema_generated.h
+++ /dev/null
@@ -1,9553 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Samsung Electronics Co., Ltd. All Rights Reserved
- * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// automatically generated by the FlatBuffers compiler, do not modify
-
-#ifndef FLATBUFFERS_GENERATED_TFLITESCHEMA_ONERT_TFLITE_H_
-#define FLATBUFFERS_GENERATED_TFLITESCHEMA_ONERT_TFLITE_H_
-
-#include "flatbuffers/flatbuffers.h"
-
-namespace onert_tflite
-{
-
-struct CustomQuantization;
-
-struct QuantizationParameters;
-
-struct Int32Vector;
-
-struct Uint16Vector;
-
-struct Uint8Vector;
-
-struct DimensionMetadata;
-
-struct SparsityParameters;
-
-struct Tensor;
-
-struct Conv2DOptions;
-
-struct Pool2DOptions;
-
-struct DepthwiseConv2DOptions;
-
-struct ConcatEmbeddingsOptions;
-
-struct LSHProjectionOptions;
-
-struct SVDFOptions;
-
-struct RNNOptions;
-
-struct SequenceRNNOptions;
-
-struct BidirectionalSequenceRNNOptions;
-
-struct FullyConnectedOptions;
-
-struct SoftmaxOptions;
-
-struct ConcatenationOptions;
-
-struct AddOptions;
-
-struct MulOptions;
-
-struct L2NormOptions;
-
-struct LocalResponseNormalizationOptions;
-
-struct LSTMOptions;
-
-struct UnidirectionalSequenceLSTMOptions;
-
-struct BidirectionalSequenceLSTMOptions;
-
-struct ResizeBilinearOptions;
-
-struct ResizeNearestNeighborOptions;
-
-struct CallOptions;
-
-struct PadOptions;
-
-struct PadV2Options;
-
-struct ReshapeOptions;
-
-struct SpaceToBatchNDOptions;
-
-struct BatchToSpaceNDOptions;
-
-struct SkipGramOptions;
-
-struct SpaceToDepthOptions;
-
-struct DepthToSpaceOptions;
-
-struct SubOptions;
-
-struct DivOptions;
-
-struct TopKV2Options;
-
-struct EmbeddingLookupSparseOptions;
-
-struct GatherOptions;
-
-struct TransposeOptions;
-
-struct ExpOptions;
-
-struct CosOptions;
-
-struct ReducerOptions;
-
-struct SqueezeOptions;
-
-struct SplitOptions;
-
-struct SplitVOptions;
-
-struct StridedSliceOptions;
-
-struct LogSoftmaxOptions;
-
-struct CastOptions;
-
-struct DequantizeOptions;
-
-struct MaximumMinimumOptions;
-
-struct TileOptions;
-
-struct ArgMaxOptions;
-
-struct ArgMinOptions;
-
-struct GreaterOptions;
-
-struct GreaterEqualOptions;
-
-struct LessOptions;
-
-struct LessEqualOptions;
-
-struct NegOptions;
-
-struct SelectOptions;
-
-struct SliceOptions;
-
-struct TransposeConvOptions;
-
-struct ExpandDimsOptions;
-
-struct SparseToDenseOptions;
-
-struct EqualOptions;
-
-struct NotEqualOptions;
-
-struct ShapeOptions;
-
-struct RankOptions;
-
-struct PowOptions;
-
-struct FakeQuantOptions;
-
-struct PackOptions;
-
-struct LogicalOrOptions;
-
-struct OneHotOptions;
-
-struct AbsOptions;
-
-struct HardSwishOptions;
-
-struct LogicalAndOptions;
-
-struct LogicalNotOptions;
-
-struct UnpackOptions;
-
-struct FloorDivOptions;
-
-struct SquareOptions;
-
-struct ZerosLikeOptions;
-
-struct FillOptions;
-
-struct FloorModOptions;
-
-struct RangeOptions;
-
-struct LeakyReluOptions;
-
-struct SquaredDifferenceOptions;
-
-struct MirrorPadOptions;
-
-struct UniqueOptions;
-
-struct ReverseV2Options;
-
-struct AddNOptions;
-
-struct GatherNdOptions;
-
-struct WhereOptions;
-
-struct ReverseSequenceOptions;
-
-struct MatrixDiagOptions;
-
-struct QuantizeOptions;
-
-struct MatrixSetDiagOptions;
-
-struct IfOptions;
-
-struct WhileOptions;
-
-struct NonMaxSuppressionV4Options;
-
-struct NonMaxSuppressionV5Options;
-
-struct ScatterNdOptions;
-
-struct SelectV2Options;
-
-struct DensifyOptions;
-
-struct SegmentSumOptions;
-
-struct BatchMatMulOptions;
-
-struct OperatorCode;
-
-struct Operator;
-
-struct SubGraph;
-
-struct Buffer;
-
-struct Metadata;
-
-struct Model;
-
-enum TensorType
-{
- TensorType_FLOAT32 = 0,
- TensorType_FLOAT16 = 1,
- TensorType_INT32 = 2,
- TensorType_UINT8 = 3,
- TensorType_INT64 = 4,
- TensorType_STRING = 5,
- TensorType_BOOL = 6,
- TensorType_INT16 = 7,
- TensorType_COMPLEX64 = 8,
- TensorType_INT8 = 9,
- TensorType_FLOAT64 = 10,
- TensorType_MIN = TensorType_FLOAT32,
- TensorType_MAX = TensorType_FLOAT64
-};
-
-inline const TensorType (&EnumValuesTensorType())[11]
-{
- static const TensorType values[] = {TensorType_FLOAT32, TensorType_FLOAT16, TensorType_INT32,
- TensorType_UINT8, TensorType_INT64, TensorType_STRING,
- TensorType_BOOL, TensorType_INT16, TensorType_COMPLEX64,
- TensorType_INT8, TensorType_FLOAT64};
- return values;
-}
-
-inline const char *const *EnumNamesTensorType()
-{
- static const char *const names[] = {"FLOAT32", "FLOAT16", "INT32", "UINT8",
- "INT64", "STRING", "BOOL", "INT16",
- "COMPLEX64", "INT8", "FLOAT64", nullptr};
- return names;
-}
-
-inline const char *EnumNameTensorType(TensorType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesTensorType()[index];
-}
-
-enum QuantizationDetails
-{
- QuantizationDetails_NONE = 0,
- QuantizationDetails_CustomQuantization = 1,
- QuantizationDetails_MIN = QuantizationDetails_NONE,
- QuantizationDetails_MAX = QuantizationDetails_CustomQuantization
-};
-
-inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2]
-{
- static const QuantizationDetails values[] = {QuantizationDetails_NONE,
- QuantizationDetails_CustomQuantization};
- return values;
-}
-
-inline const char *const *EnumNamesQuantizationDetails()
-{
- static const char *const names[] = {"NONE", "CustomQuantization", nullptr};
- return names;
-}
-
-inline const char *EnumNameQuantizationDetails(QuantizationDetails e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesQuantizationDetails()[index];
-}
-
-template <typename T> struct QuantizationDetailsTraits
-{
- static const QuantizationDetails enum_value = QuantizationDetails_NONE;
-};
-
-template <> struct QuantizationDetailsTraits<CustomQuantization>
-{
- static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization;
-};
-
-bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type);
-bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum DimensionType
-{
- DimensionType_DENSE = 0,
- DimensionType_SPARSE_CSR = 1,
- DimensionType_MIN = DimensionType_DENSE,
- DimensionType_MAX = DimensionType_SPARSE_CSR
-};
-
-inline const DimensionType (&EnumValuesDimensionType())[2]
-{
- static const DimensionType values[] = {DimensionType_DENSE, DimensionType_SPARSE_CSR};
- return values;
-}
-
-inline const char *const *EnumNamesDimensionType()
-{
- static const char *const names[] = {"DENSE", "SPARSE_CSR", nullptr};
- return names;
-}
-
-inline const char *EnumNameDimensionType(DimensionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesDimensionType()[index];
-}
-
-enum SparseIndexVector
-{
- SparseIndexVector_NONE = 0,
- SparseIndexVector_Int32Vector = 1,
- SparseIndexVector_Uint16Vector = 2,
- SparseIndexVector_Uint8Vector = 3,
- SparseIndexVector_MIN = SparseIndexVector_NONE,
- SparseIndexVector_MAX = SparseIndexVector_Uint8Vector
-};
-
-inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4]
-{
- static const SparseIndexVector values[] = {SparseIndexVector_NONE, SparseIndexVector_Int32Vector,
- SparseIndexVector_Uint16Vector,
- SparseIndexVector_Uint8Vector};
- return values;
-}
-
-inline const char *const *EnumNamesSparseIndexVector()
-{
- static const char *const names[] = {"NONE", "Int32Vector", "Uint16Vector", "Uint8Vector",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameSparseIndexVector(SparseIndexVector e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesSparseIndexVector()[index];
-}
-
-template <typename T> struct SparseIndexVectorTraits
-{
- static const SparseIndexVector enum_value = SparseIndexVector_NONE;
-};
-
-template <> struct SparseIndexVectorTraits<Int32Vector>
-{
- static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector;
-};
-
-template <> struct SparseIndexVectorTraits<Uint16Vector>
-{
- static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector;
-};
-
-template <> struct SparseIndexVectorTraits<Uint8Vector>
-{
- static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector;
-};
-
-bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj,
- SparseIndexVector type);
-bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum BuiltinOperator
-{
- BuiltinOperator_ADD = 0,
- BuiltinOperator_AVERAGE_POOL_2D = 1,
- BuiltinOperator_CONCATENATION = 2,
- BuiltinOperator_CONV_2D = 3,
- BuiltinOperator_DEPTHWISE_CONV_2D = 4,
- BuiltinOperator_DEPTH_TO_SPACE = 5,
- BuiltinOperator_DEQUANTIZE = 6,
- BuiltinOperator_EMBEDDING_LOOKUP = 7,
- BuiltinOperator_FLOOR = 8,
- BuiltinOperator_FULLY_CONNECTED = 9,
- BuiltinOperator_HASHTABLE_LOOKUP = 10,
- BuiltinOperator_L2_NORMALIZATION = 11,
- BuiltinOperator_L2_POOL_2D = 12,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13,
- BuiltinOperator_LOGISTIC = 14,
- BuiltinOperator_LSH_PROJECTION = 15,
- BuiltinOperator_LSTM = 16,
- BuiltinOperator_MAX_POOL_2D = 17,
- BuiltinOperator_MUL = 18,
- BuiltinOperator_RELU = 19,
- BuiltinOperator_RELU_N1_TO_1 = 20,
- BuiltinOperator_RELU6 = 21,
- BuiltinOperator_RESHAPE = 22,
- BuiltinOperator_RESIZE_BILINEAR = 23,
- BuiltinOperator_RNN = 24,
- BuiltinOperator_SOFTMAX = 25,
- BuiltinOperator_SPACE_TO_DEPTH = 26,
- BuiltinOperator_SVDF = 27,
- BuiltinOperator_TANH = 28,
- BuiltinOperator_CONCAT_EMBEDDINGS = 29,
- BuiltinOperator_SKIP_GRAM = 30,
- BuiltinOperator_CALL = 31,
- BuiltinOperator_CUSTOM = 32,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33,
- BuiltinOperator_PAD = 34,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- BuiltinOperator_GATHER = 36,
- BuiltinOperator_BATCH_TO_SPACE_ND = 37,
- BuiltinOperator_SPACE_TO_BATCH_ND = 38,
- BuiltinOperator_TRANSPOSE = 39,
- BuiltinOperator_MEAN = 40,
- BuiltinOperator_SUB = 41,
- BuiltinOperator_DIV = 42,
- BuiltinOperator_SQUEEZE = 43,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- BuiltinOperator_STRIDED_SLICE = 45,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46,
- BuiltinOperator_EXP = 47,
- BuiltinOperator_TOPK_V2 = 48,
- BuiltinOperator_SPLIT = 49,
- BuiltinOperator_LOG_SOFTMAX = 50,
- BuiltinOperator_DELEGATE = 51,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- BuiltinOperator_CAST = 53,
- BuiltinOperator_PRELU = 54,
- BuiltinOperator_MAXIMUM = 55,
- BuiltinOperator_ARG_MAX = 56,
- BuiltinOperator_MINIMUM = 57,
- BuiltinOperator_LESS = 58,
- BuiltinOperator_NEG = 59,
- BuiltinOperator_PADV2 = 60,
- BuiltinOperator_GREATER = 61,
- BuiltinOperator_GREATER_EQUAL = 62,
- BuiltinOperator_LESS_EQUAL = 63,
- BuiltinOperator_SELECT = 64,
- BuiltinOperator_SLICE = 65,
- BuiltinOperator_SIN = 66,
- BuiltinOperator_TRANSPOSE_CONV = 67,
- BuiltinOperator_SPARSE_TO_DENSE = 68,
- BuiltinOperator_TILE = 69,
- BuiltinOperator_EXPAND_DIMS = 70,
- BuiltinOperator_EQUAL = 71,
- BuiltinOperator_NOT_EQUAL = 72,
- BuiltinOperator_LOG = 73,
- BuiltinOperator_SUM = 74,
- BuiltinOperator_SQRT = 75,
- BuiltinOperator_RSQRT = 76,
- BuiltinOperator_SHAPE = 77,
- BuiltinOperator_POW = 78,
- BuiltinOperator_ARG_MIN = 79,
- BuiltinOperator_FAKE_QUANT = 80,
- BuiltinOperator_REDUCE_PROD = 81,
- BuiltinOperator_REDUCE_MAX = 82,
- BuiltinOperator_PACK = 83,
- BuiltinOperator_LOGICAL_OR = 84,
- BuiltinOperator_ONE_HOT = 85,
- BuiltinOperator_LOGICAL_AND = 86,
- BuiltinOperator_LOGICAL_NOT = 87,
- BuiltinOperator_UNPACK = 88,
- BuiltinOperator_REDUCE_MIN = 89,
- BuiltinOperator_FLOOR_DIV = 90,
- BuiltinOperator_REDUCE_ANY = 91,
- BuiltinOperator_SQUARE = 92,
- BuiltinOperator_ZEROS_LIKE = 93,
- BuiltinOperator_FILL = 94,
- BuiltinOperator_FLOOR_MOD = 95,
- BuiltinOperator_RANGE = 96,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97,
- BuiltinOperator_LEAKY_RELU = 98,
- BuiltinOperator_SQUARED_DIFFERENCE = 99,
- BuiltinOperator_MIRROR_PAD = 100,
- BuiltinOperator_ABS = 101,
- BuiltinOperator_SPLIT_V = 102,
- BuiltinOperator_UNIQUE = 103,
- BuiltinOperator_CEIL = 104,
- BuiltinOperator_REVERSE_V2 = 105,
- BuiltinOperator_ADD_N = 106,
- BuiltinOperator_GATHER_ND = 107,
- BuiltinOperator_COS = 108,
- BuiltinOperator_WHERE = 109,
- BuiltinOperator_RANK = 110,
- BuiltinOperator_ELU = 111,
- BuiltinOperator_REVERSE_SEQUENCE = 112,
- BuiltinOperator_MATRIX_DIAG = 113,
- BuiltinOperator_QUANTIZE = 114,
- BuiltinOperator_MATRIX_SET_DIAG = 115,
- BuiltinOperator_ROUND = 116,
- BuiltinOperator_HARD_SWISH = 117,
- BuiltinOperator_IF = 118,
- BuiltinOperator_WHILE = 119,
- BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120,
- BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121,
- BuiltinOperator_SCATTER_ND = 122,
- BuiltinOperator_SELECT_V2 = 123,
- BuiltinOperator_DENSIFY = 124,
- BuiltinOperator_SEGMENT_SUM = 125,
- BuiltinOperator_BATCH_MATMUL = 126,
- BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_BATCH_MATMUL
-};
-
-inline const BuiltinOperator (&EnumValuesBuiltinOperator())[127]
-{
- static const BuiltinOperator values[] = {BuiltinOperator_ADD,
- BuiltinOperator_AVERAGE_POOL_2D,
- BuiltinOperator_CONCATENATION,
- BuiltinOperator_CONV_2D,
- BuiltinOperator_DEPTHWISE_CONV_2D,
- BuiltinOperator_DEPTH_TO_SPACE,
- BuiltinOperator_DEQUANTIZE,
- BuiltinOperator_EMBEDDING_LOOKUP,
- BuiltinOperator_FLOOR,
- BuiltinOperator_FULLY_CONNECTED,
- BuiltinOperator_HASHTABLE_LOOKUP,
- BuiltinOperator_L2_NORMALIZATION,
- BuiltinOperator_L2_POOL_2D,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
- BuiltinOperator_LOGISTIC,
- BuiltinOperator_LSH_PROJECTION,
- BuiltinOperator_LSTM,
- BuiltinOperator_MAX_POOL_2D,
- BuiltinOperator_MUL,
- BuiltinOperator_RELU,
- BuiltinOperator_RELU_N1_TO_1,
- BuiltinOperator_RELU6,
- BuiltinOperator_RESHAPE,
- BuiltinOperator_RESIZE_BILINEAR,
- BuiltinOperator_RNN,
- BuiltinOperator_SOFTMAX,
- BuiltinOperator_SPACE_TO_DEPTH,
- BuiltinOperator_SVDF,
- BuiltinOperator_TANH,
- BuiltinOperator_CONCAT_EMBEDDINGS,
- BuiltinOperator_SKIP_GRAM,
- BuiltinOperator_CALL,
- BuiltinOperator_CUSTOM,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
- BuiltinOperator_PAD,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_GATHER,
- BuiltinOperator_BATCH_TO_SPACE_ND,
- BuiltinOperator_SPACE_TO_BATCH_ND,
- BuiltinOperator_TRANSPOSE,
- BuiltinOperator_MEAN,
- BuiltinOperator_SUB,
- BuiltinOperator_DIV,
- BuiltinOperator_SQUEEZE,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_STRIDED_SLICE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_EXP,
- BuiltinOperator_TOPK_V2,
- BuiltinOperator_SPLIT,
- BuiltinOperator_LOG_SOFTMAX,
- BuiltinOperator_DELEGATE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_CAST,
- BuiltinOperator_PRELU,
- BuiltinOperator_MAXIMUM,
- BuiltinOperator_ARG_MAX,
- BuiltinOperator_MINIMUM,
- BuiltinOperator_LESS,
- BuiltinOperator_NEG,
- BuiltinOperator_PADV2,
- BuiltinOperator_GREATER,
- BuiltinOperator_GREATER_EQUAL,
- BuiltinOperator_LESS_EQUAL,
- BuiltinOperator_SELECT,
- BuiltinOperator_SLICE,
- BuiltinOperator_SIN,
- BuiltinOperator_TRANSPOSE_CONV,
- BuiltinOperator_SPARSE_TO_DENSE,
- BuiltinOperator_TILE,
- BuiltinOperator_EXPAND_DIMS,
- BuiltinOperator_EQUAL,
- BuiltinOperator_NOT_EQUAL,
- BuiltinOperator_LOG,
- BuiltinOperator_SUM,
- BuiltinOperator_SQRT,
- BuiltinOperator_RSQRT,
- BuiltinOperator_SHAPE,
- BuiltinOperator_POW,
- BuiltinOperator_ARG_MIN,
- BuiltinOperator_FAKE_QUANT,
- BuiltinOperator_REDUCE_PROD,
- BuiltinOperator_REDUCE_MAX,
- BuiltinOperator_PACK,
- BuiltinOperator_LOGICAL_OR,
- BuiltinOperator_ONE_HOT,
- BuiltinOperator_LOGICAL_AND,
- BuiltinOperator_LOGICAL_NOT,
- BuiltinOperator_UNPACK,
- BuiltinOperator_REDUCE_MIN,
- BuiltinOperator_FLOOR_DIV,
- BuiltinOperator_REDUCE_ANY,
- BuiltinOperator_SQUARE,
- BuiltinOperator_ZEROS_LIKE,
- BuiltinOperator_FILL,
- BuiltinOperator_FLOOR_MOD,
- BuiltinOperator_RANGE,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- BuiltinOperator_LEAKY_RELU,
- BuiltinOperator_SQUARED_DIFFERENCE,
- BuiltinOperator_MIRROR_PAD,
- BuiltinOperator_ABS,
- BuiltinOperator_SPLIT_V,
- BuiltinOperator_UNIQUE,
- BuiltinOperator_CEIL,
- BuiltinOperator_REVERSE_V2,
- BuiltinOperator_ADD_N,
- BuiltinOperator_GATHER_ND,
- BuiltinOperator_COS,
- BuiltinOperator_WHERE,
- BuiltinOperator_RANK,
- BuiltinOperator_ELU,
- BuiltinOperator_REVERSE_SEQUENCE,
- BuiltinOperator_MATRIX_DIAG,
- BuiltinOperator_QUANTIZE,
- BuiltinOperator_MATRIX_SET_DIAG,
- BuiltinOperator_ROUND,
- BuiltinOperator_HARD_SWISH,
- BuiltinOperator_IF,
- BuiltinOperator_WHILE,
- BuiltinOperator_NON_MAX_SUPPRESSION_V4,
- BuiltinOperator_NON_MAX_SUPPRESSION_V5,
- BuiltinOperator_SCATTER_ND,
- BuiltinOperator_SELECT_V2,
- BuiltinOperator_DENSIFY,
- BuiltinOperator_SEGMENT_SUM,
- BuiltinOperator_BATCH_MATMUL};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOperator()
-{
- static const char *const names[] = {"ADD",
- "AVERAGE_POOL_2D",
- "CONCATENATION",
- "CONV_2D",
- "DEPTHWISE_CONV_2D",
- "DEPTH_TO_SPACE",
- "DEQUANTIZE",
- "EMBEDDING_LOOKUP",
- "FLOOR",
- "FULLY_CONNECTED",
- "HASHTABLE_LOOKUP",
- "L2_NORMALIZATION",
- "L2_POOL_2D",
- "LOCAL_RESPONSE_NORMALIZATION",
- "LOGISTIC",
- "LSH_PROJECTION",
- "LSTM",
- "MAX_POOL_2D",
- "MUL",
- "RELU",
- "RELU_N1_TO_1",
- "RELU6",
- "RESHAPE",
- "RESIZE_BILINEAR",
- "RNN",
- "SOFTMAX",
- "SPACE_TO_DEPTH",
- "SVDF",
- "TANH",
- "CONCAT_EMBEDDINGS",
- "SKIP_GRAM",
- "CALL",
- "CUSTOM",
- "EMBEDDING_LOOKUP_SPARSE",
- "PAD",
- "UNIDIRECTIONAL_SEQUENCE_RNN",
- "GATHER",
- "BATCH_TO_SPACE_ND",
- "SPACE_TO_BATCH_ND",
- "TRANSPOSE",
- "MEAN",
- "SUB",
- "DIV",
- "SQUEEZE",
- "UNIDIRECTIONAL_SEQUENCE_LSTM",
- "STRIDED_SLICE",
- "BIDIRECTIONAL_SEQUENCE_RNN",
- "EXP",
- "TOPK_V2",
- "SPLIT",
- "LOG_SOFTMAX",
- "DELEGATE",
- "BIDIRECTIONAL_SEQUENCE_LSTM",
- "CAST",
- "PRELU",
- "MAXIMUM",
- "ARG_MAX",
- "MINIMUM",
- "LESS",
- "NEG",
- "PADV2",
- "GREATER",
- "GREATER_EQUAL",
- "LESS_EQUAL",
- "SELECT",
- "SLICE",
- "SIN",
- "TRANSPOSE_CONV",
- "SPARSE_TO_DENSE",
- "TILE",
- "EXPAND_DIMS",
- "EQUAL",
- "NOT_EQUAL",
- "LOG",
- "SUM",
- "SQRT",
- "RSQRT",
- "SHAPE",
- "POW",
- "ARG_MIN",
- "FAKE_QUANT",
- "REDUCE_PROD",
- "REDUCE_MAX",
- "PACK",
- "LOGICAL_OR",
- "ONE_HOT",
- "LOGICAL_AND",
- "LOGICAL_NOT",
- "UNPACK",
- "REDUCE_MIN",
- "FLOOR_DIV",
- "REDUCE_ANY",
- "SQUARE",
- "ZEROS_LIKE",
- "FILL",
- "FLOOR_MOD",
- "RANGE",
- "RESIZE_NEAREST_NEIGHBOR",
- "LEAKY_RELU",
- "SQUARED_DIFFERENCE",
- "MIRROR_PAD",
- "ABS",
- "SPLIT_V",
- "UNIQUE",
- "CEIL",
- "REVERSE_V2",
- "ADD_N",
- "GATHER_ND",
- "COS",
- "WHERE",
- "RANK",
- "ELU",
- "REVERSE_SEQUENCE",
- "MATRIX_DIAG",
- "QUANTIZE",
- "MATRIX_SET_DIAG",
- "ROUND",
- "HARD_SWISH",
- "IF",
- "WHILE",
- "NON_MAX_SUPPRESSION_V4",
- "NON_MAX_SUPPRESSION_V5",
- "SCATTER_ND",
- "SELECT_V2",
- "DENSIFY",
- "SEGMENT_SUM",
- "BATCH_MATMUL",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOperator(BuiltinOperator e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOperator()[index];
-}
-
-enum BuiltinOptions
-{
- BuiltinOptions_NONE = 0,
- BuiltinOptions_Conv2DOptions = 1,
- BuiltinOptions_DepthwiseConv2DOptions = 2,
- BuiltinOptions_ConcatEmbeddingsOptions = 3,
- BuiltinOptions_LSHProjectionOptions = 4,
- BuiltinOptions_Pool2DOptions = 5,
- BuiltinOptions_SVDFOptions = 6,
- BuiltinOptions_RNNOptions = 7,
- BuiltinOptions_FullyConnectedOptions = 8,
- BuiltinOptions_SoftmaxOptions = 9,
- BuiltinOptions_ConcatenationOptions = 10,
- BuiltinOptions_AddOptions = 11,
- BuiltinOptions_L2NormOptions = 12,
- BuiltinOptions_LocalResponseNormalizationOptions = 13,
- BuiltinOptions_LSTMOptions = 14,
- BuiltinOptions_ResizeBilinearOptions = 15,
- BuiltinOptions_CallOptions = 16,
- BuiltinOptions_ReshapeOptions = 17,
- BuiltinOptions_SkipGramOptions = 18,
- BuiltinOptions_SpaceToDepthOptions = 19,
- BuiltinOptions_EmbeddingLookupSparseOptions = 20,
- BuiltinOptions_MulOptions = 21,
- BuiltinOptions_PadOptions = 22,
- BuiltinOptions_GatherOptions = 23,
- BuiltinOptions_BatchToSpaceNDOptions = 24,
- BuiltinOptions_SpaceToBatchNDOptions = 25,
- BuiltinOptions_TransposeOptions = 26,
- BuiltinOptions_ReducerOptions = 27,
- BuiltinOptions_SubOptions = 28,
- BuiltinOptions_DivOptions = 29,
- BuiltinOptions_SqueezeOptions = 30,
- BuiltinOptions_SequenceRNNOptions = 31,
- BuiltinOptions_StridedSliceOptions = 32,
- BuiltinOptions_ExpOptions = 33,
- BuiltinOptions_TopKV2Options = 34,
- BuiltinOptions_SplitOptions = 35,
- BuiltinOptions_LogSoftmaxOptions = 36,
- BuiltinOptions_CastOptions = 37,
- BuiltinOptions_DequantizeOptions = 38,
- BuiltinOptions_MaximumMinimumOptions = 39,
- BuiltinOptions_ArgMaxOptions = 40,
- BuiltinOptions_LessOptions = 41,
- BuiltinOptions_NegOptions = 42,
- BuiltinOptions_PadV2Options = 43,
- BuiltinOptions_GreaterOptions = 44,
- BuiltinOptions_GreaterEqualOptions = 45,
- BuiltinOptions_LessEqualOptions = 46,
- BuiltinOptions_SelectOptions = 47,
- BuiltinOptions_SliceOptions = 48,
- BuiltinOptions_TransposeConvOptions = 49,
- BuiltinOptions_SparseToDenseOptions = 50,
- BuiltinOptions_TileOptions = 51,
- BuiltinOptions_ExpandDimsOptions = 52,
- BuiltinOptions_EqualOptions = 53,
- BuiltinOptions_NotEqualOptions = 54,
- BuiltinOptions_ShapeOptions = 55,
- BuiltinOptions_PowOptions = 56,
- BuiltinOptions_ArgMinOptions = 57,
- BuiltinOptions_FakeQuantOptions = 58,
- BuiltinOptions_PackOptions = 59,
- BuiltinOptions_LogicalOrOptions = 60,
- BuiltinOptions_OneHotOptions = 61,
- BuiltinOptions_LogicalAndOptions = 62,
- BuiltinOptions_LogicalNotOptions = 63,
- BuiltinOptions_UnpackOptions = 64,
- BuiltinOptions_FloorDivOptions = 65,
- BuiltinOptions_SquareOptions = 66,
- BuiltinOptions_ZerosLikeOptions = 67,
- BuiltinOptions_FillOptions = 68,
- BuiltinOptions_BidirectionalSequenceLSTMOptions = 69,
- BuiltinOptions_BidirectionalSequenceRNNOptions = 70,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71,
- BuiltinOptions_FloorModOptions = 72,
- BuiltinOptions_RangeOptions = 73,
- BuiltinOptions_ResizeNearestNeighborOptions = 74,
- BuiltinOptions_LeakyReluOptions = 75,
- BuiltinOptions_SquaredDifferenceOptions = 76,
- BuiltinOptions_MirrorPadOptions = 77,
- BuiltinOptions_AbsOptions = 78,
- BuiltinOptions_SplitVOptions = 79,
- BuiltinOptions_UniqueOptions = 80,
- BuiltinOptions_ReverseV2Options = 81,
- BuiltinOptions_AddNOptions = 82,
- BuiltinOptions_GatherNdOptions = 83,
- BuiltinOptions_CosOptions = 84,
- BuiltinOptions_WhereOptions = 85,
- BuiltinOptions_RankOptions = 86,
- BuiltinOptions_ReverseSequenceOptions = 87,
- BuiltinOptions_MatrixDiagOptions = 88,
- BuiltinOptions_QuantizeOptions = 89,
- BuiltinOptions_MatrixSetDiagOptions = 90,
- BuiltinOptions_HardSwishOptions = 91,
- BuiltinOptions_IfOptions = 92,
- BuiltinOptions_WhileOptions = 93,
- BuiltinOptions_DepthToSpaceOptions = 94,
- BuiltinOptions_NonMaxSuppressionV4Options = 95,
- BuiltinOptions_NonMaxSuppressionV5Options = 96,
- BuiltinOptions_ScatterNdOptions = 97,
- BuiltinOptions_SelectV2Options = 98,
- BuiltinOptions_DensifyOptions = 99,
- BuiltinOptions_SegmentSumOptions = 100,
- BuiltinOptions_BatchMatMulOptions = 101,
- BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_BatchMatMulOptions
-};
-
-inline const BuiltinOptions (&EnumValuesBuiltinOptions())[102]
-{
- static const BuiltinOptions values[] = {BuiltinOptions_NONE,
- BuiltinOptions_Conv2DOptions,
- BuiltinOptions_DepthwiseConv2DOptions,
- BuiltinOptions_ConcatEmbeddingsOptions,
- BuiltinOptions_LSHProjectionOptions,
- BuiltinOptions_Pool2DOptions,
- BuiltinOptions_SVDFOptions,
- BuiltinOptions_RNNOptions,
- BuiltinOptions_FullyConnectedOptions,
- BuiltinOptions_SoftmaxOptions,
- BuiltinOptions_ConcatenationOptions,
- BuiltinOptions_AddOptions,
- BuiltinOptions_L2NormOptions,
- BuiltinOptions_LocalResponseNormalizationOptions,
- BuiltinOptions_LSTMOptions,
- BuiltinOptions_ResizeBilinearOptions,
- BuiltinOptions_CallOptions,
- BuiltinOptions_ReshapeOptions,
- BuiltinOptions_SkipGramOptions,
- BuiltinOptions_SpaceToDepthOptions,
- BuiltinOptions_EmbeddingLookupSparseOptions,
- BuiltinOptions_MulOptions,
- BuiltinOptions_PadOptions,
- BuiltinOptions_GatherOptions,
- BuiltinOptions_BatchToSpaceNDOptions,
- BuiltinOptions_SpaceToBatchNDOptions,
- BuiltinOptions_TransposeOptions,
- BuiltinOptions_ReducerOptions,
- BuiltinOptions_SubOptions,
- BuiltinOptions_DivOptions,
- BuiltinOptions_SqueezeOptions,
- BuiltinOptions_SequenceRNNOptions,
- BuiltinOptions_StridedSliceOptions,
- BuiltinOptions_ExpOptions,
- BuiltinOptions_TopKV2Options,
- BuiltinOptions_SplitOptions,
- BuiltinOptions_LogSoftmaxOptions,
- BuiltinOptions_CastOptions,
- BuiltinOptions_DequantizeOptions,
- BuiltinOptions_MaximumMinimumOptions,
- BuiltinOptions_ArgMaxOptions,
- BuiltinOptions_LessOptions,
- BuiltinOptions_NegOptions,
- BuiltinOptions_PadV2Options,
- BuiltinOptions_GreaterOptions,
- BuiltinOptions_GreaterEqualOptions,
- BuiltinOptions_LessEqualOptions,
- BuiltinOptions_SelectOptions,
- BuiltinOptions_SliceOptions,
- BuiltinOptions_TransposeConvOptions,
- BuiltinOptions_SparseToDenseOptions,
- BuiltinOptions_TileOptions,
- BuiltinOptions_ExpandDimsOptions,
- BuiltinOptions_EqualOptions,
- BuiltinOptions_NotEqualOptions,
- BuiltinOptions_ShapeOptions,
- BuiltinOptions_PowOptions,
- BuiltinOptions_ArgMinOptions,
- BuiltinOptions_FakeQuantOptions,
- BuiltinOptions_PackOptions,
- BuiltinOptions_LogicalOrOptions,
- BuiltinOptions_OneHotOptions,
- BuiltinOptions_LogicalAndOptions,
- BuiltinOptions_LogicalNotOptions,
- BuiltinOptions_UnpackOptions,
- BuiltinOptions_FloorDivOptions,
- BuiltinOptions_SquareOptions,
- BuiltinOptions_ZerosLikeOptions,
- BuiltinOptions_FillOptions,
- BuiltinOptions_BidirectionalSequenceLSTMOptions,
- BuiltinOptions_BidirectionalSequenceRNNOptions,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions,
- BuiltinOptions_FloorModOptions,
- BuiltinOptions_RangeOptions,
- BuiltinOptions_ResizeNearestNeighborOptions,
- BuiltinOptions_LeakyReluOptions,
- BuiltinOptions_SquaredDifferenceOptions,
- BuiltinOptions_MirrorPadOptions,
- BuiltinOptions_AbsOptions,
- BuiltinOptions_SplitVOptions,
- BuiltinOptions_UniqueOptions,
- BuiltinOptions_ReverseV2Options,
- BuiltinOptions_AddNOptions,
- BuiltinOptions_GatherNdOptions,
- BuiltinOptions_CosOptions,
- BuiltinOptions_WhereOptions,
- BuiltinOptions_RankOptions,
- BuiltinOptions_ReverseSequenceOptions,
- BuiltinOptions_MatrixDiagOptions,
- BuiltinOptions_QuantizeOptions,
- BuiltinOptions_MatrixSetDiagOptions,
- BuiltinOptions_HardSwishOptions,
- BuiltinOptions_IfOptions,
- BuiltinOptions_WhileOptions,
- BuiltinOptions_DepthToSpaceOptions,
- BuiltinOptions_NonMaxSuppressionV4Options,
- BuiltinOptions_NonMaxSuppressionV5Options,
- BuiltinOptions_ScatterNdOptions,
- BuiltinOptions_SelectV2Options,
- BuiltinOptions_DensifyOptions,
- BuiltinOptions_SegmentSumOptions,
- BuiltinOptions_BatchMatMulOptions};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOptions()
-{
- static const char *const names[] = {"NONE",
- "Conv2DOptions",
- "DepthwiseConv2DOptions",
- "ConcatEmbeddingsOptions",
- "LSHProjectionOptions",
- "Pool2DOptions",
- "SVDFOptions",
- "RNNOptions",
- "FullyConnectedOptions",
- "SoftmaxOptions",
- "ConcatenationOptions",
- "AddOptions",
- "L2NormOptions",
- "LocalResponseNormalizationOptions",
- "LSTMOptions",
- "ResizeBilinearOptions",
- "CallOptions",
- "ReshapeOptions",
- "SkipGramOptions",
- "SpaceToDepthOptions",
- "EmbeddingLookupSparseOptions",
- "MulOptions",
- "PadOptions",
- "GatherOptions",
- "BatchToSpaceNDOptions",
- "SpaceToBatchNDOptions",
- "TransposeOptions",
- "ReducerOptions",
- "SubOptions",
- "DivOptions",
- "SqueezeOptions",
- "SequenceRNNOptions",
- "StridedSliceOptions",
- "ExpOptions",
- "TopKV2Options",
- "SplitOptions",
- "LogSoftmaxOptions",
- "CastOptions",
- "DequantizeOptions",
- "MaximumMinimumOptions",
- "ArgMaxOptions",
- "LessOptions",
- "NegOptions",
- "PadV2Options",
- "GreaterOptions",
- "GreaterEqualOptions",
- "LessEqualOptions",
- "SelectOptions",
- "SliceOptions",
- "TransposeConvOptions",
- "SparseToDenseOptions",
- "TileOptions",
- "ExpandDimsOptions",
- "EqualOptions",
- "NotEqualOptions",
- "ShapeOptions",
- "PowOptions",
- "ArgMinOptions",
- "FakeQuantOptions",
- "PackOptions",
- "LogicalOrOptions",
- "OneHotOptions",
- "LogicalAndOptions",
- "LogicalNotOptions",
- "UnpackOptions",
- "FloorDivOptions",
- "SquareOptions",
- "ZerosLikeOptions",
- "FillOptions",
- "BidirectionalSequenceLSTMOptions",
- "BidirectionalSequenceRNNOptions",
- "UnidirectionalSequenceLSTMOptions",
- "FloorModOptions",
- "RangeOptions",
- "ResizeNearestNeighborOptions",
- "LeakyReluOptions",
- "SquaredDifferenceOptions",
- "MirrorPadOptions",
- "AbsOptions",
- "SplitVOptions",
- "UniqueOptions",
- "ReverseV2Options",
- "AddNOptions",
- "GatherNdOptions",
- "CosOptions",
- "WhereOptions",
- "RankOptions",
- "ReverseSequenceOptions",
- "MatrixDiagOptions",
- "QuantizeOptions",
- "MatrixSetDiagOptions",
- "HardSwishOptions",
- "IfOptions",
- "WhileOptions",
- "DepthToSpaceOptions",
- "NonMaxSuppressionV4Options",
- "NonMaxSuppressionV5Options",
- "ScatterNdOptions",
- "SelectV2Options",
- "DensifyOptions",
- "SegmentSumOptions",
- "BatchMatMulOptions",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOptions(BuiltinOptions e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOptions()[index];
-}
-
-template <typename T> struct BuiltinOptionsTraits
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NONE;
-};
-
-template <> struct BuiltinOptionsTraits<Conv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DepthwiseConv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatEmbeddingsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSHProjectionOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions;
-};
-
-template <> struct BuiltinOptionsTraits<Pool2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SVDFOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FullyConnectedOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatenationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AddOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AddOptions;
-};
-
-template <> struct BuiltinOptionsTraits<L2NormOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LocalResponseNormalizationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeBilinearOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CallOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CallOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReshapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SkipGramOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToDepthOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EmbeddingLookupSparseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MulOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MulOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GatherOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BatchToSpaceNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToBatchNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReducerOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SubOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SubOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SqueezeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<StridedSliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TopKV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<SplitOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogSoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CastOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CastOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DequantizeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MaximumMinimumOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NegOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NegOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SelectOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeConvOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SparseToDenseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TileOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TileOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpandDimsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NotEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ShapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PowOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PowOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMinOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FakeQuantOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalOrOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions;
-};
-
-template <> struct BuiltinOptionsTraits<OneHotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalAndOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalNotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnpackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorDivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquareOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ZerosLikeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FillOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FillOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorModOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RangeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeNearestNeighborOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LeakyReluOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquaredDifferenceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MirrorPadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AbsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SplitVOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UniqueOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReverseV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<AddNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GatherNdOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CosOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CosOptions;
-};
-
-template <> struct BuiltinOptionsTraits<WhereOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RankOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RankOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReverseSequenceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MatrixDiagOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions;
-};
-
-template <> struct BuiltinOptionsTraits<QuantizeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MatrixSetDiagOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions;
-};
-
-template <> struct BuiltinOptionsTraits<HardSwishOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions;
-};
-
-template <> struct BuiltinOptionsTraits<IfOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_IfOptions;
-};
-
-template <> struct BuiltinOptionsTraits<WhileOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DepthToSpaceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NonMaxSuppressionV4Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options;
-};
-
-template <> struct BuiltinOptionsTraits<NonMaxSuppressionV5Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options;
-};
-
-template <> struct BuiltinOptionsTraits<ScatterNdOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SelectV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<DensifyOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SegmentSumOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BatchMatMulOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions;
-};
-
-bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
-bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum Padding
-{
- Padding_SAME = 0,
- Padding_VALID = 1,
- Padding_MIN = Padding_SAME,
- Padding_MAX = Padding_VALID
-};
-
-inline const Padding (&EnumValuesPadding())[2]
-{
- static const Padding values[] = {Padding_SAME, Padding_VALID};
- return values;
-}
-
-inline const char *const *EnumNamesPadding()
-{
- static const char *const names[] = {"SAME", "VALID", nullptr};
- return names;
-}
-
-inline const char *EnumNamePadding(Padding e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesPadding()[index];
-}
-
-enum ActivationFunctionType
-{
- ActivationFunctionType_NONE = 0,
- ActivationFunctionType_RELU = 1,
- ActivationFunctionType_RELU_N1_TO_1 = 2,
- ActivationFunctionType_RELU6 = 3,
- ActivationFunctionType_TANH = 4,
- ActivationFunctionType_SIGN_BIT = 5,
- ActivationFunctionType_MIN = ActivationFunctionType_NONE,
- ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT
-};
-
-inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6]
-{
- static const ActivationFunctionType values[] = {
- ActivationFunctionType_NONE, ActivationFunctionType_RELU,
- ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6,
- ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT};
- return values;
-}
-
-inline const char *const *EnumNamesActivationFunctionType()
-{
- static const char *const names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6",
- "TANH", "SIGN_BIT", nullptr};
- return names;
-}
-
-inline const char *EnumNameActivationFunctionType(ActivationFunctionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesActivationFunctionType()[index];
-}
-
-enum LSHProjectionType
-{
- LSHProjectionType_UNKNOWN = 0,
- LSHProjectionType_SPARSE = 1,
- LSHProjectionType_DENSE = 2,
- LSHProjectionType_MIN = LSHProjectionType_UNKNOWN,
- LSHProjectionType_MAX = LSHProjectionType_DENSE
-};
-
-inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3]
-{
- static const LSHProjectionType values[] = {LSHProjectionType_UNKNOWN, LSHProjectionType_SPARSE,
- LSHProjectionType_DENSE};
- return values;
-}
-
-inline const char *const *EnumNamesLSHProjectionType()
-{
- static const char *const names[] = {"UNKNOWN", "SPARSE", "DENSE", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSHProjectionType(LSHProjectionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSHProjectionType()[index];
-}
-
-enum FullyConnectedOptionsWeightsFormat
-{
- FullyConnectedOptionsWeightsFormat_DEFAULT = 0,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1,
- FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
-};
-
-inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2]
-{
- static const FullyConnectedOptionsWeightsFormat values[] = {
- FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8};
- return values;
-}
-
-inline const char *const *EnumNamesFullyConnectedOptionsWeightsFormat()
-{
- static const char *const names[] = {"DEFAULT", "SHUFFLED4x16INT8", nullptr};
- return names;
-}
-
-inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesFullyConnectedOptionsWeightsFormat()[index];
-}
-
-enum LSTMKernelType
-{
- LSTMKernelType_FULL = 0,
- LSTMKernelType_BASIC = 1,
- LSTMKernelType_MIN = LSTMKernelType_FULL,
- LSTMKernelType_MAX = LSTMKernelType_BASIC
-};
-
-inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2]
-{
- static const LSTMKernelType values[] = {LSTMKernelType_FULL, LSTMKernelType_BASIC};
- return values;
-}
-
-inline const char *const *EnumNamesLSTMKernelType()
-{
- static const char *const names[] = {"FULL", "BASIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSTMKernelType(LSTMKernelType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSTMKernelType()[index];
-}
-
-enum CombinerType
-{
- CombinerType_SUM = 0,
- CombinerType_MEAN = 1,
- CombinerType_SQRTN = 2,
- CombinerType_MIN = CombinerType_SUM,
- CombinerType_MAX = CombinerType_SQRTN
-};
-
-inline const CombinerType (&EnumValuesCombinerType())[3]
-{
- static const CombinerType values[] = {CombinerType_SUM, CombinerType_MEAN, CombinerType_SQRTN};
- return values;
-}
-
-inline const char *const *EnumNamesCombinerType()
-{
- static const char *const names[] = {"SUM", "MEAN", "SQRTN", nullptr};
- return names;
-}
-
-inline const char *EnumNameCombinerType(CombinerType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCombinerType()[index];
-}
-
-enum MirrorPadMode
-{
- MirrorPadMode_REFLECT = 0,
- MirrorPadMode_SYMMETRIC = 1,
- MirrorPadMode_MIN = MirrorPadMode_REFLECT,
- MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC
-};
-
-inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2]
-{
- static const MirrorPadMode values[] = {MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC};
- return values;
-}
-
-inline const char *const *EnumNamesMirrorPadMode()
-{
- static const char *const names[] = {"REFLECT", "SYMMETRIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameMirrorPadMode(MirrorPadMode e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesMirrorPadMode()[index];
-}
-
-enum CustomOptionsFormat
-{
- CustomOptionsFormat_FLEXBUFFERS = 0,
- CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
- CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS
-};
-
-inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1]
-{
- static const CustomOptionsFormat values[] = {CustomOptionsFormat_FLEXBUFFERS};
- return values;
-}
-
-inline const char *const *EnumNamesCustomOptionsFormat()
-{
- static const char *const names[] = {"FLEXBUFFERS", nullptr};
- return names;
-}
-
-inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCustomOptionsFormat()[index];
-}
-
-struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_CUSTOM = 4
- };
- const flatbuffers::Vector<uint8_t> *custom() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) &&
- verifier.VerifyVector(custom()) && verifier.EndTable();
- }
-};
-
-struct CustomQuantizationBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom)
- {
- fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
- }
- explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &);
- flatbuffers::Offset<CustomQuantization> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CustomQuantization>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0)
-{
- CustomQuantizationBuilder builder_(_fbb);
- builder_.add_custom(custom);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantizationDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *custom = nullptr)
-{
- return onert_tflite::CreateCustomQuantization(_fbb,
- custom ? _fbb.CreateVector<uint8_t>(*custom) : 0);
-}
-
-struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_SCALE = 8,
- VT_ZERO_POINT = 10,
- VT_DETAILS_TYPE = 12,
- VT_DETAILS = 14,
- VT_QUANTIZED_DIMENSION = 16
- };
- const flatbuffers::Vector<float> *min() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
- }
- const flatbuffers::Vector<float> *max() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
- }
- const flatbuffers::Vector<float> *scale() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
- }
- const flatbuffers::Vector<int64_t> *zero_point() const
- {
- return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
- }
- QuantizationDetails details_type() const
- {
- return static_cast<QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0));
- }
- const void *details() const { return GetPointer<const void *>(VT_DETAILS); }
- template <typename T> const T *details_as() const;
- const CustomQuantization *details_as_CustomQuantization() const
- {
- return details_type() == QuantizationDetails_CustomQuantization
- ? static_cast<const CustomQuantization *>(details())
- : nullptr;
- }
- int32_t quantized_dimension() const { return GetField<int32_t>(VT_QUANTIZED_DIMENSION, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) &&
- verifier.VerifyVector(min()) && VerifyOffset(verifier, VT_MAX) &&
- verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) &&
- verifier.VerifyVector(scale()) && VerifyOffset(verifier, VT_ZERO_POINT) &&
- verifier.VerifyVector(zero_point()) && VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE) &&
- VerifyOffset(verifier, VT_DETAILS) &&
- VerifyQuantizationDetails(verifier, details(), details_type()) &&
- VerifyField<int32_t>(verifier, VT_QUANTIZED_DIMENSION) && verifier.EndTable();
- }
-};
-
-template <>
-inline const CustomQuantization *QuantizationParameters::details_as<CustomQuantization>() const
-{
- return details_as_CustomQuantization();
-}
-
-struct QuantizationParametersBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
- }
- void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
- }
- void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale)
- {
- fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
- }
- void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point)
- {
- fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
- }
- void add_details_type(QuantizationDetails details_type)
- {
- fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE,
- static_cast<uint8_t>(details_type), 0);
- }
- void add_details(flatbuffers::Offset<void> details)
- {
- fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details);
- }
- void add_quantized_dimension(int32_t quantized_dimension)
- {
- fbb_.AddElement<int32_t>(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension,
- 0);
- }
- explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &);
- flatbuffers::Offset<QuantizationParameters> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<QuantizationParameters>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<QuantizationParameters>
-CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
- flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0)
-{
- QuantizationParametersBuilder builder_(_fbb);
- builder_.add_quantized_dimension(quantized_dimension);
- builder_.add_details(details);
- builder_.add_zero_point(zero_point);
- builder_.add_scale(scale);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_details_type(details_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<float> *min = nullptr,
- const std::vector<float> *max = nullptr, const std::vector<float> *scale = nullptr,
- const std::vector<int64_t> *zero_point = nullptr,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0)
-{
- return onert_tflite::CreateQuantizationParameters(
- _fbb, min ? _fbb.CreateVector<float>(*min) : 0, max ? _fbb.CreateVector<float>(*max) : 0,
- scale ? _fbb.CreateVector<float>(*scale) : 0,
- zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0, details_type, details,
- quantized_dimension);
-}
-
-struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES = 4
- };
- const flatbuffers::Vector<int32_t> *values() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_VALUES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
- verifier.VerifyVector(values()) && verifier.EndTable();
- }
-};
-
-struct Int32VectorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values(flatbuffers::Offset<flatbuffers::Vector<int32_t>> values)
- {
- fbb_.AddOffset(Int32Vector::VT_VALUES, values);
- }
- explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Int32VectorBuilder &operator=(const Int32VectorBuilder &);
- flatbuffers::Offset<Int32Vector> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Int32Vector>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Int32Vector>
-CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> values = 0)
-{
- Int32VectorBuilder builder_(_fbb);
- builder_.add_values(values);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Int32Vector>
-CreateInt32VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *values = nullptr)
-{
- return onert_tflite::CreateInt32Vector(_fbb, values ? _fbb.CreateVector<int32_t>(*values) : 0);
-}
-
-struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES = 4
- };
- const flatbuffers::Vector<uint16_t> *values() const
- {
- return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
- verifier.VerifyVector(values()) && verifier.EndTable();
- }
-};
-
-struct Uint16VectorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values)
- {
- fbb_.AddOffset(Uint16Vector::VT_VALUES, values);
- }
- explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Uint16VectorBuilder &operator=(const Uint16VectorBuilder &);
- flatbuffers::Offset<Uint16Vector> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Uint16Vector>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Uint16Vector>
-CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0)
-{
- Uint16VectorBuilder builder_(_fbb);
- builder_.add_values(values);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Uint16Vector>
-CreateUint16VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint16_t> *values = nullptr)
-{
- return onert_tflite::CreateUint16Vector(_fbb, values ? _fbb.CreateVector<uint16_t>(*values) : 0);
-}
-
-struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES = 4
- };
- const flatbuffers::Vector<uint8_t> *values() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
- verifier.VerifyVector(values()) && verifier.EndTable();
- }
-};
-
-struct Uint8VectorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values)
- {
- fbb_.AddOffset(Uint8Vector::VT_VALUES, values);
- }
- explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Uint8VectorBuilder &operator=(const Uint8VectorBuilder &);
- flatbuffers::Offset<Uint8Vector> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Uint8Vector>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Uint8Vector>
-CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0)
-{
- Uint8VectorBuilder builder_(_fbb);
- builder_.add_values(values);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Uint8Vector>
-CreateUint8VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *values = nullptr)
-{
- return onert_tflite::CreateUint8Vector(_fbb, values ? _fbb.CreateVector<uint8_t>(*values) : 0);
-}
-
-struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FORMAT = 4,
- VT_DENSE_SIZE = 6,
- VT_ARRAY_SEGMENTS_TYPE = 8,
- VT_ARRAY_SEGMENTS = 10,
- VT_ARRAY_INDICES_TYPE = 12,
- VT_ARRAY_INDICES = 14
- };
- DimensionType format() const
- {
- return static_cast<DimensionType>(GetField<int8_t>(VT_FORMAT, 0));
- }
- int32_t dense_size() const { return GetField<int32_t>(VT_DENSE_SIZE, 0); }
- SparseIndexVector array_segments_type() const
- {
- return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_SEGMENTS_TYPE, 0));
- }
- const void *array_segments() const { return GetPointer<const void *>(VT_ARRAY_SEGMENTS); }
- template <typename T> const T *array_segments_as() const;
- const Int32Vector *array_segments_as_Int32Vector() const
- {
- return array_segments_type() == SparseIndexVector_Int32Vector
- ? static_cast<const Int32Vector *>(array_segments())
- : nullptr;
- }
- const Uint16Vector *array_segments_as_Uint16Vector() const
- {
- return array_segments_type() == SparseIndexVector_Uint16Vector
- ? static_cast<const Uint16Vector *>(array_segments())
- : nullptr;
- }
- const Uint8Vector *array_segments_as_Uint8Vector() const
- {
- return array_segments_type() == SparseIndexVector_Uint8Vector
- ? static_cast<const Uint8Vector *>(array_segments())
- : nullptr;
- }
- SparseIndexVector array_indices_type() const
- {
- return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_INDICES_TYPE, 0));
- }
- const void *array_indices() const { return GetPointer<const void *>(VT_ARRAY_INDICES); }
- template <typename T> const T *array_indices_as() const;
- const Int32Vector *array_indices_as_Int32Vector() const
- {
- return array_indices_type() == SparseIndexVector_Int32Vector
- ? static_cast<const Int32Vector *>(array_indices())
- : nullptr;
- }
- const Uint16Vector *array_indices_as_Uint16Vector() const
- {
- return array_indices_type() == SparseIndexVector_Uint16Vector
- ? static_cast<const Uint16Vector *>(array_indices())
- : nullptr;
- }
- const Uint8Vector *array_indices_as_Uint8Vector() const
- {
- return array_indices_type() == SparseIndexVector_Uint8Vector
- ? static_cast<const Uint8Vector *>(array_indices())
- : nullptr;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_FORMAT) &&
- VerifyField<int32_t>(verifier, VT_DENSE_SIZE) &&
- VerifyField<uint8_t>(verifier, VT_ARRAY_SEGMENTS_TYPE) &&
- VerifyOffset(verifier, VT_ARRAY_SEGMENTS) &&
- VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) &&
- VerifyField<uint8_t>(verifier, VT_ARRAY_INDICES_TYPE) &&
- VerifyOffset(verifier, VT_ARRAY_INDICES) &&
- VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) &&
- verifier.EndTable();
- }
-};
-
-template <> inline const Int32Vector *DimensionMetadata::array_segments_as<Int32Vector>() const
-{
- return array_segments_as_Int32Vector();
-}
-
-template <> inline const Uint16Vector *DimensionMetadata::array_segments_as<Uint16Vector>() const
-{
- return array_segments_as_Uint16Vector();
-}
-
-template <> inline const Uint8Vector *DimensionMetadata::array_segments_as<Uint8Vector>() const
-{
- return array_segments_as_Uint8Vector();
-}
-
-template <> inline const Int32Vector *DimensionMetadata::array_indices_as<Int32Vector>() const
-{
- return array_indices_as_Int32Vector();
-}
-
-template <> inline const Uint16Vector *DimensionMetadata::array_indices_as<Uint16Vector>() const
-{
- return array_indices_as_Uint16Vector();
-}
-
-template <> inline const Uint8Vector *DimensionMetadata::array_indices_as<Uint8Vector>() const
-{
- return array_indices_as_Uint8Vector();
-}
-
-struct DimensionMetadataBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_format(DimensionType format)
- {
- fbb_.AddElement<int8_t>(DimensionMetadata::VT_FORMAT, static_cast<int8_t>(format), 0);
- }
- void add_dense_size(int32_t dense_size)
- {
- fbb_.AddElement<int32_t>(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0);
- }
- void add_array_segments_type(SparseIndexVector array_segments_type)
- {
- fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE,
- static_cast<uint8_t>(array_segments_type), 0);
- }
- void add_array_segments(flatbuffers::Offset<void> array_segments)
- {
- fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments);
- }
- void add_array_indices_type(SparseIndexVector array_indices_type)
- {
- fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_INDICES_TYPE,
- static_cast<uint8_t>(array_indices_type), 0);
- }
- void add_array_indices(flatbuffers::Offset<void> array_indices)
- {
- fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices);
- }
- explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DimensionMetadataBuilder &operator=(const DimensionMetadataBuilder &);
- flatbuffers::Offset<DimensionMetadata> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DimensionMetadata>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DimensionMetadata>
-CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb,
- DimensionType format = DimensionType_DENSE, int32_t dense_size = 0,
- SparseIndexVector array_segments_type = SparseIndexVector_NONE,
- flatbuffers::Offset<void> array_segments = 0,
- SparseIndexVector array_indices_type = SparseIndexVector_NONE,
- flatbuffers::Offset<void> array_indices = 0)
-{
- DimensionMetadataBuilder builder_(_fbb);
- builder_.add_array_indices(array_indices);
- builder_.add_array_segments(array_segments);
- builder_.add_dense_size(dense_size);
- builder_.add_array_indices_type(array_indices_type);
- builder_.add_array_segments_type(array_segments_type);
- builder_.add_format(format);
- return builder_.Finish();
-}
-
-struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TRAVERSAL_ORDER = 4,
- VT_BLOCK_MAP = 6,
- VT_DIM_METADATA = 8
- };
- const flatbuffers::Vector<int32_t> *traversal_order() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER);
- }
- const flatbuffers::Vector<int32_t> *block_map() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP);
- }
- const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *>(
- VT_DIM_METADATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TRAVERSAL_ORDER) &&
- verifier.VerifyVector(traversal_order()) && VerifyOffset(verifier, VT_BLOCK_MAP) &&
- verifier.VerifyVector(block_map()) && VerifyOffset(verifier, VT_DIM_METADATA) &&
- verifier.VerifyVector(dim_metadata()) && verifier.VerifyVectorOfTables(dim_metadata()) &&
- verifier.EndTable();
- }
-};
-
-struct SparsityParametersBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_traversal_order(flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order)
- {
- fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order);
- }
- void add_block_map(flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map)
- {
- fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map);
- }
- void add_dim_metadata(
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata)
- {
- fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata);
- }
- explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SparsityParametersBuilder &operator=(const SparsityParametersBuilder &);
- flatbuffers::Offset<SparsityParameters> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SparsityParameters>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata =
- 0)
-{
- SparsityParametersBuilder builder_(_fbb);
- builder_.add_dim_metadata(dim_metadata);
- builder_.add_block_map(block_map);
- builder_.add_traversal_order(traversal_order);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *traversal_order = nullptr,
- const std::vector<int32_t> *block_map = nullptr,
- const std::vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata = nullptr)
-{
- return onert_tflite::CreateSparsityParameters(
- _fbb, traversal_order ? _fbb.CreateVector<int32_t>(*traversal_order) : 0,
- block_map ? _fbb.CreateVector<int32_t>(*block_map) : 0,
- dim_metadata ? _fbb.CreateVector<flatbuffers::Offset<DimensionMetadata>>(*dim_metadata) : 0);
-}
-
-struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SHAPE = 4,
- VT_TYPE = 6,
- VT_BUFFER = 8,
- VT_NAME = 10,
- VT_QUANTIZATION = 12,
- VT_IS_VARIABLE = 14,
- VT_SPARSITY = 16,
- VT_SHAPE_SIGNATURE = 18
- };
- const flatbuffers::Vector<int32_t> *shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
- }
- TensorType type() const { return static_cast<TensorType>(GetField<int8_t>(VT_TYPE, 0)); }
- uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- const QuantizationParameters *quantization() const
- {
- return GetPointer<const QuantizationParameters *>(VT_QUANTIZATION);
- }
- bool is_variable() const { return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0; }
- const SparsityParameters *sparsity() const
- {
- return GetPointer<const SparsityParameters *>(VT_SPARSITY);
- }
- const flatbuffers::Vector<int32_t> *shape_signature() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) &&
- verifier.VerifyVector(shape()) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- VerifyField<uint32_t>(verifier, VT_BUFFER) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyOffset(verifier, VT_QUANTIZATION) &&
- verifier.VerifyTable(quantization()) && VerifyField<uint8_t>(verifier, VT_IS_VARIABLE) &&
- VerifyOffset(verifier, VT_SPARSITY) && verifier.VerifyTable(sparsity()) &&
- VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && verifier.VerifyVector(shape_signature()) &&
- verifier.EndTable();
- }
-};
-
-struct TensorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape)
- {
- fbb_.AddOffset(Tensor::VT_SHAPE, shape);
- }
- void add_type(TensorType type)
- {
- fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0); }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(Tensor::VT_NAME, name);
- }
- void add_quantization(flatbuffers::Offset<QuantizationParameters> quantization)
- {
- fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
- }
- void add_is_variable(bool is_variable)
- {
- fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0);
- }
- void add_sparsity(flatbuffers::Offset<SparsityParameters> sparsity)
- {
- fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity);
- }
- void add_shape_signature(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature)
- {
- fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature);
- }
- explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TensorBuilder &operator=(const TensorBuilder &);
- flatbuffers::Offset<Tensor> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Tensor>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Tensor>
-CreateTensor(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false,
- flatbuffers::Offset<SparsityParameters> sparsity = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature = 0)
-{
- TensorBuilder builder_(_fbb);
- builder_.add_shape_signature(shape_signature);
- builder_.add_sparsity(sparsity);
- builder_.add_quantization(quantization);
- builder_.add_name(name);
- builder_.add_buffer(buffer);
- builder_.add_shape(shape);
- builder_.add_is_variable(is_variable);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Tensor> CreateTensorDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *shape = nullptr,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, const char *name = nullptr,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false,
- flatbuffers::Offset<SparsityParameters> sparsity = 0,
- const std::vector<int32_t> *shape_signature = nullptr)
-{
- return onert_tflite::CreateTensor(
- _fbb, shape ? _fbb.CreateVector<int32_t>(*shape) : 0, type, buffer,
- name ? _fbb.CreateString(name) : 0, quantization, is_variable, sparsity,
- shape_signature ? _fbb.CreateVector<int32_t>(*shape_signature) : 0);
-}
-
-struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FUSED_ACTIVATION_FUNCTION = 10,
- VT_DILATION_W_FACTOR = 12,
- VT_DILATION_H_FACTOR = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct Conv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &);
- flatbuffers::Offset<Conv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Conv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Conv2DOptions>
-CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- Conv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FILTER_WIDTH = 10,
- VT_FILTER_HEIGHT = 12,
- VT_FUSED_ACTIVATION_FUNCTION = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t filter_width() const { return GetField<int32_t>(VT_FILTER_WIDTH, 0); }
- int32_t filter_height() const { return GetField<int32_t>(VT_FILTER_HEIGHT, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) &&
- VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct Pool2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_filter_width(int32_t filter_width)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0);
- }
- void add_filter_height(int32_t filter_height)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &);
- flatbuffers::Offset<Pool2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Pool2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Pool2DOptions>
-CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0, int32_t filter_width = 0,
- int32_t filter_height = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- Pool2DOptionsBuilder builder_(_fbb);
- builder_.add_filter_height(filter_height);
- builder_.add_filter_width(filter_width);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_DEPTH_MULTIPLIER = 10,
- VT_FUSED_ACTIVATION_FUNCTION = 12,
- VT_DILATION_W_FACTOR = 14,
- VT_DILATION_H_FACTOR = 16
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t depth_multiplier() const { return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct DepthwiseConv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_depth_multiplier(int32_t depth_multiplier)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &);
- flatbuffers::Offset<DepthwiseConv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
- flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, int32_t stride_w = 0,
- int32_t stride_h = 0, int32_t depth_multiplier = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- DepthwiseConv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_depth_multiplier(depth_multiplier);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_CHANNELS = 4,
- VT_NUM_COLUMNS_PER_CHANNEL = 6,
- VT_EMBEDDING_DIM_PER_CHANNEL = 8
- };
- int32_t num_channels() const { return GetField<int32_t>(VT_NUM_CHANNELS, 0); }
- const flatbuffers::Vector<int32_t> *num_columns_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
- }
- const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) &&
- VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
- verifier.VerifyVector(num_columns_per_channel()) &&
- VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
- verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable();
- }
-};
-
-struct ConcatEmbeddingsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_channels(int32_t num_channels)
- {
- fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
- }
- void add_num_columns_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
- }
- void add_embedding_dim_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL,
- embedding_dim_per_channel);
- }
- explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &);
- flatbuffers::Offset<ConcatEmbeddingsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0)
-{
- ConcatEmbeddingsOptionsBuilder builder_(_fbb);
- builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
- builder_.add_num_columns_per_channel(num_columns_per_channel);
- builder_.add_num_channels(num_channels);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions>
-CreateConcatEmbeddingsOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- const std::vector<int32_t> *num_columns_per_channel = nullptr,
- const std::vector<int32_t> *embedding_dim_per_channel = nullptr)
-{
- return onert_tflite::CreateConcatEmbeddingsOptions(
- _fbb, num_channels,
- num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0,
- embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0);
-}
-
-struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TYPE = 4
- };
- LSHProjectionType type() const
- {
- return static_cast<LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct LSHProjectionOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_type(LSHProjectionType type)
- {
- fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &);
- flatbuffers::Offset<LSHProjectionOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSHProjectionOptions>
-CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb,
- LSHProjectionType type = LSHProjectionType_UNKNOWN)
-{
- LSHProjectionOptionsBuilder builder_(_fbb);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RANK = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
- };
- int32_t rank() const { return GetField<int32_t>(VT_RANK, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RANK) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct SVDFOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_rank(int32_t rank) { fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &);
- flatbuffers::Offset<SVDFOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SVDFOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SVDFOptions>
-CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t rank = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool asymmetric_quantize_inputs = false)
-{
- SVDFOptionsBuilder builder_(_fbb);
- builder_.add_rank(rank);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 6
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct RNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RNNOptionsBuilder &operator=(const RNNOptionsBuilder &);
- flatbuffers::Offset<RNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RNNOptions>
-CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool asymmetric_quantize_inputs = false)
-{
- RNNOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct SequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major),
- 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &);
- flatbuffers::Offset<SequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool asymmetric_quantize_inputs = false)
-{
- SequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_MERGE_OUTPUTS = 8,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool merge_outputs = false, bool asymmetric_quantize_inputs = false)
-{
- BidirectionalSequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_WEIGHTS_FORMAT = 6,
- VT_KEEP_NUM_DIMS = 8,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- FullyConnectedOptionsWeightsFormat weights_format() const
- {
- return static_cast<FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0));
- }
- bool keep_num_dims() const { return GetField<uint8_t>(VT_KEEP_NUM_DIMS, 0) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) &&
- VerifyField<uint8_t>(verifier, VT_KEEP_NUM_DIMS) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct FullyConnectedOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT,
- static_cast<int8_t>(weights_format), 0);
- }
- void add_keep_num_dims(bool keep_num_dims)
- {
- fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_KEEP_NUM_DIMS,
- static_cast<uint8_t>(keep_num_dims), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &);
- flatbuffers::Offset<FullyConnectedOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT,
- bool keep_num_dims = false, bool asymmetric_quantize_inputs = false)
-{
- FullyConnectedOptionsBuilder builder_(_fbb);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_keep_num_dims(keep_num_dims);
- builder_.add_weights_format(weights_format);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BETA = 4
- };
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_BETA) &&
- verifier.EndTable();
- }
-};
-
-struct SoftmaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_beta(float beta) { fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f); }
- explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &);
- flatbuffers::Offset<SoftmaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SoftmaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SoftmaxOptions>
-CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, float beta = 0.0f)
-{
- SoftmaxOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- return builder_.Finish();
-}
-
-struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct ConcatenationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &);
- flatbuffers::Offset<ConcatenationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatenationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- ConcatenationOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct AddOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AddOptionsBuilder &operator=(const AddOptionsBuilder &);
- flatbuffers::Offset<AddOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AddOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AddOptions>
-CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- AddOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct MulOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MulOptionsBuilder &operator=(const MulOptionsBuilder &);
- flatbuffers::Offset<MulOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MulOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MulOptions>
-CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- MulOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct L2NormOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &);
- flatbuffers::Offset<L2NormOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<L2NormOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<L2NormOptions>
-CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- L2NormOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RADIUS = 4,
- VT_BIAS = 6,
- VT_ALPHA = 8,
- VT_BETA = 10
- };
- int32_t radius() const { return GetField<int32_t>(VT_RADIUS, 0); }
- float bias() const { return GetField<float>(VT_BIAS, 0.0f); }
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RADIUS) &&
- VerifyField<float>(verifier, VT_BIAS) && VerifyField<float>(verifier, VT_ALPHA) &&
- VerifyField<float>(verifier, VT_BETA) && verifier.EndTable();
- }
-};
-
-struct LocalResponseNormalizationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_radius(int32_t radius)
- {
- fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0);
- }
- void add_bias(float bias)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f);
- }
- void add_alpha(float alpha)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f);
- }
- void add_beta(float beta)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f);
- }
- explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LocalResponseNormalizationOptionsBuilder &
- operator=(const LocalResponseNormalizationOptionsBuilder &);
- flatbuffers::Offset<LocalResponseNormalizationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LocalResponseNormalizationOptions>
-CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t radius = 0,
- float bias = 0.0f, float alpha = 0.0f, float beta = 0.0f)
-{
- LocalResponseNormalizationOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- builder_.add_alpha(alpha);
- builder_.add_bias(bias);
- builder_.add_radius(radius);
- return builder_.Finish();
-}
-
-struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_KERNEL_TYPE = 10,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 12
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- LSTMKernelType kernel_type() const
- {
- return static_cast<LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0));
- }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<int8_t>(verifier, VT_KERNEL_TYPE) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct LSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_kernel_type(LSTMKernelType kernel_type)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &);
- flatbuffers::Offset<LSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSTMOptions>
-CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f,
- LSTMKernelType kernel_type = LSTMKernelType_FULL,
- bool asymmetric_quantize_inputs = false)
-{
- LSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_kernel_type(kernel_type);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_TIME_MAJOR = 10,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 12
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct UnidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnidirectionalSequenceLSTMOptionsBuilder &
- operator=(const UnidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
-CreateUnidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool time_major = false,
- bool asymmetric_quantize_inputs = false)
-{
- UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_time_major(time_major);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_MERGE_OUTPUTS = 10,
- VT_TIME_MAJOR = 12,
- VT_ASYMMETRIC_QUANTIZE_INPUTS = 14
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 1) != 0; }
- bool asymmetric_quantize_inputs() const
- {
- return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) &&
- VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 1);
- }
- void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
- static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
- }
- explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceLSTMOptionsBuilder &
- operator=(const BidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool merge_outputs = false,
- bool time_major = true, bool asymmetric_quantize_inputs = false)
-{
- BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
- builder_.add_time_major(time_major);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 8,
- VT_HALF_PIXEL_CENTERS = 10
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool half_pixel_centers() const { return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS) && verifier.EndTable();
- }
-};
-
-struct ResizeBilinearOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- void add_half_pixel_centers(bool half_pixel_centers)
- {
- fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS,
- static_cast<uint8_t>(half_pixel_centers), 0);
- }
- explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &);
- flatbuffers::Offset<ResizeBilinearOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeBilinearOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeBilinearOptions>
-CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false,
- bool half_pixel_centers = false)
-{
- ResizeBilinearOptionsBuilder builder_(_fbb);
- builder_.add_half_pixel_centers(half_pixel_centers);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 4
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- verifier.EndTable();
- }
-};
-
-struct ResizeNearestNeighborOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &);
- flatbuffers::Offset<ResizeNearestNeighborOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeNearestNeighborOptions>
-CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false)
-{
- ResizeNearestNeighborOptionsBuilder builder_(_fbb);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SUBGRAPH = 4
- };
- uint32_t subgraph() const { return GetField<uint32_t>(VT_SUBGRAPH, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
- verifier.EndTable();
- }
-};
-
-struct CallOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_subgraph(uint32_t subgraph)
- {
- fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
- }
- explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CallOptionsBuilder &operator=(const CallOptionsBuilder &);
- flatbuffers::Offset<CallOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CallOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb,
- uint32_t subgraph = 0)
-{
- CallOptionsBuilder builder_(_fbb);
- builder_.add_subgraph(subgraph);
- return builder_.Finish();
-}
-
-struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadOptionsBuilder &operator=(const PadOptionsBuilder &);
- flatbuffers::Offset<PadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &);
- flatbuffers::Offset<PadV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NEW_SHAPE = 4
- };
- const flatbuffers::Vector<int32_t> *new_shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) &&
- verifier.VerifyVector(new_shape()) && verifier.EndTable();
- }
-};
-
-struct ReshapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape)
- {
- fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape);
- }
- explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &);
- flatbuffers::Offset<ReshapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReshapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0)
-{
- ReshapeOptionsBuilder builder_(_fbb);
- builder_.add_new_shape(new_shape);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *new_shape = nullptr)
-{
- return onert_tflite::CreateReshapeOptions(_fbb,
- new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0);
-}
-
-struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SpaceToBatchNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &);
- flatbuffers::Offset<SpaceToBatchNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToBatchNDOptions>
-CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SpaceToBatchNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct BatchToSpaceNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &);
- flatbuffers::Offset<BatchToSpaceNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BatchToSpaceNDOptions>
-CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- BatchToSpaceNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NGRAM_SIZE = 4,
- VT_MAX_SKIP_SIZE = 6,
- VT_INCLUDE_ALL_NGRAMS = 8
- };
- int32_t ngram_size() const { return GetField<int32_t>(VT_NGRAM_SIZE, 0); }
- int32_t max_skip_size() const { return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); }
- bool include_all_ngrams() const { return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) &&
- VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) &&
- VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) && verifier.EndTable();
- }
-};
-
-struct SkipGramOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_ngram_size(int32_t ngram_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0);
- }
- void add_max_skip_size(int32_t max_skip_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0);
- }
- void add_include_all_ngrams(bool include_all_ngrams)
- {
- fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS,
- static_cast<uint8_t>(include_all_ngrams), 0);
- }
- explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &);
- flatbuffers::Offset<SkipGramOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SkipGramOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SkipGramOptions>
-CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t ngram_size = 0,
- int32_t max_skip_size = 0, bool include_all_ngrams = false)
-{
- SkipGramOptionsBuilder builder_(_fbb);
- builder_.add_max_skip_size(max_skip_size);
- builder_.add_ngram_size(ngram_size);
- builder_.add_include_all_ngrams(include_all_ngrams);
- return builder_.Finish();
-}
-
-struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BLOCK_SIZE = 4
- };
- int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
- verifier.EndTable();
- }
-};
-
-struct SpaceToDepthOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_block_size(int32_t block_size)
- {
- fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0);
- }
- explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &);
- flatbuffers::Offset<SpaceToDepthOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToDepthOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToDepthOptions>
-CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
-{
- SpaceToDepthOptionsBuilder builder_(_fbb);
- builder_.add_block_size(block_size);
- return builder_.Finish();
-}
-
-struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BLOCK_SIZE = 4
- };
- int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
- verifier.EndTable();
- }
-};
-
-struct DepthToSpaceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_block_size(int32_t block_size)
- {
- fbb_.AddElement<int32_t>(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0);
- }
- explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &);
- flatbuffers::Offset<DepthToSpaceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DepthToSpaceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DepthToSpaceOptions>
-CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
-{
- DepthToSpaceOptionsBuilder builder_(_fbb);
- builder_.add_block_size(block_size);
- return builder_.Finish();
-}
-
-struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct SubOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SubOptionsBuilder &operator=(const SubOptionsBuilder &);
- flatbuffers::Offset<SubOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SubOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SubOptions>
-CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- SubOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct DivOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DivOptionsBuilder &operator=(const DivOptionsBuilder &);
- flatbuffers::Offset<DivOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DivOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DivOptions>
-CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- DivOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TopKV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &);
- flatbuffers::Offset<TopKV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TopKV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TopKV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_COMBINER = 4
- };
- CombinerType combiner() const
- {
- return static_cast<CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_COMBINER) &&
- verifier.EndTable();
- }
-};
-
-struct EmbeddingLookupSparseOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_combiner(CombinerType combiner)
- {
- fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER,
- static_cast<int8_t>(combiner), 0);
- }
- explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &);
- flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
-CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
- CombinerType combiner = CombinerType_SUM)
-{
- EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
- builder_.add_combiner(combiner);
- return builder_.Finish();
-}
-
-struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct GatherOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); }
- explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GatherOptionsBuilder &operator=(const GatherOptionsBuilder &);
- flatbuffers::Offset<GatherOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GatherOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0)
-{
- GatherOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TransposeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &);
- flatbuffers::Offset<TransposeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TransposeOptions>
-CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TransposeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ExpOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ExpOptionsBuilder &operator=(const ExpOptionsBuilder &);
- flatbuffers::Offset<ExpOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ExpOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ExpOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct CosOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CosOptionsBuilder &operator=(const CosOptionsBuilder &);
- flatbuffers::Offset<CosOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CosOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- CosOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_KEEP_DIMS = 4
- };
- bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) &&
- verifier.EndTable();
- }
-};
-
-struct ReducerOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_keep_dims(bool keep_dims)
- {
- fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0);
- }
- explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &);
- flatbuffers::Offset<ReducerOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReducerOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReducerOptions>
-CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, bool keep_dims = false)
-{
- ReducerOptionsBuilder builder_(_fbb);
- builder_.add_keep_dims(keep_dims);
- return builder_.Finish();
-}
-
-struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SQUEEZE_DIMS = 4
- };
- const flatbuffers::Vector<int32_t> *squeeze_dims() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) &&
- verifier.VerifyVector(squeeze_dims()) && verifier.EndTable();
- }
-};
-
-struct SqueezeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims)
- {
- fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims);
- }
- explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &);
- flatbuffers::Offset<SqueezeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SqueezeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SqueezeOptions>
-CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0)
-{
- SqueezeOptionsBuilder builder_(_fbb);
- builder_.add_squeeze_dims(squeeze_dims);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SqueezeOptions>
-CreateSqueezeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *squeeze_dims = nullptr)
-{
- return onert_tflite::CreateSqueezeOptions(
- _fbb, squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0);
-}
-
-struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_SPLITS = 4
- };
- int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
- verifier.EndTable();
- }
-};
-
-struct SplitOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_splits(int32_t num_splits)
- {
- fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0);
- }
- explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SplitOptionsBuilder &operator=(const SplitOptionsBuilder &);
- flatbuffers::Offset<SplitOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SplitOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num_splits = 0)
-{
- SplitOptionsBuilder builder_(_fbb);
- builder_.add_num_splits(num_splits);
- return builder_.Finish();
-}
-
-struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_SPLITS = 4
- };
- int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
- verifier.EndTable();
- }
-};
-
-struct SplitVOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_splits(int32_t num_splits)
- {
- fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0);
- }
- explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &);
- flatbuffers::Offset<SplitVOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SplitVOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num_splits = 0)
-{
- SplitVOptionsBuilder builder_(_fbb);
- builder_.add_num_splits(num_splits);
- return builder_.Finish();
-}
-
-struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BEGIN_MASK = 4,
- VT_END_MASK = 6,
- VT_ELLIPSIS_MASK = 8,
- VT_NEW_AXIS_MASK = 10,
- VT_SHRINK_AXIS_MASK = 12
- };
- int32_t begin_mask() const { return GetField<int32_t>(VT_BEGIN_MASK, 0); }
- int32_t end_mask() const { return GetField<int32_t>(VT_END_MASK, 0); }
- int32_t ellipsis_mask() const { return GetField<int32_t>(VT_ELLIPSIS_MASK, 0); }
- int32_t new_axis_mask() const { return GetField<int32_t>(VT_NEW_AXIS_MASK, 0); }
- int32_t shrink_axis_mask() const { return GetField<int32_t>(VT_SHRINK_AXIS_MASK, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BEGIN_MASK) &&
- VerifyField<int32_t>(verifier, VT_END_MASK) &&
- VerifyField<int32_t>(verifier, VT_ELLIPSIS_MASK) &&
- VerifyField<int32_t>(verifier, VT_NEW_AXIS_MASK) &&
- VerifyField<int32_t>(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable();
- }
-};
-
-struct StridedSliceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_begin_mask(int32_t begin_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0);
- }
- void add_end_mask(int32_t end_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_END_MASK, end_mask, 0);
- }
- void add_ellipsis_mask(int32_t ellipsis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0);
- }
- void add_new_axis_mask(int32_t new_axis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0);
- }
- void add_shrink_axis_mask(int32_t shrink_axis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0);
- }
- explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &);
- flatbuffers::Offset<StridedSliceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<StridedSliceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<StridedSliceOptions>
-CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t begin_mask = 0,
- int32_t end_mask = 0, int32_t ellipsis_mask = 0,
- int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0)
-{
- StridedSliceOptionsBuilder builder_(_fbb);
- builder_.add_shrink_axis_mask(shrink_axis_mask);
- builder_.add_new_axis_mask(new_axis_mask);
- builder_.add_ellipsis_mask(ellipsis_mask);
- builder_.add_end_mask(end_mask);
- builder_.add_begin_mask(begin_mask);
- return builder_.Finish();
-}
-
-struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogSoftmaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &);
- flatbuffers::Offset<LogSoftmaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogSoftmaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogSoftmaxOptions>
-CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogSoftmaxOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_IN_DATA_TYPE = 4,
- VT_OUT_DATA_TYPE = 6
- };
- TensorType in_data_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_IN_DATA_TYPE, 0));
- }
- TensorType out_data_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE) &&
- VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable();
- }
-};
-
-struct CastOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_in_data_type(TensorType in_data_type)
- {
- fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0);
- }
- void add_out_data_type(TensorType out_data_type)
- {
- fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0);
- }
- explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CastOptionsBuilder &operator=(const CastOptionsBuilder &);
- flatbuffers::Offset<CastOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CastOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CastOptions>
-CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType in_data_type = TensorType_FLOAT32,
- TensorType out_data_type = TensorType_FLOAT32)
-{
- CastOptionsBuilder builder_(_fbb);
- builder_.add_out_data_type(out_data_type);
- builder_.add_in_data_type(in_data_type);
- return builder_.Finish();
-}
-
-struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct DequantizeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &);
- flatbuffers::Offset<DequantizeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DequantizeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DequantizeOptions>
-CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- DequantizeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct MaximumMinimumOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &);
- flatbuffers::Offset<MaximumMinimumOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MaximumMinimumOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MaximumMinimumOptions>
-CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- MaximumMinimumOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TileOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TileOptionsBuilder &operator=(const TileOptionsBuilder &);
- flatbuffers::Offset<TileOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TileOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TileOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUTPUT_TYPE = 4
- };
- TensorType output_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ArgMaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_type(TensorType output_type)
- {
- fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
- }
- explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &);
- flatbuffers::Offset<ArgMaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArgMaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ArgMaxOptions>
-CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType output_type = TensorType_FLOAT32)
-{
- ArgMaxOptionsBuilder builder_(_fbb);
- builder_.add_output_type(output_type);
- return builder_.Finish();
-}
-
-struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUTPUT_TYPE = 4
- };
- TensorType output_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ArgMinOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_type(TensorType output_type)
- {
- fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
- }
- explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &);
- flatbuffers::Offset<ArgMinOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArgMinOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ArgMinOptions>
-CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType output_type = TensorType_FLOAT32)
-{
- ArgMinOptionsBuilder builder_(_fbb);
- builder_.add_output_type(output_type);
- return builder_.Finish();
-}
-
-struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GreaterOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GreaterOptionsBuilder &operator=(const GreaterOptionsBuilder &);
- flatbuffers::Offset<GreaterOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GreaterOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GreaterOptions>
-CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GreaterOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GreaterEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &);
- flatbuffers::Offset<GreaterEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GreaterEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GreaterEqualOptions>
-CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GreaterEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LessOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LessOptionsBuilder &operator=(const LessOptionsBuilder &);
- flatbuffers::Offset<LessOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LessOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LessOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LessEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &);
- flatbuffers::Offset<LessEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LessEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LessEqualOptions>
-CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LessEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NegOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NegOptionsBuilder &operator=(const NegOptionsBuilder &);
- flatbuffers::Offset<NegOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NegOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NegOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SelectOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SelectOptionsBuilder &operator=(const SelectOptionsBuilder &);
- flatbuffers::Offset<SelectOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SelectOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SelectOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SliceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SliceOptionsBuilder &operator=(const SliceOptionsBuilder &);
- flatbuffers::Offset<SliceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SliceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SliceOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) && verifier.EndTable();
- }
-};
-
-struct TransposeConvOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_H, stride_h, 0);
- }
- explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &);
- flatbuffers::Offset<TransposeConvOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeConvOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TransposeConvOptions>
-CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0)
-{
- TransposeConvOptionsBuilder builder_(_fbb);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ExpandDimsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &);
- flatbuffers::Offset<ExpandDimsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ExpandDimsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ExpandDimsOptions>
-CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ExpandDimsOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALIDATE_INDICES = 4
- };
- bool validate_indices() const { return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) &&
- verifier.EndTable();
- }
-};
-
-struct SparseToDenseOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_validate_indices(bool validate_indices)
- {
- fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES,
- static_cast<uint8_t>(validate_indices), 0);
- }
- explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &);
- flatbuffers::Offset<SparseToDenseOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SparseToDenseOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SparseToDenseOptions>
-CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, bool validate_indices = false)
-{
- SparseToDenseOptionsBuilder builder_(_fbb);
- builder_.add_validate_indices(validate_indices);
- return builder_.Finish();
-}
-
-struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct EqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- EqualOptionsBuilder &operator=(const EqualOptionsBuilder &);
- flatbuffers::Offset<EqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<EqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- EqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NotEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &);
- flatbuffers::Offset<NotEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NotEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NotEqualOptions>
-CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NotEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUT_TYPE = 4
- };
- TensorType out_type() const { return static_cast<TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ShapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_out_type(TensorType out_type)
- {
- fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0);
- }
- explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &);
- flatbuffers::Offset<ShapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ShapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ShapeOptions>
-CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, TensorType out_type = TensorType_FLOAT32)
-{
- ShapeOptionsBuilder builder_(_fbb);
- builder_.add_out_type(out_type);
- return builder_.Finish();
-}
-
-struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct RankOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RankOptionsBuilder &operator=(const RankOptionsBuilder &);
- flatbuffers::Offset<RankOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RankOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- RankOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PowOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PowOptionsBuilder &operator=(const PowOptionsBuilder &);
- flatbuffers::Offset<PowOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PowOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PowOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_NUM_BITS = 8,
- VT_NARROW_RANGE = 10
- };
- float min() const { return GetField<float>(VT_MIN, 0.0f); }
- float max() const { return GetField<float>(VT_MAX, 0.0f); }
- int32_t num_bits() const { return GetField<int32_t>(VT_NUM_BITS, 0); }
- bool narrow_range() const { return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_MIN) &&
- VerifyField<float>(verifier, VT_MAX) && VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
- VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) && verifier.EndTable();
- }
-};
-
-struct FakeQuantOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(float min) { fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f); }
- void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); }
- void add_num_bits(int32_t num_bits)
- {
- fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
- }
- void add_narrow_range(bool narrow_range)
- {
- fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range),
- 0);
- }
- explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &);
- flatbuffers::Offset<FakeQuantOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FakeQuantOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FakeQuantOptions>
-CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, float min = 0.0f, float max = 0.0f,
- int32_t num_bits = 0, bool narrow_range = false)
-{
- FakeQuantOptionsBuilder builder_(_fbb);
- builder_.add_num_bits(num_bits);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_narrow_range(narrow_range);
- return builder_.Finish();
-}
-
-struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES_COUNT = 4,
- VT_AXIS = 6
- };
- int32_t values_count() const { return GetField<int32_t>(VT_VALUES_COUNT, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_VALUES_COUNT) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct PackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values_count(int32_t values_count)
- {
- fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0);
- }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0); }
- explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PackOptionsBuilder &operator=(const PackOptionsBuilder &);
- flatbuffers::Offset<PackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PackOptions>
-CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t values_count = 0, int32_t axis = 0)
-{
- PackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_values_count(values_count);
- return builder_.Finish();
-}
-
-struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalOrOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &);
- flatbuffers::Offset<LogicalOrOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalOrOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalOrOptions>
-CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalOrOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct OneHotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0); }
- explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &);
- flatbuffers::Offset<OneHotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OneHotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0)
-{
- OneHotOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct AbsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AbsOptionsBuilder &operator=(const AbsOptionsBuilder &);
- flatbuffers::Offset<AbsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AbsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- AbsOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct HardSwishOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- HardSwishOptionsBuilder &operator=(const HardSwishOptionsBuilder &);
- flatbuffers::Offset<HardSwishOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<HardSwishOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<HardSwishOptions>
-CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- HardSwishOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalAndOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalAndOptionsBuilder &operator=(const LogicalAndOptionsBuilder &);
- flatbuffers::Offset<LogicalAndOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalAndOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalAndOptions>
-CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalAndOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalNotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &);
- flatbuffers::Offset<LogicalNotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalNotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalNotOptions>
-CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalNotOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM = 4,
- VT_AXIS = 6
- };
- int32_t num() const { return GetField<int32_t>(VT_NUM, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct UnpackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num(int32_t num) { fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0); }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0); }
- explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &);
- flatbuffers::Offset<UnpackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnpackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num = 0, int32_t axis = 0)
-{
- UnpackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_num(num);
- return builder_.Finish();
-}
-
-struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorDivOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &);
- flatbuffers::Offset<FloorDivOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorDivOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorDivOptions>
-CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorDivOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquareOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquareOptionsBuilder &operator=(const SquareOptionsBuilder &);
- flatbuffers::Offset<SquareOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquareOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquareOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ZerosLikeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &);
- flatbuffers::Offset<ZerosLikeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ZerosLikeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ZerosLikeOptions>
-CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ZerosLikeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FillOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FillOptionsBuilder &operator=(const FillOptionsBuilder &);
- flatbuffers::Offset<FillOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FillOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FillOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorModOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &);
- flatbuffers::Offset<FloorModOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorModOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorModOptions>
-CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorModOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct RangeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RangeOptionsBuilder &operator=(const RangeOptionsBuilder &);
- flatbuffers::Offset<RangeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RangeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- RangeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALPHA = 4
- };
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_ALPHA) &&
- verifier.EndTable();
- }
-};
-
-struct LeakyReluOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_alpha(float alpha) { fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); }
- explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &);
- flatbuffers::Offset<LeakyReluOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LeakyReluOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LeakyReluOptions>
-CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, float alpha = 0.0f)
-{
- LeakyReluOptionsBuilder builder_(_fbb);
- builder_.add_alpha(alpha);
- return builder_.Finish();
-}
-
-struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquaredDifferenceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &);
- flatbuffers::Offset<SquaredDifferenceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquaredDifferenceOptions>
-CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquaredDifferenceOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MODE = 4
- };
- MirrorPadMode mode() const { return static_cast<MirrorPadMode>(GetField<int8_t>(VT_MODE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_MODE) &&
- verifier.EndTable();
- }
-};
-
-struct MirrorPadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_mode(MirrorPadMode mode)
- {
- fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
- }
- explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &);
- flatbuffers::Offset<MirrorPadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MirrorPadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MirrorPadOptions>
-CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb,
- MirrorPadMode mode = MirrorPadMode_REFLECT)
-{
- MirrorPadOptionsBuilder builder_(_fbb);
- builder_.add_mode(mode);
- return builder_.Finish();
-}
-
-struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_IDX_OUT_TYPE = 4
- };
- TensorType idx_out_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_IDX_OUT_TYPE, 2));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IDX_OUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct UniqueOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_idx_out_type(TensorType idx_out_type)
- {
- fbb_.AddElement<int8_t>(UniqueOptions::VT_IDX_OUT_TYPE, static_cast<int8_t>(idx_out_type), 2);
- }
- explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UniqueOptionsBuilder &operator=(const UniqueOptionsBuilder &);
- flatbuffers::Offset<UniqueOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UniqueOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UniqueOptions>
-CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType idx_out_type = TensorType_INT32)
-{
- UniqueOptionsBuilder builder_(_fbb);
- builder_.add_idx_out_type(idx_out_type);
- return builder_.Finish();
-}
-
-struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ReverseV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReverseV2OptionsBuilder &operator=(const ReverseV2OptionsBuilder &);
- flatbuffers::Offset<ReverseV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReverseV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReverseV2Options>
-CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ReverseV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct AddNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AddNOptionsBuilder &operator=(const AddNOptionsBuilder &);
- flatbuffers::Offset<AddNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AddNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AddNOptions> CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- AddNOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GatherNdOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GatherNdOptionsBuilder &operator=(const GatherNdOptionsBuilder &);
- flatbuffers::Offset<GatherNdOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GatherNdOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GatherNdOptions>
-CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GatherNdOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct WhereOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- WhereOptionsBuilder &operator=(const WhereOptionsBuilder &);
- flatbuffers::Offset<WhereOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<WhereOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- WhereOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SEQ_DIM = 4,
- VT_BATCH_DIM = 6
- };
- int32_t seq_dim() const { return GetField<int32_t>(VT_SEQ_DIM, 0); }
- int32_t batch_dim() const { return GetField<int32_t>(VT_BATCH_DIM, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_SEQ_DIM) &&
- VerifyField<int32_t>(verifier, VT_BATCH_DIM) && verifier.EndTable();
- }
-};
-
-struct ReverseSequenceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_seq_dim(int32_t seq_dim)
- {
- fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0);
- }
- void add_batch_dim(int32_t batch_dim)
- {
- fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0);
- }
- explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReverseSequenceOptionsBuilder &operator=(const ReverseSequenceOptionsBuilder &);
- flatbuffers::Offset<ReverseSequenceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReverseSequenceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReverseSequenceOptions>
-CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t seq_dim = 0,
- int32_t batch_dim = 0)
-{
- ReverseSequenceOptionsBuilder builder_(_fbb);
- builder_.add_batch_dim(batch_dim);
- builder_.add_seq_dim(seq_dim);
- return builder_.Finish();
-}
-
-struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct MatrixDiagOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MatrixDiagOptionsBuilder &operator=(const MatrixDiagOptionsBuilder &);
- flatbuffers::Offset<MatrixDiagOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MatrixDiagOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MatrixDiagOptions>
-CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- MatrixDiagOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct QuantizeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- QuantizeOptionsBuilder &operator=(const QuantizeOptionsBuilder &);
- flatbuffers::Offset<QuantizeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<QuantizeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<QuantizeOptions>
-CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- QuantizeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct MatrixSetDiagOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MatrixSetDiagOptionsBuilder &operator=(const MatrixSetDiagOptionsBuilder &);
- flatbuffers::Offset<MatrixSetDiagOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MatrixSetDiagOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MatrixSetDiagOptions>
-CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- MatrixSetDiagOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_THEN_SUBGRAPH_INDEX = 4,
- VT_ELSE_SUBGRAPH_INDEX = 6
- };
- int32_t then_subgraph_index() const { return GetField<int32_t>(VT_THEN_SUBGRAPH_INDEX, 0); }
- int32_t else_subgraph_index() const { return GetField<int32_t>(VT_ELSE_SUBGRAPH_INDEX, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_THEN_SUBGRAPH_INDEX) &&
- VerifyField<int32_t>(verifier, VT_ELSE_SUBGRAPH_INDEX) && verifier.EndTable();
- }
-};
-
-struct IfOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_then_subgraph_index(int32_t then_subgraph_index)
- {
- fbb_.AddElement<int32_t>(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0);
- }
- void add_else_subgraph_index(int32_t else_subgraph_index)
- {
- fbb_.AddElement<int32_t>(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0);
- }
- explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- IfOptionsBuilder &operator=(const IfOptionsBuilder &);
- flatbuffers::Offset<IfOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<IfOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t then_subgraph_index = 0,
- int32_t else_subgraph_index = 0)
-{
- IfOptionsBuilder builder_(_fbb);
- builder_.add_else_subgraph_index(else_subgraph_index);
- builder_.add_then_subgraph_index(then_subgraph_index);
- return builder_.Finish();
-}
-
-struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_COND_SUBGRAPH_INDEX = 4,
- VT_BODY_SUBGRAPH_INDEX = 6
- };
- int32_t cond_subgraph_index() const { return GetField<int32_t>(VT_COND_SUBGRAPH_INDEX, 0); }
- int32_t body_subgraph_index() const { return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX) &&
- VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX) && verifier.EndTable();
- }
-};
-
-struct WhileOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_cond_subgraph_index(int32_t cond_subgraph_index)
- {
- fbb_.AddElement<int32_t>(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
- }
- void add_body_subgraph_index(int32_t body_subgraph_index)
- {
- fbb_.AddElement<int32_t>(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
- }
- explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- WhileOptionsBuilder &operator=(const WhileOptionsBuilder &);
- flatbuffers::Offset<WhileOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<WhileOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t cond_subgraph_index = 0,
- int32_t body_subgraph_index = 0)
-{
- WhileOptionsBuilder builder_(_fbb);
- builder_.add_body_subgraph_index(body_subgraph_index);
- builder_.add_cond_subgraph_index(cond_subgraph_index);
- return builder_.Finish();
-}
-
-struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NonMaxSuppressionV4OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NonMaxSuppressionV4OptionsBuilder &operator=(const NonMaxSuppressionV4OptionsBuilder &);
- flatbuffers::Offset<NonMaxSuppressionV4Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NonMaxSuppressionV4Options>
-CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NonMaxSuppressionV4OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NonMaxSuppressionV5OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NonMaxSuppressionV5OptionsBuilder &operator=(const NonMaxSuppressionV5OptionsBuilder &);
- flatbuffers::Offset<NonMaxSuppressionV5Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NonMaxSuppressionV5Options>
-CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NonMaxSuppressionV5OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ScatterNdOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &);
- flatbuffers::Offset<ScatterNdOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ScatterNdOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ScatterNdOptions>
-CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ScatterNdOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SelectV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SelectV2OptionsBuilder &operator=(const SelectV2OptionsBuilder &);
- flatbuffers::Offset<SelectV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SelectV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SelectV2Options>
-CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SelectV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct DensifyOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DensifyOptionsBuilder &operator=(const DensifyOptionsBuilder &);
- flatbuffers::Offset<DensifyOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DensifyOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DensifyOptions>
-CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- DensifyOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SegmentSumOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SegmentSumOptionsBuilder &operator=(const SegmentSumOptionsBuilder &);
- flatbuffers::Offset<SegmentSumOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SegmentSumOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SegmentSumOptions>
-CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SegmentSumOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ADJOINT_LHS = 4,
- VT_ADJOINT_RHS = 6
- };
- bool adjoint_lhs() const { return GetField<uint8_t>(VT_ADJOINT_LHS, 0) != 0; }
- bool adjoint_rhs() const { return GetField<uint8_t>(VT_ADJOINT_RHS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ADJOINT_LHS) &&
- VerifyField<uint8_t>(verifier, VT_ADJOINT_RHS) && verifier.EndTable();
- }
-};
-
-struct BatchMatMulOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_adjoint_lhs(bool adjoint_lhs)
- {
- fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_LHS, static_cast<uint8_t>(adjoint_lhs),
- 0);
- }
- void add_adjoint_rhs(bool adjoint_rhs)
- {
- fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_RHS, static_cast<uint8_t>(adjoint_rhs),
- 0);
- }
- explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BatchMatMulOptionsBuilder &operator=(const BatchMatMulOptionsBuilder &);
- flatbuffers::Offset<BatchMatMulOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BatchMatMulOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BatchMatMulOptions>
-CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, bool adjoint_lhs = false,
- bool adjoint_rhs = false)
-{
- BatchMatMulOptionsBuilder builder_(_fbb);
- builder_.add_adjoint_rhs(adjoint_rhs);
- builder_.add_adjoint_lhs(adjoint_lhs);
- return builder_.Finish();
-}
-
-struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BUILTIN_CODE = 4,
- VT_CUSTOM_CODE = 6,
- VT_VERSION = 8
- };
- BuiltinOperator builtin_code() const
- {
- return static_cast<BuiltinOperator>(GetField<int8_t>(VT_BUILTIN_CODE, 0));
- }
- const flatbuffers::String *custom_code() const
- {
- return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
- }
- int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_BUILTIN_CODE) &&
- VerifyOffset(verifier, VT_CUSTOM_CODE) && verifier.VerifyString(custom_code()) &&
- VerifyField<int32_t>(verifier, VT_VERSION) && verifier.EndTable();
- }
-};
-
-struct OperatorCodeBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_builtin_code(BuiltinOperator builtin_code)
- {
- fbb_.AddElement<int8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int8_t>(builtin_code), 0);
- }
- void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code)
- {
- fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
- }
- void add_version(int32_t version)
- {
- fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
- }
- explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorCodeBuilder &operator=(const OperatorCodeBuilder &);
- flatbuffers::Offset<OperatorCode> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OperatorCode>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- flatbuffers::Offset<flatbuffers::String> custom_code = 0, int32_t version = 1)
-{
- OperatorCodeBuilder builder_(_fbb);
- builder_.add_version(version);
- builder_.add_custom_code(custom_code);
- builder_.add_builtin_code(builtin_code);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCodeDirect(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- const char *custom_code = nullptr, int32_t version = 1)
-{
- return onert_tflite::CreateOperatorCode(
- _fbb, builtin_code, custom_code ? _fbb.CreateString(custom_code) : 0, version);
-}
-
-struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OPCODE_INDEX = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_BUILTIN_OPTIONS_TYPE = 10,
- VT_BUILTIN_OPTIONS = 12,
- VT_CUSTOM_OPTIONS = 14,
- VT_CUSTOM_OPTIONS_FORMAT = 16,
- VT_MUTATING_VARIABLE_INPUTS = 18,
- VT_INTERMEDIATES = 20
- };
- uint32_t opcode_index() const { return GetField<uint32_t>(VT_OPCODE_INDEX, 0); }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- BuiltinOptions builtin_options_type() const
- {
- return static_cast<BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
- }
- const void *builtin_options() const { return GetPointer<const void *>(VT_BUILTIN_OPTIONS); }
- template <typename T> const T *builtin_options_as() const;
- const Conv2DOptions *builtin_options_as_Conv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Conv2DOptions
- ? static_cast<const Conv2DOptions *>(builtin_options())
- : nullptr;
- }
- const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions
- ? static_cast<const DepthwiseConv2DOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions
- ? static_cast<const ConcatEmbeddingsOptions *>(builtin_options())
- : nullptr;
- }
- const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSHProjectionOptions
- ? static_cast<const LSHProjectionOptions *>(builtin_options())
- : nullptr;
- }
- const Pool2DOptions *builtin_options_as_Pool2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Pool2DOptions
- ? static_cast<const Pool2DOptions *>(builtin_options())
- : nullptr;
- }
- const SVDFOptions *builtin_options_as_SVDFOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SVDFOptions
- ? static_cast<const SVDFOptions *>(builtin_options())
- : nullptr;
- }
- const RNNOptions *builtin_options_as_RNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RNNOptions
- ? static_cast<const RNNOptions *>(builtin_options())
- : nullptr;
- }
- const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FullyConnectedOptions
- ? static_cast<const FullyConnectedOptions *>(builtin_options())
- : nullptr;
- }
- const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SoftmaxOptions
- ? static_cast<const SoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatenationOptions
- ? static_cast<const ConcatenationOptions *>(builtin_options())
- : nullptr;
- }
- const AddOptions *builtin_options_as_AddOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AddOptions
- ? static_cast<const AddOptions *>(builtin_options())
- : nullptr;
- }
- const L2NormOptions *builtin_options_as_L2NormOptions() const
- {
- return builtin_options_type() == BuiltinOptions_L2NormOptions
- ? static_cast<const L2NormOptions *>(builtin_options())
- : nullptr;
- }
- const LocalResponseNormalizationOptions *
- builtin_options_as_LocalResponseNormalizationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions
- ? static_cast<const LocalResponseNormalizationOptions *>(builtin_options())
- : nullptr;
- }
- const LSTMOptions *builtin_options_as_LSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSTMOptions
- ? static_cast<const LSTMOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions
- ? static_cast<const ResizeBilinearOptions *>(builtin_options())
- : nullptr;
- }
- const CallOptions *builtin_options_as_CallOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CallOptions
- ? static_cast<const CallOptions *>(builtin_options())
- : nullptr;
- }
- const ReshapeOptions *builtin_options_as_ReshapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReshapeOptions
- ? static_cast<const ReshapeOptions *>(builtin_options())
- : nullptr;
- }
- const SkipGramOptions *builtin_options_as_SkipGramOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SkipGramOptions
- ? static_cast<const SkipGramOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions
- ? static_cast<const SpaceToDepthOptions *>(builtin_options())
- : nullptr;
- }
- const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions
- ? static_cast<const EmbeddingLookupSparseOptions *>(builtin_options())
- : nullptr;
- }
- const MulOptions *builtin_options_as_MulOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MulOptions
- ? static_cast<const MulOptions *>(builtin_options())
- : nullptr;
- }
- const PadOptions *builtin_options_as_PadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PadOptions
- ? static_cast<const PadOptions *>(builtin_options())
- : nullptr;
- }
- const GatherOptions *builtin_options_as_GatherOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GatherOptions
- ? static_cast<const GatherOptions *>(builtin_options())
- : nullptr;
- }
- const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions
- ? static_cast<const BatchToSpaceNDOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions
- ? static_cast<const SpaceToBatchNDOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeOptions *builtin_options_as_TransposeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeOptions
- ? static_cast<const TransposeOptions *>(builtin_options())
- : nullptr;
- }
- const ReducerOptions *builtin_options_as_ReducerOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReducerOptions
- ? static_cast<const ReducerOptions *>(builtin_options())
- : nullptr;
- }
- const SubOptions *builtin_options_as_SubOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SubOptions
- ? static_cast<const SubOptions *>(builtin_options())
- : nullptr;
- }
- const DivOptions *builtin_options_as_DivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DivOptions
- ? static_cast<const DivOptions *>(builtin_options())
- : nullptr;
- }
- const SqueezeOptions *builtin_options_as_SqueezeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SqueezeOptions
- ? static_cast<const SqueezeOptions *>(builtin_options())
- : nullptr;
- }
- const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SequenceRNNOptions
- ? static_cast<const SequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_StridedSliceOptions
- ? static_cast<const StridedSliceOptions *>(builtin_options())
- : nullptr;
- }
- const ExpOptions *builtin_options_as_ExpOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpOptions
- ? static_cast<const ExpOptions *>(builtin_options())
- : nullptr;
- }
- const TopKV2Options *builtin_options_as_TopKV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_TopKV2Options
- ? static_cast<const TopKV2Options *>(builtin_options())
- : nullptr;
- }
- const SplitOptions *builtin_options_as_SplitOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitOptions
- ? static_cast<const SplitOptions *>(builtin_options())
- : nullptr;
- }
- const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions
- ? static_cast<const LogSoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const CastOptions *builtin_options_as_CastOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CastOptions
- ? static_cast<const CastOptions *>(builtin_options())
- : nullptr;
- }
- const DequantizeOptions *builtin_options_as_DequantizeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DequantizeOptions
- ? static_cast<const DequantizeOptions *>(builtin_options())
- : nullptr;
- }
- const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions
- ? static_cast<const MaximumMinimumOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMaxOptions
- ? static_cast<const ArgMaxOptions *>(builtin_options())
- : nullptr;
- }
- const LessOptions *builtin_options_as_LessOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessOptions
- ? static_cast<const LessOptions *>(builtin_options())
- : nullptr;
- }
- const NegOptions *builtin_options_as_NegOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NegOptions
- ? static_cast<const NegOptions *>(builtin_options())
- : nullptr;
- }
- const PadV2Options *builtin_options_as_PadV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_PadV2Options
- ? static_cast<const PadV2Options *>(builtin_options())
- : nullptr;
- }
- const GreaterOptions *builtin_options_as_GreaterOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterOptions
- ? static_cast<const GreaterOptions *>(builtin_options())
- : nullptr;
- }
- const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterEqualOptions
- ? static_cast<const GreaterEqualOptions *>(builtin_options())
- : nullptr;
- }
- const LessEqualOptions *builtin_options_as_LessEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessEqualOptions
- ? static_cast<const LessEqualOptions *>(builtin_options())
- : nullptr;
- }
- const SelectOptions *builtin_options_as_SelectOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SelectOptions
- ? static_cast<const SelectOptions *>(builtin_options())
- : nullptr;
- }
- const SliceOptions *builtin_options_as_SliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SliceOptions
- ? static_cast<const SliceOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeConvOptions
- ? static_cast<const TransposeConvOptions *>(builtin_options())
- : nullptr;
- }
- const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SparseToDenseOptions
- ? static_cast<const SparseToDenseOptions *>(builtin_options())
- : nullptr;
- }
- const TileOptions *builtin_options_as_TileOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TileOptions
- ? static_cast<const TileOptions *>(builtin_options())
- : nullptr;
- }
- const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpandDimsOptions
- ? static_cast<const ExpandDimsOptions *>(builtin_options())
- : nullptr;
- }
- const EqualOptions *builtin_options_as_EqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EqualOptions
- ? static_cast<const EqualOptions *>(builtin_options())
- : nullptr;
- }
- const NotEqualOptions *builtin_options_as_NotEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NotEqualOptions
- ? static_cast<const NotEqualOptions *>(builtin_options())
- : nullptr;
- }
- const ShapeOptions *builtin_options_as_ShapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ShapeOptions
- ? static_cast<const ShapeOptions *>(builtin_options())
- : nullptr;
- }
- const PowOptions *builtin_options_as_PowOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PowOptions
- ? static_cast<const PowOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMinOptions *builtin_options_as_ArgMinOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMinOptions
- ? static_cast<const ArgMinOptions *>(builtin_options())
- : nullptr;
- }
- const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FakeQuantOptions
- ? static_cast<const FakeQuantOptions *>(builtin_options())
- : nullptr;
- }
- const PackOptions *builtin_options_as_PackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PackOptions
- ? static_cast<const PackOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalOrOptions
- ? static_cast<const LogicalOrOptions *>(builtin_options())
- : nullptr;
- }
- const OneHotOptions *builtin_options_as_OneHotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_OneHotOptions
- ? static_cast<const OneHotOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalAndOptions
- ? static_cast<const LogicalAndOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalNotOptions
- ? static_cast<const LogicalNotOptions *>(builtin_options())
- : nullptr;
- }
- const UnpackOptions *builtin_options_as_UnpackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnpackOptions
- ? static_cast<const UnpackOptions *>(builtin_options())
- : nullptr;
- }
- const FloorDivOptions *builtin_options_as_FloorDivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorDivOptions
- ? static_cast<const FloorDivOptions *>(builtin_options())
- : nullptr;
- }
- const SquareOptions *builtin_options_as_SquareOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquareOptions
- ? static_cast<const SquareOptions *>(builtin_options())
- : nullptr;
- }
- const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ZerosLikeOptions
- ? static_cast<const ZerosLikeOptions *>(builtin_options())
- : nullptr;
- }
- const FillOptions *builtin_options_as_FillOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FillOptions
- ? static_cast<const FillOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceLSTMOptions *
- builtin_options_as_BidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceLSTMOptions
- ? static_cast<const BidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceRNNOptions
- ? static_cast<const BidirectionalSequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const UnidirectionalSequenceLSTMOptions *
- builtin_options_as_UnidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnidirectionalSequenceLSTMOptions
- ? static_cast<const UnidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const FloorModOptions *builtin_options_as_FloorModOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorModOptions
- ? static_cast<const FloorModOptions *>(builtin_options())
- : nullptr;
- }
- const RangeOptions *builtin_options_as_RangeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RangeOptions
- ? static_cast<const RangeOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeNearestNeighborOptions
- ? static_cast<const ResizeNearestNeighborOptions *>(builtin_options())
- : nullptr;
- }
- const LeakyReluOptions *builtin_options_as_LeakyReluOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LeakyReluOptions
- ? static_cast<const LeakyReluOptions *>(builtin_options())
- : nullptr;
- }
- const SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquaredDifferenceOptions
- ? static_cast<const SquaredDifferenceOptions *>(builtin_options())
- : nullptr;
- }
- const MirrorPadOptions *builtin_options_as_MirrorPadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MirrorPadOptions
- ? static_cast<const MirrorPadOptions *>(builtin_options())
- : nullptr;
- }
- const AbsOptions *builtin_options_as_AbsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AbsOptions
- ? static_cast<const AbsOptions *>(builtin_options())
- : nullptr;
- }
- const SplitVOptions *builtin_options_as_SplitVOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitVOptions
- ? static_cast<const SplitVOptions *>(builtin_options())
- : nullptr;
- }
- const UniqueOptions *builtin_options_as_UniqueOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UniqueOptions
- ? static_cast<const UniqueOptions *>(builtin_options())
- : nullptr;
- }
- const ReverseV2Options *builtin_options_as_ReverseV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_ReverseV2Options
- ? static_cast<const ReverseV2Options *>(builtin_options())
- : nullptr;
- }
- const AddNOptions *builtin_options_as_AddNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AddNOptions
- ? static_cast<const AddNOptions *>(builtin_options())
- : nullptr;
- }
- const GatherNdOptions *builtin_options_as_GatherNdOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GatherNdOptions
- ? static_cast<const GatherNdOptions *>(builtin_options())
- : nullptr;
- }
- const CosOptions *builtin_options_as_CosOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CosOptions
- ? static_cast<const CosOptions *>(builtin_options())
- : nullptr;
- }
- const WhereOptions *builtin_options_as_WhereOptions() const
- {
- return builtin_options_type() == BuiltinOptions_WhereOptions
- ? static_cast<const WhereOptions *>(builtin_options())
- : nullptr;
- }
- const RankOptions *builtin_options_as_RankOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RankOptions
- ? static_cast<const RankOptions *>(builtin_options())
- : nullptr;
- }
- const ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReverseSequenceOptions
- ? static_cast<const ReverseSequenceOptions *>(builtin_options())
- : nullptr;
- }
- const MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MatrixDiagOptions
- ? static_cast<const MatrixDiagOptions *>(builtin_options())
- : nullptr;
- }
- const QuantizeOptions *builtin_options_as_QuantizeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_QuantizeOptions
- ? static_cast<const QuantizeOptions *>(builtin_options())
- : nullptr;
- }
- const MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MatrixSetDiagOptions
- ? static_cast<const MatrixSetDiagOptions *>(builtin_options())
- : nullptr;
- }
- const HardSwishOptions *builtin_options_as_HardSwishOptions() const
- {
- return builtin_options_type() == BuiltinOptions_HardSwishOptions
- ? static_cast<const HardSwishOptions *>(builtin_options())
- : nullptr;
- }
- const IfOptions *builtin_options_as_IfOptions() const
- {
- return builtin_options_type() == BuiltinOptions_IfOptions
- ? static_cast<const IfOptions *>(builtin_options())
- : nullptr;
- }
- const WhileOptions *builtin_options_as_WhileOptions() const
- {
- return builtin_options_type() == BuiltinOptions_WhileOptions
- ? static_cast<const WhileOptions *>(builtin_options())
- : nullptr;
- }
- const DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DepthToSpaceOptions
- ? static_cast<const DepthToSpaceOptions *>(builtin_options())
- : nullptr;
- }
- const NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const
- {
- return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV4Options
- ? static_cast<const NonMaxSuppressionV4Options *>(builtin_options())
- : nullptr;
- }
- const NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const
- {
- return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV5Options
- ? static_cast<const NonMaxSuppressionV5Options *>(builtin_options())
- : nullptr;
- }
- const ScatterNdOptions *builtin_options_as_ScatterNdOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ScatterNdOptions
- ? static_cast<const ScatterNdOptions *>(builtin_options())
- : nullptr;
- }
- const SelectV2Options *builtin_options_as_SelectV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_SelectV2Options
- ? static_cast<const SelectV2Options *>(builtin_options())
- : nullptr;
- }
- const DensifyOptions *builtin_options_as_DensifyOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DensifyOptions
- ? static_cast<const DensifyOptions *>(builtin_options())
- : nullptr;
- }
- const SegmentSumOptions *builtin_options_as_SegmentSumOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SegmentSumOptions
- ? static_cast<const SegmentSumOptions *>(builtin_options())
- : nullptr;
- }
- const BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BatchMatMulOptions
- ? static_cast<const BatchMatMulOptions *>(builtin_options())
- : nullptr;
- }
- const flatbuffers::Vector<uint8_t> *custom_options() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
- }
- CustomOptionsFormat custom_options_format() const
- {
- return static_cast<CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
- }
- const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *intermediates() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INTERMEDIATES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) &&
- VerifyOffset(verifier, VT_BUILTIN_OPTIONS) &&
- VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) &&
- VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && verifier.VerifyVector(custom_options()) &&
- VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) &&
- VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) &&
- verifier.VerifyVector(mutating_variable_inputs()) &&
- VerifyOffset(verifier, VT_INTERMEDIATES) && verifier.VerifyVector(intermediates()) &&
- verifier.EndTable();
- }
-};
-
-template <> inline const Conv2DOptions *Operator::builtin_options_as<Conv2DOptions>() const
-{
- return builtin_options_as_Conv2DOptions();
-}
-
-template <>
-inline const DepthwiseConv2DOptions *Operator::builtin_options_as<DepthwiseConv2DOptions>() const
-{
- return builtin_options_as_DepthwiseConv2DOptions();
-}
-
-template <>
-inline const ConcatEmbeddingsOptions *Operator::builtin_options_as<ConcatEmbeddingsOptions>() const
-{
- return builtin_options_as_ConcatEmbeddingsOptions();
-}
-
-template <>
-inline const LSHProjectionOptions *Operator::builtin_options_as<LSHProjectionOptions>() const
-{
- return builtin_options_as_LSHProjectionOptions();
-}
-
-template <> inline const Pool2DOptions *Operator::builtin_options_as<Pool2DOptions>() const
-{
- return builtin_options_as_Pool2DOptions();
-}
-
-template <> inline const SVDFOptions *Operator::builtin_options_as<SVDFOptions>() const
-{
- return builtin_options_as_SVDFOptions();
-}
-
-template <> inline const RNNOptions *Operator::builtin_options_as<RNNOptions>() const
-{
- return builtin_options_as_RNNOptions();
-}
-
-template <>
-inline const FullyConnectedOptions *Operator::builtin_options_as<FullyConnectedOptions>() const
-{
- return builtin_options_as_FullyConnectedOptions();
-}
-
-template <> inline const SoftmaxOptions *Operator::builtin_options_as<SoftmaxOptions>() const
-{
- return builtin_options_as_SoftmaxOptions();
-}
-
-template <>
-inline const ConcatenationOptions *Operator::builtin_options_as<ConcatenationOptions>() const
-{
- return builtin_options_as_ConcatenationOptions();
-}
-
-template <> inline const AddOptions *Operator::builtin_options_as<AddOptions>() const
-{
- return builtin_options_as_AddOptions();
-}
-
-template <> inline const L2NormOptions *Operator::builtin_options_as<L2NormOptions>() const
-{
- return builtin_options_as_L2NormOptions();
-}
-
-template <>
-inline const LocalResponseNormalizationOptions *
-Operator::builtin_options_as<LocalResponseNormalizationOptions>() const
-{
- return builtin_options_as_LocalResponseNormalizationOptions();
-}
-
-template <> inline const LSTMOptions *Operator::builtin_options_as<LSTMOptions>() const
-{
- return builtin_options_as_LSTMOptions();
-}
-
-template <>
-inline const ResizeBilinearOptions *Operator::builtin_options_as<ResizeBilinearOptions>() const
-{
- return builtin_options_as_ResizeBilinearOptions();
-}
-
-template <> inline const CallOptions *Operator::builtin_options_as<CallOptions>() const
-{
- return builtin_options_as_CallOptions();
-}
-
-template <> inline const ReshapeOptions *Operator::builtin_options_as<ReshapeOptions>() const
-{
- return builtin_options_as_ReshapeOptions();
-}
-
-template <> inline const SkipGramOptions *Operator::builtin_options_as<SkipGramOptions>() const
-{
- return builtin_options_as_SkipGramOptions();
-}
-
-template <>
-inline const SpaceToDepthOptions *Operator::builtin_options_as<SpaceToDepthOptions>() const
-{
- return builtin_options_as_SpaceToDepthOptions();
-}
-
-template <>
-inline const EmbeddingLookupSparseOptions *
-Operator::builtin_options_as<EmbeddingLookupSparseOptions>() const
-{
- return builtin_options_as_EmbeddingLookupSparseOptions();
-}
-
-template <> inline const MulOptions *Operator::builtin_options_as<MulOptions>() const
-{
- return builtin_options_as_MulOptions();
-}
-
-template <> inline const PadOptions *Operator::builtin_options_as<PadOptions>() const
-{
- return builtin_options_as_PadOptions();
-}
-
-template <> inline const GatherOptions *Operator::builtin_options_as<GatherOptions>() const
-{
- return builtin_options_as_GatherOptions();
-}
-
-template <>
-inline const BatchToSpaceNDOptions *Operator::builtin_options_as<BatchToSpaceNDOptions>() const
-{
- return builtin_options_as_BatchToSpaceNDOptions();
-}
-
-template <>
-inline const SpaceToBatchNDOptions *Operator::builtin_options_as<SpaceToBatchNDOptions>() const
-{
- return builtin_options_as_SpaceToBatchNDOptions();
-}
-
-template <> inline const TransposeOptions *Operator::builtin_options_as<TransposeOptions>() const
-{
- return builtin_options_as_TransposeOptions();
-}
-
-template <> inline const ReducerOptions *Operator::builtin_options_as<ReducerOptions>() const
-{
- return builtin_options_as_ReducerOptions();
-}
-
-template <> inline const SubOptions *Operator::builtin_options_as<SubOptions>() const
-{
- return builtin_options_as_SubOptions();
-}
-
-template <> inline const DivOptions *Operator::builtin_options_as<DivOptions>() const
-{
- return builtin_options_as_DivOptions();
-}
-
-template <> inline const SqueezeOptions *Operator::builtin_options_as<SqueezeOptions>() const
-{
- return builtin_options_as_SqueezeOptions();
-}
-
-template <>
-inline const SequenceRNNOptions *Operator::builtin_options_as<SequenceRNNOptions>() const
-{
- return builtin_options_as_SequenceRNNOptions();
-}
-
-template <>
-inline const StridedSliceOptions *Operator::builtin_options_as<StridedSliceOptions>() const
-{
- return builtin_options_as_StridedSliceOptions();
-}
-
-template <> inline const ExpOptions *Operator::builtin_options_as<ExpOptions>() const
-{
- return builtin_options_as_ExpOptions();
-}
-
-template <> inline const TopKV2Options *Operator::builtin_options_as<TopKV2Options>() const
-{
- return builtin_options_as_TopKV2Options();
-}
-
-template <> inline const SplitOptions *Operator::builtin_options_as<SplitOptions>() const
-{
- return builtin_options_as_SplitOptions();
-}
-
-template <> inline const LogSoftmaxOptions *Operator::builtin_options_as<LogSoftmaxOptions>() const
-{
- return builtin_options_as_LogSoftmaxOptions();
-}
-
-template <> inline const CastOptions *Operator::builtin_options_as<CastOptions>() const
-{
- return builtin_options_as_CastOptions();
-}
-
-template <> inline const DequantizeOptions *Operator::builtin_options_as<DequantizeOptions>() const
-{
- return builtin_options_as_DequantizeOptions();
-}
-
-template <>
-inline const MaximumMinimumOptions *Operator::builtin_options_as<MaximumMinimumOptions>() const
-{
- return builtin_options_as_MaximumMinimumOptions();
-}
-
-template <> inline const ArgMaxOptions *Operator::builtin_options_as<ArgMaxOptions>() const
-{
- return builtin_options_as_ArgMaxOptions();
-}
-
-template <> inline const LessOptions *Operator::builtin_options_as<LessOptions>() const
-{
- return builtin_options_as_LessOptions();
-}
-
-template <> inline const NegOptions *Operator::builtin_options_as<NegOptions>() const
-{
- return builtin_options_as_NegOptions();
-}
-
-template <> inline const PadV2Options *Operator::builtin_options_as<PadV2Options>() const
-{
- return builtin_options_as_PadV2Options();
-}
-
-template <> inline const GreaterOptions *Operator::builtin_options_as<GreaterOptions>() const
-{
- return builtin_options_as_GreaterOptions();
-}
-
-template <>
-inline const GreaterEqualOptions *Operator::builtin_options_as<GreaterEqualOptions>() const
-{
- return builtin_options_as_GreaterEqualOptions();
-}
-
-template <> inline const LessEqualOptions *Operator::builtin_options_as<LessEqualOptions>() const
-{
- return builtin_options_as_LessEqualOptions();
-}
-
-template <> inline const SelectOptions *Operator::builtin_options_as<SelectOptions>() const
-{
- return builtin_options_as_SelectOptions();
-}
-
-template <> inline const SliceOptions *Operator::builtin_options_as<SliceOptions>() const
-{
- return builtin_options_as_SliceOptions();
-}
-
-template <>
-inline const TransposeConvOptions *Operator::builtin_options_as<TransposeConvOptions>() const
-{
- return builtin_options_as_TransposeConvOptions();
-}
-
-template <>
-inline const SparseToDenseOptions *Operator::builtin_options_as<SparseToDenseOptions>() const
-{
- return builtin_options_as_SparseToDenseOptions();
-}
-
-template <> inline const TileOptions *Operator::builtin_options_as<TileOptions>() const
-{
- return builtin_options_as_TileOptions();
-}
-
-template <> inline const ExpandDimsOptions *Operator::builtin_options_as<ExpandDimsOptions>() const
-{
- return builtin_options_as_ExpandDimsOptions();
-}
-
-template <> inline const EqualOptions *Operator::builtin_options_as<EqualOptions>() const
-{
- return builtin_options_as_EqualOptions();
-}
-
-template <> inline const NotEqualOptions *Operator::builtin_options_as<NotEqualOptions>() const
-{
- return builtin_options_as_NotEqualOptions();
-}
-
-template <> inline const ShapeOptions *Operator::builtin_options_as<ShapeOptions>() const
-{
- return builtin_options_as_ShapeOptions();
-}
-
-template <> inline const PowOptions *Operator::builtin_options_as<PowOptions>() const
-{
- return builtin_options_as_PowOptions();
-}
-
-template <> inline const ArgMinOptions *Operator::builtin_options_as<ArgMinOptions>() const
-{
- return builtin_options_as_ArgMinOptions();
-}
-
-template <> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuantOptions>() const
-{
- return builtin_options_as_FakeQuantOptions();
-}
-
-template <> inline const PackOptions *Operator::builtin_options_as<PackOptions>() const
-{
- return builtin_options_as_PackOptions();
-}
-
-template <> inline const LogicalOrOptions *Operator::builtin_options_as<LogicalOrOptions>() const
-{
- return builtin_options_as_LogicalOrOptions();
-}
-
-template <> inline const OneHotOptions *Operator::builtin_options_as<OneHotOptions>() const
-{
- return builtin_options_as_OneHotOptions();
-}
-
-template <> inline const LogicalAndOptions *Operator::builtin_options_as<LogicalAndOptions>() const
-{
- return builtin_options_as_LogicalAndOptions();
-}
-
-template <> inline const LogicalNotOptions *Operator::builtin_options_as<LogicalNotOptions>() const
-{
- return builtin_options_as_LogicalNotOptions();
-}
-
-template <> inline const UnpackOptions *Operator::builtin_options_as<UnpackOptions>() const
-{
- return builtin_options_as_UnpackOptions();
-}
-
-template <> inline const FloorDivOptions *Operator::builtin_options_as<FloorDivOptions>() const
-{
- return builtin_options_as_FloorDivOptions();
-}
-
-template <> inline const SquareOptions *Operator::builtin_options_as<SquareOptions>() const
-{
- return builtin_options_as_SquareOptions();
-}
-
-template <> inline const ZerosLikeOptions *Operator::builtin_options_as<ZerosLikeOptions>() const
-{
- return builtin_options_as_ZerosLikeOptions();
-}
-
-template <> inline const FillOptions *Operator::builtin_options_as<FillOptions>() const
-{
- return builtin_options_as_FillOptions();
-}
-
-template <>
-inline const BidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<BidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceLSTMOptions();
-}
-
-template <>
-inline const BidirectionalSequenceRNNOptions *
-Operator::builtin_options_as<BidirectionalSequenceRNNOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceRNNOptions();
-}
-
-template <>
-inline const UnidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<UnidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_UnidirectionalSequenceLSTMOptions();
-}
-
-template <> inline const FloorModOptions *Operator::builtin_options_as<FloorModOptions>() const
-{
- return builtin_options_as_FloorModOptions();
-}
-
-template <> inline const RangeOptions *Operator::builtin_options_as<RangeOptions>() const
-{
- return builtin_options_as_RangeOptions();
-}
-
-template <>
-inline const ResizeNearestNeighborOptions *
-Operator::builtin_options_as<ResizeNearestNeighborOptions>() const
-{
- return builtin_options_as_ResizeNearestNeighborOptions();
-}
-
-template <> inline const LeakyReluOptions *Operator::builtin_options_as<LeakyReluOptions>() const
-{
- return builtin_options_as_LeakyReluOptions();
-}
-
-template <>
-inline const SquaredDifferenceOptions *
-Operator::builtin_options_as<SquaredDifferenceOptions>() const
-{
- return builtin_options_as_SquaredDifferenceOptions();
-}
-
-template <> inline const MirrorPadOptions *Operator::builtin_options_as<MirrorPadOptions>() const
-{
- return builtin_options_as_MirrorPadOptions();
-}
-
-template <> inline const AbsOptions *Operator::builtin_options_as<AbsOptions>() const
-{
- return builtin_options_as_AbsOptions();
-}
-
-template <> inline const SplitVOptions *Operator::builtin_options_as<SplitVOptions>() const
-{
- return builtin_options_as_SplitVOptions();
-}
-
-template <> inline const UniqueOptions *Operator::builtin_options_as<UniqueOptions>() const
-{
- return builtin_options_as_UniqueOptions();
-}
-
-template <> inline const ReverseV2Options *Operator::builtin_options_as<ReverseV2Options>() const
-{
- return builtin_options_as_ReverseV2Options();
-}
-
-template <> inline const AddNOptions *Operator::builtin_options_as<AddNOptions>() const
-{
- return builtin_options_as_AddNOptions();
-}
-
-template <> inline const GatherNdOptions *Operator::builtin_options_as<GatherNdOptions>() const
-{
- return builtin_options_as_GatherNdOptions();
-}
-
-template <> inline const CosOptions *Operator::builtin_options_as<CosOptions>() const
-{
- return builtin_options_as_CosOptions();
-}
-
-template <> inline const WhereOptions *Operator::builtin_options_as<WhereOptions>() const
-{
- return builtin_options_as_WhereOptions();
-}
-
-template <> inline const RankOptions *Operator::builtin_options_as<RankOptions>() const
-{
- return builtin_options_as_RankOptions();
-}
-
-template <>
-inline const ReverseSequenceOptions *Operator::builtin_options_as<ReverseSequenceOptions>() const
-{
- return builtin_options_as_ReverseSequenceOptions();
-}
-
-template <> inline const MatrixDiagOptions *Operator::builtin_options_as<MatrixDiagOptions>() const
-{
- return builtin_options_as_MatrixDiagOptions();
-}
-
-template <> inline const QuantizeOptions *Operator::builtin_options_as<QuantizeOptions>() const
-{
- return builtin_options_as_QuantizeOptions();
-}
-
-template <>
-inline const MatrixSetDiagOptions *Operator::builtin_options_as<MatrixSetDiagOptions>() const
-{
- return builtin_options_as_MatrixSetDiagOptions();
-}
-
-template <> inline const HardSwishOptions *Operator::builtin_options_as<HardSwishOptions>() const
-{
- return builtin_options_as_HardSwishOptions();
-}
-
-template <> inline const IfOptions *Operator::builtin_options_as<IfOptions>() const
-{
- return builtin_options_as_IfOptions();
-}
-
-template <> inline const WhileOptions *Operator::builtin_options_as<WhileOptions>() const
-{
- return builtin_options_as_WhileOptions();
-}
-
-template <>
-inline const DepthToSpaceOptions *Operator::builtin_options_as<DepthToSpaceOptions>() const
-{
- return builtin_options_as_DepthToSpaceOptions();
-}
-
-template <>
-inline const NonMaxSuppressionV4Options *
-Operator::builtin_options_as<NonMaxSuppressionV4Options>() const
-{
- return builtin_options_as_NonMaxSuppressionV4Options();
-}
-
-template <>
-inline const NonMaxSuppressionV5Options *
-Operator::builtin_options_as<NonMaxSuppressionV5Options>() const
-{
- return builtin_options_as_NonMaxSuppressionV5Options();
-}
-
-template <> inline const ScatterNdOptions *Operator::builtin_options_as<ScatterNdOptions>() const
-{
- return builtin_options_as_ScatterNdOptions();
-}
-
-template <> inline const SelectV2Options *Operator::builtin_options_as<SelectV2Options>() const
-{
- return builtin_options_as_SelectV2Options();
-}
-
-template <> inline const DensifyOptions *Operator::builtin_options_as<DensifyOptions>() const
-{
- return builtin_options_as_DensifyOptions();
-}
-
-template <> inline const SegmentSumOptions *Operator::builtin_options_as<SegmentSumOptions>() const
-{
- return builtin_options_as_SegmentSumOptions();
-}
-
-template <>
-inline const BatchMatMulOptions *Operator::builtin_options_as<BatchMatMulOptions>() const
-{
- return builtin_options_as_BatchMatMulOptions();
-}
-
-struct OperatorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_opcode_index(uint32_t opcode_index)
- {
- fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(Operator::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
- }
- void add_builtin_options_type(BuiltinOptions builtin_options_type)
- {
- fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE,
- static_cast<uint8_t>(builtin_options_type), 0);
- }
- void add_builtin_options(flatbuffers::Offset<void> builtin_options)
- {
- fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
- }
- void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options)
- {
- fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
- }
- void add_custom_options_format(CustomOptionsFormat custom_options_format)
- {
- fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT,
- static_cast<int8_t>(custom_options_format), 0);
- }
- void add_mutating_variable_inputs(
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs)
- {
- fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs);
- }
- void add_intermediates(flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates)
- {
- fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates);
- }
- explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorBuilder &operator=(const OperatorBuilder &);
- flatbuffers::Offset<Operator> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Operator>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Operator>
-CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates = 0)
-{
- OperatorBuilder builder_(_fbb);
- builder_.add_intermediates(intermediates);
- builder_.add_mutating_variable_inputs(mutating_variable_inputs);
- builder_.add_custom_options(custom_options);
- builder_.add_builtin_options(builtin_options);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_opcode_index(opcode_index);
- builder_.add_custom_options_format(custom_options_format);
- builder_.add_builtin_options_type(builtin_options_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Operator>
-CreateOperatorDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- const std::vector<uint8_t> *custom_options = nullptr,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- const std::vector<uint8_t> *mutating_variable_inputs = nullptr,
- const std::vector<int32_t> *intermediates = nullptr)
-{
- return onert_tflite::CreateOperator(
- _fbb, opcode_index, inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0, builtin_options_type, builtin_options,
- custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0, custom_options_format,
- mutating_variable_inputs ? _fbb.CreateVector<uint8_t>(*mutating_variable_inputs) : 0,
- intermediates ? _fbb.CreateVector<int32_t>(*intermediates) : 0);
-}
-
-struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TENSORS = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_OPERATORS = 10,
- VT_NAME = 12
- };
- const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *>(VT_TENSORS);
- }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Operator>> *operators() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Operator>> *>(VT_OPERATORS);
- }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) &&
- verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) &&
- verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && verifier.EndTable();
- }
-};
-
-struct SubGraphBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors)
- {
- fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
- }
- void
- add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators)
- {
- fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
- }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(SubGraph::VT_NAME, name);
- }
- explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SubGraphBuilder &operator=(const SubGraphBuilder &);
- flatbuffers::Offset<SubGraph> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SubGraph>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SubGraph> CreateSubGraph(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0)
-{
- SubGraphBuilder builder_(_fbb);
- builder_.add_name(name);
- builder_.add_operators(operators);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_tensors(tensors);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SubGraph>
-CreateSubGraphDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<flatbuffers::Offset<Tensor>> *tensors = nullptr,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- const std::vector<flatbuffers::Offset<Operator>> *operators = nullptr,
- const char *name = nullptr)
-{
- return onert_tflite::CreateSubGraph(
- _fbb, tensors ? _fbb.CreateVector<flatbuffers::Offset<Tensor>>(*tensors) : 0,
- inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0,
- operators ? _fbb.CreateVector<flatbuffers::Offset<Operator>>(*operators) : 0,
- name ? _fbb.CreateString(name) : 0);
-}
-
-struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_DATA = 4
- };
- const flatbuffers::Vector<uint8_t> *data() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) &&
- verifier.VerifyVector(data()) && verifier.EndTable();
- }
-};
-
-struct BufferBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data)
- {
- fbb_.AddOffset(Buffer::VT_DATA, data);
- }
- explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BufferBuilder &operator=(const BufferBuilder &);
- flatbuffers::Offset<Buffer> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Buffer>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Buffer>
-CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0)
-{
- BufferBuilder builder_(_fbb);
- builder_.add_data(data);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Buffer> CreateBufferDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *data = nullptr)
-{
- return onert_tflite::CreateBuffer(_fbb, data ? _fbb.CreateVector<uint8_t>(*data) : 0);
-}
-
-struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NAME = 4,
- VT_BUFFER = 6
- };
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyField<uint32_t>(verifier, VT_BUFFER) &&
- verifier.EndTable();
- }
-};
-
-struct MetadataBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(Metadata::VT_NAME, name);
- }
- void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0); }
- explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MetadataBuilder &operator=(const MetadataBuilder &);
- flatbuffers::Offset<Metadata> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Metadata>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Metadata>
-CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> name = 0, uint32_t buffer = 0)
-{
- MetadataBuilder builder_(_fbb);
- builder_.add_buffer(buffer);
- builder_.add_name(name);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Metadata> CreateMetadataDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const char *name = nullptr,
- uint32_t buffer = 0)
-{
- return onert_tflite::CreateMetadata(_fbb, name ? _fbb.CreateString(name) : 0, buffer);
-}
-
-struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VERSION = 4,
- VT_OPERATOR_CODES = 6,
- VT_SUBGRAPHS = 8,
- VT_DESCRIPTION = 10,
- VT_BUFFERS = 12,
- VT_METADATA_BUFFER = 14,
- VT_METADATA = 16
- };
- uint32_t version() const { return GetField<uint32_t>(VT_VERSION, 0); }
- const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *operator_codes() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *>(
- VT_OPERATOR_CODES);
- }
- const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *subgraphs() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *>(VT_SUBGRAPHS);
- }
- const flatbuffers::String *description() const
- {
- return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *>(VT_BUFFERS);
- }
- const flatbuffers::Vector<int32_t> *metadata_buffer() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *metadata() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *>(VT_METADATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_VERSION) &&
- VerifyOffset(verifier, VT_OPERATOR_CODES) && verifier.VerifyVector(operator_codes()) &&
- verifier.VerifyVectorOfTables(operator_codes()) &&
- VerifyOffset(verifier, VT_SUBGRAPHS) && verifier.VerifyVector(subgraphs()) &&
- verifier.VerifyVectorOfTables(subgraphs()) && VerifyOffset(verifier, VT_DESCRIPTION) &&
- verifier.VerifyString(description()) && VerifyOffset(verifier, VT_BUFFERS) &&
- verifier.VerifyVector(buffers()) && verifier.VerifyVectorOfTables(buffers()) &&
- VerifyOffset(verifier, VT_METADATA_BUFFER) && verifier.VerifyVector(metadata_buffer()) &&
- VerifyOffset(verifier, VT_METADATA) && verifier.VerifyVector(metadata()) &&
- verifier.VerifyVectorOfTables(metadata()) && verifier.EndTable();
- }
-};
-
-struct ModelBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_version(uint32_t version) { fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0); }
- void add_operator_codes(
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes)
- {
- fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
- }
- void
- add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs)
- {
- fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
- }
- void add_description(flatbuffers::Offset<flatbuffers::String> description)
- {
- fbb_.AddOffset(Model::VT_DESCRIPTION, description);
- }
- void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers)
- {
- fbb_.AddOffset(Model::VT_BUFFERS, buffers);
- }
- void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer)
- {
- fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
- }
- void
- add_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata)
- {
- fbb_.AddOffset(Model::VT_METADATA, metadata);
- }
- explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ModelBuilder &operator=(const ModelBuilder &);
- flatbuffers::Offset<Model> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Model>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Model> CreateModel(
- flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs = 0,
- flatbuffers::Offset<flatbuffers::String> description = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata = 0)
-{
- ModelBuilder builder_(_fbb);
- builder_.add_metadata(metadata);
- builder_.add_metadata_buffer(metadata_buffer);
- builder_.add_buffers(buffers);
- builder_.add_description(description);
- builder_.add_subgraphs(subgraphs);
- builder_.add_operator_codes(operator_codes);
- builder_.add_version(version);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Model>
-CreateModelDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- const std::vector<flatbuffers::Offset<OperatorCode>> *operator_codes = nullptr,
- const std::vector<flatbuffers::Offset<SubGraph>> *subgraphs = nullptr,
- const char *description = nullptr,
- const std::vector<flatbuffers::Offset<Buffer>> *buffers = nullptr,
- const std::vector<int32_t> *metadata_buffer = nullptr,
- const std::vector<flatbuffers::Offset<Metadata>> *metadata = nullptr)
-{
- return onert_tflite::CreateModel(
- _fbb, version,
- operator_codes ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>>(*operator_codes) : 0,
- subgraphs ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>>(*subgraphs) : 0,
- description ? _fbb.CreateString(description) : 0,
- buffers ? _fbb.CreateVector<flatbuffers::Offset<Buffer>>(*buffers) : 0,
- metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0,
- metadata ? _fbb.CreateVector<flatbuffers::Offset<Metadata>>(*metadata) : 0);
-}
-
-inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type)
-{
- switch (type)
- {
- case QuantizationDetails_NONE:
- {
- return true;
- }
- case QuantizationDetails_CustomQuantization:
- {
- auto ptr = reinterpret_cast<const CustomQuantization *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool
-VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyQuantizationDetails(verifier, values->Get(i),
- types->GetEnum<QuantizationDetails>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj,
- SparseIndexVector type)
-{
- switch (type)
- {
- case SparseIndexVector_NONE:
- {
- return true;
- }
- case SparseIndexVector_Int32Vector:
- {
- auto ptr = reinterpret_cast<const Int32Vector *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case SparseIndexVector_Uint16Vector:
- {
- auto ptr = reinterpret_cast<const Uint16Vector *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case SparseIndexVector_Uint8Vector:
- {
- auto ptr = reinterpret_cast<const Uint8Vector *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool
-VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifySparseIndexVector(verifier, values->Get(i), types->GetEnum<SparseIndexVector>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj,
- BuiltinOptions type)
-{
- switch (type)
- {
- case BuiltinOptions_NONE:
- {
- return true;
- }
- case BuiltinOptions_Conv2DOptions:
- {
- auto ptr = reinterpret_cast<const Conv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DepthwiseConv2DOptions:
- {
- auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatEmbeddingsOptions:
- {
- auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSHProjectionOptions:
- {
- auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_Pool2DOptions:
- {
- auto ptr = reinterpret_cast<const Pool2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SVDFOptions:
- {
- auto ptr = reinterpret_cast<const SVDFOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RNNOptions:
- {
- auto ptr = reinterpret_cast<const RNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FullyConnectedOptions:
- {
- auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatenationOptions:
- {
- auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AddOptions:
- {
- auto ptr = reinterpret_cast<const AddOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_L2NormOptions:
- {
- auto ptr = reinterpret_cast<const L2NormOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LocalResponseNormalizationOptions:
- {
- auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSTMOptions:
- {
- auto ptr = reinterpret_cast<const LSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeBilinearOptions:
- {
- auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CallOptions:
- {
- auto ptr = reinterpret_cast<const CallOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReshapeOptions:
- {
- auto ptr = reinterpret_cast<const ReshapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SkipGramOptions:
- {
- auto ptr = reinterpret_cast<const SkipGramOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToDepthOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EmbeddingLookupSparseOptions:
- {
- auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MulOptions:
- {
- auto ptr = reinterpret_cast<const MulOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadOptions:
- {
- auto ptr = reinterpret_cast<const PadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GatherOptions:
- {
- auto ptr = reinterpret_cast<const GatherOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BatchToSpaceNDOptions:
- {
- auto ptr = reinterpret_cast<const BatchToSpaceNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToBatchNDOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToBatchNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeOptions:
- {
- auto ptr = reinterpret_cast<const TransposeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReducerOptions:
- {
- auto ptr = reinterpret_cast<const ReducerOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SubOptions:
- {
- auto ptr = reinterpret_cast<const SubOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DivOptions:
- {
- auto ptr = reinterpret_cast<const DivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SqueezeOptions:
- {
- auto ptr = reinterpret_cast<const SqueezeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const SequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_StridedSliceOptions:
- {
- auto ptr = reinterpret_cast<const StridedSliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpOptions:
- {
- auto ptr = reinterpret_cast<const ExpOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TopKV2Options:
- {
- auto ptr = reinterpret_cast<const TopKV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitOptions:
- {
- auto ptr = reinterpret_cast<const SplitOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogSoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const LogSoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CastOptions:
- {
- auto ptr = reinterpret_cast<const CastOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DequantizeOptions:
- {
- auto ptr = reinterpret_cast<const DequantizeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MaximumMinimumOptions:
- {
- auto ptr = reinterpret_cast<const MaximumMinimumOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMaxOptions:
- {
- auto ptr = reinterpret_cast<const ArgMaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessOptions:
- {
- auto ptr = reinterpret_cast<const LessOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NegOptions:
- {
- auto ptr = reinterpret_cast<const NegOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadV2Options:
- {
- auto ptr = reinterpret_cast<const PadV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterOptions:
- {
- auto ptr = reinterpret_cast<const GreaterOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterEqualOptions:
- {
- auto ptr = reinterpret_cast<const GreaterEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessEqualOptions:
- {
- auto ptr = reinterpret_cast<const LessEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SelectOptions:
- {
- auto ptr = reinterpret_cast<const SelectOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SliceOptions:
- {
- auto ptr = reinterpret_cast<const SliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeConvOptions:
- {
- auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SparseToDenseOptions:
- {
- auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TileOptions:
- {
- auto ptr = reinterpret_cast<const TileOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpandDimsOptions:
- {
- auto ptr = reinterpret_cast<const ExpandDimsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EqualOptions:
- {
- auto ptr = reinterpret_cast<const EqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NotEqualOptions:
- {
- auto ptr = reinterpret_cast<const NotEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ShapeOptions:
- {
- auto ptr = reinterpret_cast<const ShapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PowOptions:
- {
- auto ptr = reinterpret_cast<const PowOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMinOptions:
- {
- auto ptr = reinterpret_cast<const ArgMinOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FakeQuantOptions:
- {
- auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PackOptions:
- {
- auto ptr = reinterpret_cast<const PackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalOrOptions:
- {
- auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_OneHotOptions:
- {
- auto ptr = reinterpret_cast<const OneHotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalAndOptions:
- {
- auto ptr = reinterpret_cast<const LogicalAndOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalNotOptions:
- {
- auto ptr = reinterpret_cast<const LogicalNotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnpackOptions:
- {
- auto ptr = reinterpret_cast<const UnpackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorDivOptions:
- {
- auto ptr = reinterpret_cast<const FloorDivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquareOptions:
- {
- auto ptr = reinterpret_cast<const SquareOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ZerosLikeOptions:
- {
- auto ptr = reinterpret_cast<const ZerosLikeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FillOptions:
- {
- auto ptr = reinterpret_cast<const FillOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const UnidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorModOptions:
- {
- auto ptr = reinterpret_cast<const FloorModOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RangeOptions:
- {
- auto ptr = reinterpret_cast<const RangeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeNearestNeighborOptions:
- {
- auto ptr = reinterpret_cast<const ResizeNearestNeighborOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LeakyReluOptions:
- {
- auto ptr = reinterpret_cast<const LeakyReluOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquaredDifferenceOptions:
- {
- auto ptr = reinterpret_cast<const SquaredDifferenceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MirrorPadOptions:
- {
- auto ptr = reinterpret_cast<const MirrorPadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AbsOptions:
- {
- auto ptr = reinterpret_cast<const AbsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitVOptions:
- {
- auto ptr = reinterpret_cast<const SplitVOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UniqueOptions:
- {
- auto ptr = reinterpret_cast<const UniqueOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReverseV2Options:
- {
- auto ptr = reinterpret_cast<const ReverseV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AddNOptions:
- {
- auto ptr = reinterpret_cast<const AddNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GatherNdOptions:
- {
- auto ptr = reinterpret_cast<const GatherNdOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CosOptions:
- {
- auto ptr = reinterpret_cast<const CosOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_WhereOptions:
- {
- auto ptr = reinterpret_cast<const WhereOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RankOptions:
- {
- auto ptr = reinterpret_cast<const RankOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReverseSequenceOptions:
- {
- auto ptr = reinterpret_cast<const ReverseSequenceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MatrixDiagOptions:
- {
- auto ptr = reinterpret_cast<const MatrixDiagOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_QuantizeOptions:
- {
- auto ptr = reinterpret_cast<const QuantizeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MatrixSetDiagOptions:
- {
- auto ptr = reinterpret_cast<const MatrixSetDiagOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_HardSwishOptions:
- {
- auto ptr = reinterpret_cast<const HardSwishOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_IfOptions:
- {
- auto ptr = reinterpret_cast<const IfOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_WhileOptions:
- {
- auto ptr = reinterpret_cast<const WhileOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DepthToSpaceOptions:
- {
- auto ptr = reinterpret_cast<const DepthToSpaceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NonMaxSuppressionV4Options:
- {
- auto ptr = reinterpret_cast<const NonMaxSuppressionV4Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NonMaxSuppressionV5Options:
- {
- auto ptr = reinterpret_cast<const NonMaxSuppressionV5Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ScatterNdOptions:
- {
- auto ptr = reinterpret_cast<const ScatterNdOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SelectV2Options:
- {
- auto ptr = reinterpret_cast<const SelectV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DensifyOptions:
- {
- auto ptr = reinterpret_cast<const DensifyOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SegmentSumOptions:
- {
- auto ptr = reinterpret_cast<const SegmentSumOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BatchMatMulOptions:
- {
- auto ptr = reinterpret_cast<const BatchMatMulOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyBuiltinOptions(verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline const onert_tflite::Model *GetModel(const void *buf)
-{
- return flatbuffers::GetRoot<onert_tflite::Model>(buf);
-}
-
-inline const onert_tflite::Model *GetSizePrefixedModel(const void *buf)
-{
- return flatbuffers::GetSizePrefixedRoot<onert_tflite::Model>(buf);
-}
-
-inline const char *ModelIdentifier() { return "TFL3"; }
-
-inline bool ModelBufferHasIdentifier(const void *buf)
-{
- return flatbuffers::BufferHasIdentifier(buf, ModelIdentifier());
-}
-
-inline bool VerifyModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifyBuffer<onert_tflite::Model>(ModelIdentifier());
-}
-
-inline bool VerifySizePrefixedModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifySizePrefixedBuffer<onert_tflite::Model>(ModelIdentifier());
-}
-
-inline const char *ModelExtension() { return "tflite"; }
-
-inline void FinishModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<onert_tflite::Model> root)
-{
- fbb.Finish(root, ModelIdentifier());
-}
-
-inline void FinishSizePrefixedModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<onert_tflite::Model> root)
-{
- fbb.FinishSizePrefixed(root, ModelIdentifier());
-}
-
-} // namespace onert_tflite
-
-#endif // FLATBUFFERS_GENERATED_TFLITESCHEMA_ONERT_TFLITE_H_
diff --git a/runtime/onert/frontend/tflite/tflite_schema-1.13.1.fbs b/runtime/onert/frontend/tflite/tflite_schema-1.13.1.fbs
deleted file mode 100644
index ae6b5230f..000000000
--- a/runtime/onert/frontend/tflite/tflite_schema-1.13.1.fbs
+++ /dev/null
@@ -1,795 +0,0 @@
-// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Revision History
-// Version 0: Initial version.
-// Version 1: Add subgraphs to schema.
-// Version 2: Rename operators to conform to NN API.
-// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
-
-// Change namespace to onert_tflite
-namespace onert_tflite;
-
-// This corresponds to the version.
-file_identifier "TFL3";
-// File extension of any written files.
-file_extension "tflite";
-
-// IMPORTANT: All new members of tables, enums and unions must be added at the
-// end to ensure backwards compatibility.
-
-// The type of data stored in a tensor.
-enum TensorType : byte {
- FLOAT32 = 0,
- FLOAT16 = 1,
- INT32 = 2,
- UINT8 = 3,
- INT64 = 4,
- STRING = 5,
- BOOL = 6,
- INT16 = 7,
- COMPLEX64 = 8,
- INT8 = 9,
-}
-
-// Custom quantization parameters for experimenting with new quantization
-// techniques.
-table CustomQuantization {
- custom:[ubyte] (force_align: 16);
-}
-
-// Represents a specific quantization technique's parameters.
-union QuantizationDetails {
- CustomQuantization,
-}
-
-// Parameters for converting a quantized tensor back to float.
-table QuantizationParameters {
- // These four parameters are the asymmetric linear quantization parameters.
- // Given a quantized value q, the corresponding float value f should be:
- // f = scale * (q - zero_point)
- // For other quantization types, the QuantizationDetails below is used.
- min:[float]; // For importing back into tensorflow.
- max:[float]; // For importing back into tensorflow.
- scale:[float]; // For dequantizing the tensor's values.
- zero_point:[long];
-
- // If this is not none, the quantization parameters above are ignored and the
- // value of the QuantizationDetails union below should be used.
- details:QuantizationDetails;
-}
-
-table Tensor {
- // The tensor shape. The meaning of each entry is operator-specific but
- // builtin ops use: [batch size, height, width, number of channels] (That's
- // Tensorflow's NHWC).
- shape:[int];
- type:TensorType;
- // An index that refers to the buffers table at the root of the model. Or,
- // if there is no data buffer associated (i.e. intermediate results), then
- // this is 0 (which refers to an always existent empty buffer).
- //
- // The data_buffer itself is an opaque container, with the assumption that the
- // target device is little-endian. In addition, all builtin operators assume
- // the memory is ordered such that if `shape` is [4, 3, 2], then index
- // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
- buffer:uint;
- name:string; // For debugging and importing back into tensorflow.
- quantization:QuantizationParameters; // Optional.
-
- is_variable:bool = false;
-}
-
-// A list of builtin operators. Builtin operators are slightly faster than custom
-// ones, but not by much. Moreover, while custom operators accept an opaque
-// object containing configuration parameters, builtins have a predetermined
-// set of acceptable options.
-enum BuiltinOperator : byte {
- ADD = 0,
- AVERAGE_POOL_2D = 1,
- CONCATENATION = 2,
- CONV_2D = 3,
- DEPTHWISE_CONV_2D = 4,
- // DEPTH_TO_SPACE = 5,
- DEQUANTIZE = 6,
- EMBEDDING_LOOKUP = 7,
- FLOOR = 8,
- FULLY_CONNECTED = 9,
- HASHTABLE_LOOKUP = 10,
- L2_NORMALIZATION = 11,
- L2_POOL_2D = 12,
- LOCAL_RESPONSE_NORMALIZATION = 13,
- LOGISTIC = 14,
- LSH_PROJECTION = 15,
- LSTM = 16,
- MAX_POOL_2D = 17,
- MUL = 18,
- RELU = 19,
- // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
- // since different model developers use RELU1 in different ways. Never
- // create another op called RELU1.
- RELU_N1_TO_1 = 20,
- RELU6 = 21,
- RESHAPE = 22,
- RESIZE_BILINEAR = 23,
- RNN = 24,
- SOFTMAX = 25,
- SPACE_TO_DEPTH = 26,
- SVDF = 27,
- TANH = 28,
- // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
- CONCAT_EMBEDDINGS = 29,
- SKIP_GRAM = 30,
- CALL = 31,
- CUSTOM = 32,
- EMBEDDING_LOOKUP_SPARSE = 33,
- PAD = 34,
- UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- GATHER = 36,
- BATCH_TO_SPACE_ND = 37,
- SPACE_TO_BATCH_ND = 38,
- TRANSPOSE = 39,
- MEAN = 40,
- SUB = 41,
- DIV = 42,
- SQUEEZE = 43,
- UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- STRIDED_SLICE = 45,
- BIDIRECTIONAL_SEQUENCE_RNN = 46,
- EXP = 47,
- TOPK_V2 = 48,
- SPLIT = 49,
- LOG_SOFTMAX = 50,
- // DELEGATE is a special op type for the operations which are delegated to
- // other backends.
- // WARNING: Experimental interface, subject to change
- DELEGATE = 51,
- BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- CAST = 53,
- PRELU = 54,
- MAXIMUM = 55,
- ARG_MAX = 56,
- MINIMUM = 57,
- LESS = 58,
- NEG = 59,
- PADV2 = 60,
- GREATER = 61,
- GREATER_EQUAL = 62,
- LESS_EQUAL = 63,
- SELECT = 64,
- SLICE = 65,
- SIN = 66,
- TRANSPOSE_CONV = 67,
- SPARSE_TO_DENSE = 68,
- TILE = 69,
- EXPAND_DIMS = 70,
- EQUAL = 71,
- NOT_EQUAL = 72,
- LOG = 73,
- SUM = 74,
- SQRT = 75,
- RSQRT = 76,
- SHAPE = 77,
- POW = 78,
- ARG_MIN = 79,
- FAKE_QUANT = 80,
- REDUCE_PROD = 81,
- REDUCE_MAX = 82,
- PACK = 83,
- LOGICAL_OR = 84,
- ONE_HOT = 85,
- LOGICAL_AND = 86,
- LOGICAL_NOT = 87,
- UNPACK = 88,
- REDUCE_MIN = 89,
- FLOOR_DIV = 90,
- REDUCE_ANY = 91,
- SQUARE = 92,
- ZEROS_LIKE = 93,
- FILL = 94,
- FLOOR_MOD = 95,
- RANGE = 96,
- RESIZE_NEAREST_NEIGHBOR = 97,
- LEAKY_RELU = 98,
- SQUARED_DIFFERENCE = 99,
- MIRROR_PAD = 100,
- ABS = 101,
- SPLIT_V = 102,
-}
-
-// Options for the builtin operators.
-union BuiltinOptions {
- Conv2DOptions,
- DepthwiseConv2DOptions,
- ConcatEmbeddingsOptions,
- LSHProjectionOptions,
- Pool2DOptions,
- SVDFOptions,
- RNNOptions,
- FullyConnectedOptions,
- SoftmaxOptions,
- ConcatenationOptions,
- AddOptions,
- L2NormOptions,
- LocalResponseNormalizationOptions,
- LSTMOptions,
- ResizeBilinearOptions,
- CallOptions,
- ReshapeOptions,
- SkipGramOptions,
- SpaceToDepthOptions,
- EmbeddingLookupSparseOptions,
- MulOptions,
- PadOptions,
- GatherOptions,
- BatchToSpaceNDOptions,
- SpaceToBatchNDOptions,
- TransposeOptions,
- ReducerOptions,
- SubOptions,
- DivOptions,
- SqueezeOptions,
- SequenceRNNOptions,
- StridedSliceOptions,
- ExpOptions,
- TopKV2Options,
- SplitOptions,
- LogSoftmaxOptions,
- CastOptions,
- DequantizeOptions,
- MaximumMinimumOptions,
- ArgMaxOptions,
- LessOptions,
- NegOptions,
- PadV2Options,
- GreaterOptions,
- GreaterEqualOptions,
- LessEqualOptions,
- SelectOptions,
- SliceOptions,
- TransposeConvOptions,
- SparseToDenseOptions,
- TileOptions,
- ExpandDimsOptions,
- EqualOptions,
- NotEqualOptions,
- ShapeOptions,
- PowOptions,
- ArgMinOptions,
- FakeQuantOptions,
- PackOptions,
- LogicalOrOptions,
- OneHotOptions,
- LogicalAndOptions,
- LogicalNotOptions,
- UnpackOptions,
- FloorDivOptions,
- SquareOptions,
- ZerosLikeOptions,
- FillOptions,
- BidirectionalSequenceLSTMOptions,
- BidirectionalSequenceRNNOptions,
- UnidirectionalSequenceLSTMOptions,
- FloorModOptions,
- RangeOptions,
- ResizeNearestNeighborOptions,
- LeakyReluOptions,
- SquaredDifferenceOptions,
- MirrorPadOptions,
- AbsOptions,
- SplitVOptions,
-}
-
-enum Padding : byte { SAME, VALID }
-
-enum ActivationFunctionType : byte {
- NONE = 0,
- RELU = 1,
- RELU_N1_TO_1 = 2,
- RELU6 = 3,
- TANH = 4,
- SIGN_BIT = 5,
-}
-
-table Conv2DOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
- fused_activation_function:ActivationFunctionType;
- dilation_w_factor:int = 1;
- dilation_h_factor:int = 1;
-}
-
-table Pool2DOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
- filter_width:int;
- filter_height:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-table DepthwiseConv2DOptions {
- // Parameters for DepthwiseConv version 1 or above.
- padding:Padding;
- stride_w:int;
- stride_h:int;
- depth_multiplier:int;
- fused_activation_function:ActivationFunctionType;
- // Parameters for DepthwiseConv version 2 or above.
- dilation_w_factor:int = 1;
- dilation_h_factor:int = 1;
-}
-
-table ConcatEmbeddingsOptions {
- num_channels:int;
- num_columns_per_channel:[int];
- embedding_dim_per_channel:[int]; // This could be inferred from parameters.
-}
-
-enum LSHProjectionType: byte {
- UNKNOWN = 0,
- SPARSE = 1,
- DENSE = 2,
-}
-
-table LSHProjectionOptions {
- type: LSHProjectionType;
-}
-
-table SVDFOptions {
- rank:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow RNNCell.
-table RNNOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow dynamic_rnn with RNNCell.
-table SequenceRNNOptions {
- time_major:bool;
- fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow bidrectional_dynamic_rnn with RNNCell.
-table BidirectionalSequenceRNNOptions {
- time_major:bool;
- fused_activation_function:ActivationFunctionType;
- merge_outputs: bool;
-}
-
-enum FullyConnectedOptionsWeightsFormat: byte {
- DEFAULT = 0,
- SHUFFLED4x16INT8 = 1,
-}
-
-// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
-table FullyConnectedOptions {
- // Parameters for FullyConnected version 1 or above.
- fused_activation_function:ActivationFunctionType;
-
- // Parameters for FullyConnected version 2 or above.
- weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
-}
-
-table SoftmaxOptions {
- beta: float;
-}
-
-// An implementation of TensorFlow concat.
-table ConcatenationOptions {
- axis:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-table AddOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table MulOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table L2NormOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table LocalResponseNormalizationOptions {
- radius:int;
- bias:float;
- alpha:float;
- beta:float;
-}
-
-enum LSTMKernelType : byte {
- // Full LSTM kernel which supports peephole and projection.
- FULL = 0,
- // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
- BASIC = 1,
-}
-
-// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
-table LSTMOptions {
- // Parameters for LSTM version 1 or above.
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // Parameters for LSTM version 2 or above.
- // Basic kernel is only supported in version 2 or above.
- kernel_type: LSTMKernelType = FULL;
-}
-
-// An implementation of TensorFlow dynamic_rnn with LSTMCell.
-table UnidirectionalSequenceLSTMOptions {
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // If true then first dimension is sequence, otherwise batch.
- time_major:bool;
-}
-
-table BidirectionalSequenceLSTMOptions {
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // If true, store the outputs of both directions into the first output.
- merge_outputs: bool;
-}
-
-table ResizeBilinearOptions {
- new_height: int (deprecated);
- new_width: int (deprecated);
- align_corners: bool;
-}
-
-table ResizeNearestNeighborOptions {
- align_corners: bool;
-}
-
-// A call operation options
-table CallOptions {
- // The subgraph index that needs to be called.
- subgraph:uint;
-}
-
-table PadOptions {
-}
-
-table PadV2Options {
-}
-
-table ReshapeOptions {
- new_shape:[int];
-}
-
-table SpaceToBatchNDOptions {
-}
-
-table BatchToSpaceNDOptions {
-}
-
-table SkipGramOptions {
- ngram_size: int;
- max_skip_size: int;
- include_all_ngrams: bool;
-}
-
-table SpaceToDepthOptions {
- block_size: int;
-}
-
-table SubOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table DivOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table TopKV2Options {
-}
-
-enum CombinerType : byte {
- SUM = 0,
- MEAN = 1,
- SQRTN = 2,
-}
-
-table EmbeddingLookupSparseOptions {
- combiner:CombinerType;
-}
-
-table GatherOptions {
- axis: int;
-}
-
-table TransposeOptions {
-}
-
-table ExpOptions {
-}
-
-table ReducerOptions {
- keep_dims: bool;
-}
-
-table SqueezeOptions {
- squeeze_dims:[int];
-}
-
-table SplitOptions {
- num_splits: int;
-}
-
-table SplitVOptions {
- num_splits: int;
-}
-
-table StridedSliceOptions {
- begin_mask: int;
- end_mask: int;
- ellipsis_mask: int;
- new_axis_mask: int;
- shrink_axis_mask: int;
-}
-
-table LogSoftmaxOptions {
-}
-
-table CastOptions {
- in_data_type: TensorType;
- out_data_type: TensorType;
-}
-
-table DequantizeOptions {
-}
-
-table MaximumMinimumOptions {
-}
-
-table TileOptions {
-}
-
-table ArgMaxOptions {
- output_type : TensorType;
-}
-
-table ArgMinOptions {
- output_type : TensorType;
-}
-
-table GreaterOptions {
-}
-
-table GreaterEqualOptions {
-}
-
-table LessOptions {
-}
-
-table LessEqualOptions {
-}
-
-table NegOptions {
-}
-
-table SelectOptions {
-}
-
-table SliceOptions {
-}
-
-table TransposeConvOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
-}
-
-table ExpandDimsOptions {
-}
-
-table SparseToDenseOptions {
- validate_indices:bool;
-}
-
-table EqualOptions {
-}
-
-table NotEqualOptions {
-}
-
-table ShapeOptions {
- // Optional output type of the operation (int32 or int64). Defaults to int32.
- out_type : TensorType;
-}
-
-table PowOptions {
-}
-
-table FakeQuantOptions {
- // Parameters supported by version 1:
- min:float;
- max:float;
- num_bits:int;
-
- // Parameters supported by version 2:
- narrow_range:bool;
-}
-
-table PackOptions {
- values_count:int;
- axis:int;
-}
-
-table LogicalOrOptions {
-}
-
-table OneHotOptions {
- axis:int;
-}
-
-table AbsOptions {
-}
-
-
-table LogicalAndOptions {
-}
-
-table LogicalNotOptions {
-}
-
-table UnpackOptions {
- num:int;
- axis:int;
-}
-
-table FloorDivOptions {
-}
-
-table SquareOptions {
-}
-
-table ZerosLikeOptions {
-}
-
-table FillOptions {
-}
-
-table FloorModOptions {
-}
-
-table RangeOptions {
-}
-
-table LeakyReluOptions {
- alpha:float;
-}
-
-table SquaredDifferenceOptions {
-}
-
-enum MirrorPadMode : byte {
- // Doesn't include borders.
- REFLECT = 0,
- // Includes borders.
- SYMMETRIC = 1,
-}
-
-table MirrorPadOptions {
- mode:MirrorPadMode;
-}
-
-// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
-// builtin, or a string if the operator is custom.
-table OperatorCode {
- builtin_code:BuiltinOperator;
- custom_code:string;
-
- // The version of the operator. The version need to be bumped whenever new
- // parameters are introduced into an op.
- version:int = 1;
-}
-
-enum CustomOptionsFormat : byte {
- FLEXBUFFERS = 0,
-}
-
-// An operator takes tensors as inputs and outputs. The type of operation being
-// performed is determined by an index into the list of valid OperatorCodes,
-// while the specifics of each operations is configured using builtin_options
-// or custom_options.
-table Operator {
- // Index into the operator_codes array. Using an integer here avoids
- // complicate map lookups.
- opcode_index:uint;
-
- // Optional input and output tensors are indicated by -1.
- inputs:[int];
- outputs:[int];
-
- builtin_options:BuiltinOptions;
- custom_options:[ubyte];
- custom_options_format:CustomOptionsFormat;
-
- // A list of booleans indicating the input tensors which are being mutated by
- // this operator.(e.g. used by RNN and LSTM).
- // For example, if the "inputs" array refers to 5 tensors and the second and
- // fifth are mutable variables, then this list will contain
- // [false, true, false, false, true].
- //
- // If the list is empty, no variable is mutated in this operator.
- // The list either has the same length as `inputs`, or is empty.
- mutating_variable_inputs:[bool];
-}
-
-// The root type, defining a subgraph, which typically represents an entire
-// model.
-table SubGraph {
- // A list of all tensors used in this subgraph.
- tensors:[Tensor];
-
- // Indices of the tensors that are inputs into this subgraph. Note this is
- // the list of non-static tensors that feed into the subgraph for inference.
- inputs:[int];
-
- // Indices of the tensors that are outputs out of this subgraph. Note this is
- // the list of output tensors that are considered the product of the
- // subgraph's inference.
- outputs:[int];
-
- // All operators, in execution order.
- operators:[Operator];
-
- // Name of this subgraph (used for debugging).
- name:string;
-}
-
-// Table of raw data buffers (used for constant tensors). Referenced by tensors
-// by index. The generous alignment accommodates mmap-friendly data structures.
-table Buffer {
- data:[ubyte] (force_align: 16);
-}
-
-table Model {
- // Version of the schema.
- version:uint;
-
- // A list of all operator codes used in this model. This is
- // kept in order because operators carry an index into this
- // vector.
- operator_codes:[OperatorCode];
-
- // All the subgraphs of the model. The 0th is assumed to be the main
- // model.
- subgraphs:[SubGraph];
-
- // A description of the model.
- description:string;
-
- // Buffers of the model.
- // Note the 0th entry of this array must be an empty buffer (sentinel).
- // This is a convention so that tensors without a buffer can provide 0 as
- // their buffer.
- buffers:[Buffer];
-
- // Metadata about the model. Indirects into the existings buffers list.
- metadata_buffer:[int];
-}
-
-root_type Model;
diff --git a/runtime/onert/frontend/tflite/tflite_schema.fbs b/runtime/onert/frontend/tflite/tflite_schema.fbs
deleted file mode 100644
index 9bffb4f3c..000000000
--- a/runtime/onert/frontend/tflite/tflite_schema.fbs
+++ /dev/null
@@ -1,1095 +0,0 @@
-// Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
-// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Revision History
-// Version 0: Initial version.
-// Version 1: Add subgraphs to schema.
-// Version 2: Rename operators to conform to NN API.
-// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
-
-// Change namespace to onert_tflite
-namespace onert_tflite;
-
-// This corresponds to the version.
-file_identifier "TFL3";
-// File extension of any written files.
-file_extension "tflite";
-
-// IMPORTANT: All new members of tables, enums and unions must be added at the
-// end to ensure backwards compatibility.
-
-// The type of data stored in a tensor.
-enum TensorType : byte {
- FLOAT32 = 0,
- FLOAT16 = 1,
- INT32 = 2,
- UINT8 = 3,
- INT64 = 4,
- STRING = 5,
- BOOL = 6,
- INT16 = 7,
- COMPLEX64 = 8,
- INT8 = 9,
- FLOAT64 = 10,
-}
-
-// Custom quantization parameters for experimenting with new quantization
-// techniques.
-table CustomQuantization {
- custom:[ubyte] (force_align: 16);
-}
-
-// Represents a specific quantization technique's parameters.
-union QuantizationDetails {
- CustomQuantization,
-}
-
-// Parameters for converting a quantized tensor back to float.
-table QuantizationParameters {
- // These four parameters are the asymmetric linear quantization parameters.
- // Given a quantized value q, the corresponding float value f should be:
- // f = scale * (q - zero_point)
- // For other quantization types, the QuantizationDetails below is used.
- min:[float]; // For importing back into tensorflow.
- max:[float]; // For importing back into tensorflow.
- scale:[float]; // For dequantizing the tensor's values.
- zero_point:[long];
-
- // If this is not none, the other quantization parameters (i.e. min, max,
- // scale, zero_point fields above) are ignored and the value of the
- // QuantizationDetails union should be used.
- details:QuantizationDetails;
-
- // Specifies the dimension of the Tensor's shape that the scales and
- // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
- // with quantization params:
- // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
- // will be quantized across the second dimension of t.
- // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
- // t[:, 1, :, :] will have scale[1]=2.0, zero_point[0]=2
- // t[:, 2, :, :] will have scale[2]=3.0, zero_point[0]=3
- quantized_dimension:int;
-}
-
-// Sparse tensors.
-// We use a modification of the TACO format.
-// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
-//
-// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
-// potentially with a k-dimensional block (0 <= k <= n) with dims
-// (dn, ..., dn+k-1), the format needs to specify:
-// 1. In what order to traverse these dimensions. For example, to store a 2-D
-// matrix in row major order, the traversal order would be (d0, d1),
-// whereas to store it in column major order, the traversal order would be
-// (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
-// could be (d0, d1, d2, d3).
-// 2. How each block dimension in (dn, ..., dn+k-1) maps to the original
-// tensor dimension in (d0, ..., dn-1).
-// 3. In the traversal order defined above, the format (dense vs. sparse) and
-// index metadata for each dimension. For a dense dimension, this is just
-// the size of that dimension. For a sparse dimension, it's the same as
-// the compressed index defined in the Compressed Sparse Row (CSR) format.
-// (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
-
-// The storage type for a dimension. Currently we support:
-// 1. DENSE: each coordinate in this dimension is stored implicitly.
-// 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
-// compression technique is the same what CSR uses.
-// More types like a sparse dimension with a different compression technique
-// could be added to the list in the future.
-enum DimensionType : byte {
- DENSE = 0,
- SPARSE_CSR = 1,
-}
-
-table Int32Vector {
- values:[int];
-}
-
-table Uint16Vector {
- values:[ushort] (force_align: 4);
-}
-
-table Uint8Vector {
- values:[ubyte] (force_align: 4);
-}
-
-// Variable-typed buffer to store the index metadata for a sparse dimension.
-// The widest type is Int32 instead of UInt32 because tensor's shape is a int32
-// vector. We don't want the per-dimensional index to overflow that range.
-union SparseIndexVector {
- Int32Vector,
- Uint16Vector,
- Uint8Vector
-}
-
-table DimensionMetadata {
- // Whether a dimension is dense or sparse.
- format:DimensionType;
- // Index metadata used for a dimension.
- // - If format is DimensionType.DENSE then we use the dense_size field to
- // store the size of that dimension. Each index in that dimension is
- // stored implicitly.
- // - If format is DimensionType.SPARSE_CSR then we use array_segments and
- // array_indices to encode that dimension. array_segments represents how
- // to segment the indices array, each segment corresponds to one element
- // in the previous dimension. array_indices represents the index of the
- // non-zero elements within this dimension (as those in the CSR matrix
- // format, where the first array is row pointers and the second array is
- // column indices).
- dense_size:int;
- array_segments:SparseIndexVector;
- array_indices:SparseIndexVector;
-}
-
-// Parameters to encode a sparse TfLite tensor.
-table SparsityParameters {
- // The traversal order of the dimensions defined in the `shape` field of the
- // conceptual dense tensor. For a n-dimensional tensors with dims (d0, d1,
- // ..., dn-1),
- // - if not block sparse, the traversal_order is just a permutation of (d0,
- // ..., dn-1). For example, a 2-D matrix stored in row-major order would
- // have traversal_order = (d0, d1).
- // - if block sparse with a k-dimensional block (0 <= k <= n), the
- // traversal_order has n + k elements. The first n elements are still a
- // permutation of (d0, ..., dn-1). The lask k elements are a permutation
- // of (dn, ..., dn+k-1), defining how to traverse a block internally. For
- // example, a 2-D matrix with 2-D blocks, both stored in row-major order
- // would have traversal_order = (d0, d1, d2, d3).
- traversal_order:[int];
- // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
- // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
- // tensor dimension in (d0, ..., dn).
- // It's stored in the order of (dn, ..., dn+k-1).
- // If not block-sparse, this field is NULL.
- block_map:[int];
- // In the traversal order defined above, the metadata needed for
- // each dimension to locate the non-zero values in the original dense tensor.
- // The size of the dim_metadata array = the size of the traversal_order array
- // = n + k.
- dim_metadata:[DimensionMetadata];
-}
-
-table Tensor {
- // The tensor shape. The meaning of each entry is operator-specific but
- // builtin ops use: [batch size, height, width, number of channels] (That's
- // Tensorflow's NHWC).
- shape:[int];
- type:TensorType;
- // An index that refers to the buffers table at the root of the model. Or,
- // if there is no data buffer associated (i.e. intermediate results), then
- // this is 0 (which refers to an always existent empty buffer).
- //
- // The data_buffer itself is an opaque container, with the assumption that the
- // target device is little-endian. In addition, all builtin operators assume
- // the memory is ordered such that if `shape` is [4, 3, 2], then index
- // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
- buffer:uint;
- name:string; // For debugging and importing back into tensorflow.
- quantization:QuantizationParameters; // Optional.
-
- is_variable:bool = false;
-
- // Parameters to encode a sparse tensor. See the example in
- // tensorflow/lite/testdata/sparse_tensor.json.
- sparsity:SparsityParameters; // Optional.
-
- // Encodes `shape` with unknown dimensions. Unknown dimensions are
- // represented with -1.
- shape_signature:[int]; // Optional.
-}
-
-// A list of builtin operators. Builtin operators are slightly faster than custom
-// ones, but not by much. Moreover, while custom operators accept an opaque
-// object containing configuration parameters, builtins have a predetermined
-// set of acceptable options.
-
-enum BuiltinOperator : byte {
- ADD = 0,
- AVERAGE_POOL_2D = 1,
- CONCATENATION = 2,
- CONV_2D = 3,
- DEPTHWISE_CONV_2D = 4,
- DEPTH_TO_SPACE = 5,
- DEQUANTIZE = 6,
- EMBEDDING_LOOKUP = 7,
- FLOOR = 8,
- FULLY_CONNECTED = 9,
- HASHTABLE_LOOKUP = 10,
- L2_NORMALIZATION = 11,
- L2_POOL_2D = 12,
- LOCAL_RESPONSE_NORMALIZATION = 13,
- LOGISTIC = 14,
- LSH_PROJECTION = 15,
- LSTM = 16,
- MAX_POOL_2D = 17,
- MUL = 18,
- RELU = 19,
- // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
- // since different model developers use RELU1 in different ways. Never
- // create another op called RELU1.
- RELU_N1_TO_1 = 20,
- RELU6 = 21,
- RESHAPE = 22,
- RESIZE_BILINEAR = 23,
- RNN = 24,
- SOFTMAX = 25,
- SPACE_TO_DEPTH = 26,
- SVDF = 27,
- TANH = 28,
- // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
- CONCAT_EMBEDDINGS = 29,
- SKIP_GRAM = 30,
- CALL = 31,
- CUSTOM = 32,
- EMBEDDING_LOOKUP_SPARSE = 33,
- PAD = 34,
- UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- GATHER = 36,
- BATCH_TO_SPACE_ND = 37,
- SPACE_TO_BATCH_ND = 38,
- TRANSPOSE = 39,
- MEAN = 40,
- SUB = 41,
- DIV = 42,
- SQUEEZE = 43,
- UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- STRIDED_SLICE = 45,
- BIDIRECTIONAL_SEQUENCE_RNN = 46,
- EXP = 47,
- TOPK_V2 = 48,
- SPLIT = 49,
- LOG_SOFTMAX = 50,
- // DELEGATE is a special op type for the operations which are delegated to
- // other backends.
- // WARNING: Experimental interface, subject to change
- DELEGATE = 51,
- BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- CAST = 53,
- PRELU = 54,
- MAXIMUM = 55,
- ARG_MAX = 56,
- MINIMUM = 57,
- LESS = 58,
- NEG = 59,
- PADV2 = 60,
- GREATER = 61,
- GREATER_EQUAL = 62,
- LESS_EQUAL = 63,
- SELECT = 64,
- SLICE = 65,
- SIN = 66,
- TRANSPOSE_CONV = 67,
- SPARSE_TO_DENSE = 68,
- TILE = 69,
- EXPAND_DIMS = 70,
- EQUAL = 71,
- NOT_EQUAL = 72,
- LOG = 73,
- SUM = 74,
- SQRT = 75,
- RSQRT = 76,
- SHAPE = 77,
- POW = 78,
- ARG_MIN = 79,
- FAKE_QUANT = 80,
- REDUCE_PROD = 81,
- REDUCE_MAX = 82,
- PACK = 83,
- LOGICAL_OR = 84,
- ONE_HOT = 85,
- LOGICAL_AND = 86,
- LOGICAL_NOT = 87,
- UNPACK = 88,
- REDUCE_MIN = 89,
- FLOOR_DIV = 90,
- REDUCE_ANY = 91,
- SQUARE = 92,
- ZEROS_LIKE = 93,
- FILL = 94,
- FLOOR_MOD = 95,
- RANGE = 96,
- RESIZE_NEAREST_NEIGHBOR = 97,
- LEAKY_RELU = 98,
- SQUARED_DIFFERENCE = 99,
- MIRROR_PAD = 100,
- ABS = 101,
- SPLIT_V = 102,
- UNIQUE = 103,
- CEIL = 104,
- REVERSE_V2 = 105,
- ADD_N = 106,
- GATHER_ND = 107,
- COS = 108,
- WHERE = 109,
- RANK = 110,
- ELU = 111,
- REVERSE_SEQUENCE = 112,
- MATRIX_DIAG = 113,
- QUANTIZE = 114,
- MATRIX_SET_DIAG = 115,
- ROUND = 116,
- HARD_SWISH = 117,
- IF = 118,
- WHILE = 119,
- NON_MAX_SUPPRESSION_V4 = 120,
- NON_MAX_SUPPRESSION_V5 = 121,
- SCATTER_ND = 122,
- SELECT_V2 = 123,
- DENSIFY = 124,
- SEGMENT_SUM = 125,
- BATCH_MATMUL = 126
-}
-
-
-// Options for the builtin operators.
-union BuiltinOptions {
- Conv2DOptions,
- DepthwiseConv2DOptions,
- ConcatEmbeddingsOptions,
- LSHProjectionOptions,
- Pool2DOptions,
- SVDFOptions,
- RNNOptions,
- FullyConnectedOptions,
- SoftmaxOptions,
- ConcatenationOptions,
- AddOptions,
- L2NormOptions,
- LocalResponseNormalizationOptions,
- LSTMOptions,
- ResizeBilinearOptions,
- CallOptions,
- ReshapeOptions,
- SkipGramOptions,
- SpaceToDepthOptions,
- EmbeddingLookupSparseOptions,
- MulOptions,
- PadOptions,
- GatherOptions,
- BatchToSpaceNDOptions,
- SpaceToBatchNDOptions,
- TransposeOptions,
- ReducerOptions,
- SubOptions,
- DivOptions,
- SqueezeOptions,
- SequenceRNNOptions,
- StridedSliceOptions,
- ExpOptions,
- TopKV2Options,
- SplitOptions,
- LogSoftmaxOptions,
- CastOptions,
- DequantizeOptions,
- MaximumMinimumOptions,
- ArgMaxOptions,
- LessOptions,
- NegOptions,
- PadV2Options,
- GreaterOptions,
- GreaterEqualOptions,
- LessEqualOptions,
- SelectOptions,
- SliceOptions,
- TransposeConvOptions,
- SparseToDenseOptions,
- TileOptions,
- ExpandDimsOptions,
- EqualOptions,
- NotEqualOptions,
- ShapeOptions,
- PowOptions,
- ArgMinOptions,
- FakeQuantOptions,
- PackOptions,
- LogicalOrOptions,
- OneHotOptions,
- LogicalAndOptions,
- LogicalNotOptions,
- UnpackOptions,
- FloorDivOptions,
- SquareOptions,
- ZerosLikeOptions,
- FillOptions,
- BidirectionalSequenceLSTMOptions,
- BidirectionalSequenceRNNOptions,
- UnidirectionalSequenceLSTMOptions,
- FloorModOptions,
- RangeOptions,
- ResizeNearestNeighborOptions,
- LeakyReluOptions,
- SquaredDifferenceOptions,
- MirrorPadOptions,
- AbsOptions,
- SplitVOptions,
- UniqueOptions,
- ReverseV2Options,
- AddNOptions,
- GatherNdOptions,
- CosOptions,
- WhereOptions,
- RankOptions,
- ReverseSequenceOptions,
- MatrixDiagOptions,
- QuantizeOptions,
- MatrixSetDiagOptions,
- HardSwishOptions,
- IfOptions,
- WhileOptions,
- DepthToSpaceOptions,
- NonMaxSuppressionV4Options,
- NonMaxSuppressionV5Options,
- ScatterNdOptions,
- SelectV2Options,
- DensifyOptions,
- SegmentSumOptions,
- BatchMatMulOptions
-}
-
-enum Padding : byte { SAME, VALID }
-
-enum ActivationFunctionType : byte {
- NONE = 0,
- RELU = 1,
- RELU_N1_TO_1 = 2,
- RELU6 = 3,
- TANH = 4,
- SIGN_BIT = 5,
-}
-
-table Conv2DOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
- fused_activation_function:ActivationFunctionType;
- dilation_w_factor:int = 1;
- dilation_h_factor:int = 1;
-}
-
-table Pool2DOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
- filter_width:int;
- filter_height:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-table DepthwiseConv2DOptions {
- // Parameters for DepthwiseConv version 1 or above.
- padding:Padding;
- stride_w:int;
- stride_h:int;
- // `depth_multiplier` is redundant. It's used by CPU kernels in
- // TensorFlow 2.0 or below, but ignored in versions above.
- // See comments in lite/c/builtin_op_data.h for more details.
- depth_multiplier:int;
- fused_activation_function:ActivationFunctionType;
- // Parameters for DepthwiseConv version 2 or above.
- dilation_w_factor:int = 1;
- dilation_h_factor:int = 1;
-}
-
-table ConcatEmbeddingsOptions {
- num_channels:int;
- num_columns_per_channel:[int];
- embedding_dim_per_channel:[int]; // This could be inferred from parameters.
-}
-
-enum LSHProjectionType: byte {
- UNKNOWN = 0,
- SPARSE = 1,
- DENSE = 2,
-}
-
-table LSHProjectionOptions {
- type: LSHProjectionType;
-}
-
-table SVDFOptions {
- rank:int;
- fused_activation_function:ActivationFunctionType;
- // For weights-only quantization, use asymmetric quantization for non
- // constant inputs at evaluation time.
- asymmetric_quantize_inputs:bool;
-}
-
-// An implementation of TensorFlow RNNCell.
-table RNNOptions {
- fused_activation_function:ActivationFunctionType;
- asymmetric_quantize_inputs:bool;
-}
-
-// An implementation of TensorFlow dynamic_rnn with RNNCell.
-table SequenceRNNOptions {
- time_major:bool;
- fused_activation_function:ActivationFunctionType;
- asymmetric_quantize_inputs:bool;
-}
-
-// An implementation of TensorFlow bidrectional_dynamic_rnn with RNNCell.
-table BidirectionalSequenceRNNOptions {
- time_major:bool;
- fused_activation_function:ActivationFunctionType;
- merge_outputs: bool;
- asymmetric_quantize_inputs:bool;
-}
-
-enum FullyConnectedOptionsWeightsFormat: byte {
- DEFAULT = 0,
- SHUFFLED4x16INT8 = 1,
-}
-
-// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
-table FullyConnectedOptions {
- // Parameters for FullyConnected version 1 or above.
- fused_activation_function:ActivationFunctionType;
-
- // Parameters for FullyConnected version 2 or above.
- weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
-
- // Parameters for FullyConnected version 5 or above.
- // If set to true, then the number of dimension is preserved. Furthermore,
- // all but the last dimension of the input and output shapes will be equal.
- keep_num_dims: bool;
-
- // Parameters for FullyConnected version 7 or above.
- // If set to true, then weights-only op will use asymmetric quantization for
- // inputs.
- asymmetric_quantize_inputs: bool;
-}
-
-table SoftmaxOptions {
- beta: float;
-}
-
-// An implementation of TensorFlow concat.
-table ConcatenationOptions {
- axis:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-table AddOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table MulOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table L2NormOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table LocalResponseNormalizationOptions {
- radius:int;
- bias:float;
- alpha:float;
- beta:float;
-}
-
-enum LSTMKernelType : byte {
- // Full LSTM kernel which supports peephole and projection.
- FULL = 0,
- // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
- BASIC = 1,
-}
-
-// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
-table LSTMOptions {
- // Parameters for LSTM version 1 or above.
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // Parameters for LSTM version 2 or above.
- // Basic kernel is only supported in version 2 or above.
- kernel_type: LSTMKernelType = FULL;
-
- // Parameters for LSTM version 4 or above.
- asymmetric_quantize_inputs: bool;
-}
-
-// An implementation of TensorFlow dynamic_rnn with LSTMCell.
-table UnidirectionalSequenceLSTMOptions {
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // If true then first dimension is sequence, otherwise batch.
- time_major:bool;
-
- // Parameter for Unidirectional Sequence LSTM version 4.
- asymmetric_quantize_inputs:bool;
-}
-
-table BidirectionalSequenceLSTMOptions {
- // Parameters supported by version 1:
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // If true, store the outputs of both directions into the first output.
- merge_outputs: bool;
-
- // Parameters supported by version 2:
- // If true then first dimension is sequence, otherwise batch.
- // Version 1 implementations assumed time_major to be true, so this default
- // value should never change.
- time_major: bool = true;
-
- // Parameters for version 3 or above.
- asymmetric_quantize_inputs:bool;
-}
-
-table ResizeBilinearOptions {
- new_height: int (deprecated);
- new_width: int (deprecated);
- align_corners: bool;
- half_pixel_centers: bool;
-}
-
-table ResizeNearestNeighborOptions {
- align_corners: bool;
-}
-
-// A call operation options
-table CallOptions {
- // The subgraph index that needs to be called.
- subgraph:uint;
-}
-
-table PadOptions {
-}
-
-table PadV2Options {
-}
-
-table ReshapeOptions {
- new_shape:[int];
-}
-
-table SpaceToBatchNDOptions {
-}
-
-table BatchToSpaceNDOptions {
-}
-
-table SkipGramOptions {
- ngram_size: int;
- max_skip_size: int;
- include_all_ngrams: bool;
-}
-
-table SpaceToDepthOptions {
- block_size: int;
-}
-
-table DepthToSpaceOptions {
- block_size: int;
-}
-
-table SubOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table DivOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table TopKV2Options {
-}
-
-enum CombinerType : byte {
- SUM = 0,
- MEAN = 1,
- SQRTN = 2,
-}
-
-table EmbeddingLookupSparseOptions {
- combiner:CombinerType;
-}
-
-table GatherOptions {
- axis: int;
-}
-
-table TransposeOptions {
-}
-
-table ExpOptions {
-}
-
-table CosOptions {
-}
-
-table ReducerOptions {
- keep_dims: bool;
-}
-
-table SqueezeOptions {
- squeeze_dims:[int];
-}
-
-table SplitOptions {
- num_splits: int;
-}
-
-table SplitVOptions {
- num_splits: int;
-}
-
-table StridedSliceOptions {
- begin_mask: int;
- end_mask: int;
- ellipsis_mask: int;
- new_axis_mask: int;
- shrink_axis_mask: int;
-}
-
-table LogSoftmaxOptions {
-}
-
-table CastOptions {
- in_data_type: TensorType;
- out_data_type: TensorType;
-}
-
-table DequantizeOptions {
-}
-
-table MaximumMinimumOptions {
-}
-
-table TileOptions {
-}
-
-table ArgMaxOptions {
- output_type : TensorType;
-}
-
-table ArgMinOptions {
- output_type : TensorType;
-}
-
-table GreaterOptions {
-}
-
-table GreaterEqualOptions {
-}
-
-table LessOptions {
-}
-
-table LessEqualOptions {
-}
-
-table NegOptions {
-}
-
-table SelectOptions {
-}
-
-table SliceOptions {
-}
-
-table TransposeConvOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
-}
-
-table ExpandDimsOptions {
-}
-
-table SparseToDenseOptions {
- validate_indices:bool;
-}
-
-table EqualOptions {
-}
-
-table NotEqualOptions {
-}
-
-table ShapeOptions {
- // Optional output type of the operation (int32 or int64). Defaults to int32.
- out_type : TensorType;
-}
-
-table RankOptions {
-}
-
-table PowOptions {
-}
-
-table FakeQuantOptions {
- // Parameters supported by version 1:
- min:float;
- max:float;
- num_bits:int;
-
- // Parameters supported by version 2:
- narrow_range:bool;
-}
-
-table PackOptions {
- values_count:int;
- axis:int;
-}
-
-table LogicalOrOptions {
-}
-
-table OneHotOptions {
- axis:int;
-}
-
-table AbsOptions {
-}
-
-
-table HardSwishOptions {
-}
-
-table LogicalAndOptions {
-}
-
-table LogicalNotOptions {
-}
-
-table UnpackOptions {
- num:int;
- axis:int;
-}
-
-table FloorDivOptions {
-}
-
-table SquareOptions {
-}
-
-table ZerosLikeOptions {
-}
-
-table FillOptions {
-}
-
-table FloorModOptions {
-}
-
-table RangeOptions {
-}
-
-table LeakyReluOptions {
- alpha:float;
-}
-
-table SquaredDifferenceOptions {
-}
-
-enum MirrorPadMode : byte {
- // Doesn't include borders.
- REFLECT = 0,
- // Includes borders.
- SYMMETRIC = 1,
-}
-
-table MirrorPadOptions {
- mode:MirrorPadMode;
-}
-
-table UniqueOptions {
- idx_out_type:TensorType = INT32;
-}
-
-table ReverseV2Options {
-}
-
-table AddNOptions {
-}
-
-table GatherNdOptions {
-}
-
-table WhereOptions {
-}
-
-table ReverseSequenceOptions {
- seq_dim:int;
- batch_dim:int = 0;
-}
-
-table MatrixDiagOptions {
-}
-
-table QuantizeOptions {
-}
-
-table MatrixSetDiagOptions {
-}
-
-table IfOptions {
- then_subgraph_index:int;
- else_subgraph_index:int;
-}
-
-table WhileOptions {
- cond_subgraph_index:int;
- body_subgraph_index:int;
-}
-
-table NonMaxSuppressionV4Options {
-}
-
-table NonMaxSuppressionV5Options {
-}
-
-table ScatterNdOptions {
-}
-
-table SelectV2Options {
-}
-
-table DensifyOptions {
-}
-
-table SegmentSumOptions {
-}
-
-table BatchMatMulOptions {
- adjoint_lhs:bool;
- adjoint_rhs:bool;
-}
-
-// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
-// builtin, or a string if the operator is custom.
-table OperatorCode {
- builtin_code:BuiltinOperator;
- custom_code:string;
-
- // The version of the operator. The version need to be bumped whenever new
- // parameters are introduced into an op.
- version:int = 1;
-}
-
-enum CustomOptionsFormat : byte {
- FLEXBUFFERS = 0,
-}
-
-// An operator takes tensors as inputs and outputs. The type of operation being
-// performed is determined by an index into the list of valid OperatorCodes,
-// while the specifics of each operations is configured using builtin_options
-// or custom_options.
-table Operator {
- // Index into the operator_codes array. Using an integer here avoids
- // complicate map lookups.
- opcode_index:uint;
-
- // Optional input are indicated by -1.
- inputs:[int];
- outputs:[int];
-
- builtin_options:BuiltinOptions;
- custom_options:[ubyte];
- custom_options_format:CustomOptionsFormat;
-
- // A list of booleans indicating the input tensors which are being mutated by
- // this operator.(e.g. used by RNN and LSTM).
- // For example, if the "inputs" array refers to 5 tensors and the second and
- // fifth are mutable variables, then this list will contain
- // [false, true, false, false, true].
- //
- // If the list is empty, no variable is mutated in this operator.
- // The list either has the same length as `inputs`, or is empty.
- mutating_variable_inputs:[bool];
-
- // A list of indices to the subgraph's "tensors" that are internal to an Op.
- // Internal tensors are those that do not flow in or out of the operation,
- // but instead are part of internal computation. As such, the operation's
- // implementation may manage its memory more efficiently. They are needed
- // however (i.e. not just an implementation detail) since they are part of the
- // computation, which may require relevant metadata such as quantization
- // parameters.
- intermediates:[int];
-}
-
-// The root type, defining a subgraph, which typically represents an entire
-// model.
-table SubGraph {
- // A list of all tensors used in this subgraph.
- tensors:[Tensor];
-
- // Indices of the tensors that are inputs into this subgraph. Note this is
- // the list of non-static tensors that feed into the subgraph for inference.
- inputs:[int];
-
- // Indices of the tensors that are outputs out of this subgraph. Note this is
- // the list of output tensors that are considered the product of the
- // subgraph's inference.
- outputs:[int];
-
- // All operators, in execution order.
- operators:[Operator];
-
- // Name of this subgraph (used for debugging).
- name:string;
-}
-
-// Table of raw data buffers (used for constant tensors). Referenced by tensors
-// by index. The generous alignment accommodates mmap-friendly data structures.
-table Buffer {
- data:[ubyte] (force_align: 16);
-}
-
-table Metadata {
- // A human readable string to uniquely identify a Metadata.
- name:string;
- // An index to the buffers table.
- buffer:uint;
-}
-
-table Model {
- // Version of the schema.
- version:uint;
-
- // A list of all operator codes used in this model. This is
- // kept in order because operators carry an index into this
- // vector.
- operator_codes:[OperatorCode];
-
- // All the subgraphs of the model. The 0th is assumed to be the main
- // model.
- subgraphs:[SubGraph];
-
- // A description of the model.
- description:string;
-
- // Buffers of the model.
- // Note the 0th entry of this array must be an empty buffer (sentinel).
- // This is a convention so that tensors without a buffer can provide 0 as
- // their buffer.
- buffers:[Buffer];
-
- // Metadata about the model. Indirects into the existings buffers list.
- // Deprecated, prefer to use metadata field.
- metadata_buffer:[int];
-
- // Metadata about the model.
- metadata:[Metadata];
-}
-
-root_type Model;
diff --git a/runtime/onert/sample/CMakeLists.txt b/runtime/onert/sample/CMakeLists.txt
deleted file mode 100644
index d853ba634..000000000
--- a/runtime/onert/sample/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_subdirectory(minimal)
diff --git a/runtime/onert/sample/minimal/CMakeLists.txt b/runtime/onert/sample/minimal/CMakeLists.txt
deleted file mode 100644
index e54223e3b..000000000
--- a/runtime/onert/sample/minimal/CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-if(NOT BUILD_MINIMAL_SAMPLE)
- return()
-endif(NOT BUILD_MINIMAL_SAMPLE)
-
-list(APPEND MINIMAL_SRCS "src/minimal.cc")
-
-add_executable(onert-minimal-app ${MINIMAL_SRCS})
-target_link_libraries(onert-minimal-app nnfw-dev pthread dl)
-
-install(TARGETS onert-minimal-app DESTINATION bin)
diff --git a/runtime/onert/sample/minimal/README.md b/runtime/onert/sample/minimal/README.md
deleted file mode 100644
index fecad6fb2..000000000
--- a/runtime/onert/sample/minimal/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# minimal
-
-`minimal` is a simple driver to run `nnpackage` with nnfw API.
-
-It takes `nnpackage` as input. It uses **nnfwAPI** internally.
-
-It assumes model of float32 tensor type as an input.
-
-## Usage
-
-```
-$ ./minimal path_to_nnpackage_directory
-```
diff --git a/runtime/onert/sample/minimal/src/minimal.cc b/runtime/onert/sample/minimal/src/minimal.cc
deleted file mode 100644
index 0436b9368..000000000
--- a/runtime/onert/sample/minimal/src/minimal.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "nnfw.h"
-#include <vector>
-#include <iostream>
-
-uint64_t num_elems(const nnfw_tensorinfo *ti)
-{
- uint64_t n = 1;
- for (uint32_t i = 0; i < ti->rank; ++i)
- {
- n *= ti->dims[i];
- }
- return n;
-}
-
-int main(const int argc, char **argv)
-{
- nnfw_session *session = nullptr;
- nnfw_create_session(&session);
-
- // Loading nnpackage
- nnfw_load_model_from_file(session, argv[1]);
-
- // Use acl_neon backend for CONV_2D and acl_cl for otherwise.
- // Note that defalut backend is acl_cl
- nnfw_set_op_backend(session, "CONV_2D", "acl_neon");
-
- // Compile model
- nnfw_prepare(session);
-
- // Prepare input. Here we just allocate dummy input arrays.
- std::vector<float> input;
- nnfw_tensorinfo ti;
- nnfw_input_tensorinfo(session, 0, &ti); // get first input's info
- uint32_t input_elements = num_elems(&ti);
- input.resize(input_elements);
- // TODO: Please add initialization for your input.
- nnfw_set_input(session, 0, ti.dtype, input.data(), sizeof(float) * input_elements);
-
- // Prepare output
- std::vector<float> output;
- nnfw_output_tensorinfo(session, 0, &ti); // get first output's info
- uint32_t output_elements = num_elems(&ti);
- output.resize(output_elements);
- nnfw_set_output(session, 0, ti.dtype, output.data(), sizeof(float) * output_elements);
-
- // Do inference
- nnfw_run(session);
-
- // TODO: Please print or compare the output value in your way.
-
- nnfw_close_session(session);
-
- std::cout << "nnpackage " << argv[1] << " runs successfully." << std::endl;
- return 0;
-}
diff --git a/runtime/onert/test/CMakeLists.txt b/runtime/onert/test/CMakeLists.txt
deleted file mode 100644
index 38899976d..000000000
--- a/runtime/onert/test/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-set(TEST_ONERT test_onert)
-
-file(GLOB_RECURSE TESTS "*.cc")
-
-add_executable(${TEST_ONERT} ${TESTS})
-
-target_include_directories(${TEST_ONERT} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../core/src)
-
-target_link_libraries(${TEST_ONERT} onert_core)
-target_link_libraries(${TEST_ONERT} gtest)
-target_link_libraries(${TEST_ONERT} gtest_main)
-target_link_libraries(${TEST_ONERT} ${LIB_PTHREAD} dl)
-add_test(${TEST_ONERT} ${TEST_ONERT})
-
-install(TARGETS ${TEST_ONERT} DESTINATION unittest_standalone)
diff --git a/runtime/onert/test/core/compiler/Scheduler.cc b/runtime/onert/test/core/compiler/Scheduler.cc
deleted file mode 100644
index 50f3964db..000000000
--- a/runtime/onert/test/core/compiler/Scheduler.cc
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <compiler/HEScheduler.h>
-#include <exec/ExecTime.h>
-
-#include <ir/Shape.h>
-#include <ir/InternalType.h>
-#include <ir/TypeInfo.h>
-#include <ir/DataType.h>
-
-#include <ir/operation/BinaryArithmetic.h>
-#include <ir/operation/FullyConnected.h>
-
-#include <gtest/gtest.h>
-
-namespace
-{
-using namespace onert;
-using namespace ir;
-using namespace backend;
-using namespace operation;
-using namespace exec;
-
-//
-// Mock backends classes
-//
-
-struct MockConfigCPU : public IConfig
-{
- std::string id() override { return "cpu"; }
- bool initialize() override { return true; };
- bool supportPermutation() override { return false; }
- Layout supportLayout(const Operation &, Layout) override { return Layout::UNKNOWN; }
- bool supportDynamicTensor() override { return false; }
- bool supportFP16() override { return false; }
-};
-
-struct MockBackendCPU : public Backend
-{
- std::shared_ptr<IConfig> config() const override { return std::make_shared<MockConfigCPU>(); }
- std::unique_ptr<BackendContext>
- newContext(const Graph &, const std::shared_ptr<custom::IKernelBuilder> &, bool) const override
- {
- return std::unique_ptr<BackendContext>(
- new BackendContext{this, nullptr, nullptr, nullptr, nullptr});
- }
-};
-
-struct MockConfigGPU : public IConfig
-{
- std::string id() override { return "gpu"; }
- bool initialize() override { return true; };
- bool supportPermutation() override { return false; }
- ir::Layout supportLayout(const ir::Operation &, ir::Layout) override
- {
- return ir::Layout::UNKNOWN;
- }
- bool supportDynamicTensor() override { return false; }
- bool supportFP16() override { return false; }
-};
-
-struct MockBackendGPU : public Backend
-{
- std::shared_ptr<IConfig> config() const override { return std::make_shared<MockConfigGPU>(); }
- std::unique_ptr<BackendContext>
- newContext(const Graph &, const std::shared_ptr<custom::IKernelBuilder> &, bool) const override
- {
- return std::unique_ptr<BackendContext>(
- new BackendContext{this, nullptr, nullptr, nullptr, nullptr});
- }
-};
-
-struct MockConfigNPU : public IConfig
-{
- std::string id() override { return "npu"; }
- bool initialize() override { return true; };
- bool supportPermutation() override { return false; }
- ir::Layout supportLayout(const ir::Operation &, ir::Layout) override
- {
- return ir::Layout::UNKNOWN;
- }
- bool supportDynamicTensor() override { return false; }
- bool supportFP16() override { return false; }
-};
-
-struct MockBackendNPU : public Backend
-{
- std::shared_ptr<IConfig> config() const override { return std::make_shared<MockConfigNPU>(); }
- std::unique_ptr<BackendContext>
- newContext(const Graph &, const std::shared_ptr<custom::IKernelBuilder> &, bool) const override
- {
- return std::unique_ptr<BackendContext>(
- new BackendContext{this, nullptr, nullptr, nullptr, nullptr});
- }
-};
-
-//
-// Constants
-//
-
-const int OPERAND_ELEMS = 268203;
-const int OPERAND_SIZE = OPERAND_ELEMS * 4;
-const int OPERATION_SIZE = OPERAND_SIZE * 3;
-
-const std::string LINEAR("Linear");
-const std::string DATAFLOW("Dataflow");
-const std::string PARALLEL("Parallel");
-
-//
-// Helper functions
-//
-
-// Set executor through environment variable
-void setExecutor(const std::string &executor) { setenv("EXECUTOR", executor.c_str(), true); }
-
-// Set profiling mode through environment variable
-void setProfilingMode(const bool value) { setenv("PROFILING_MODE", value ? "1" : "0", true); }
-
-// Calculate operation size by addition sizes of all input and output operands
-uint32_t calcOpSize(const std::shared_ptr<Graph> &graph, const OperationIndex &op_idx)
-{
- uint32_t size = 0;
- const auto &op = graph->operations().at(op_idx);
- for (const auto &ind : op.getInputs() + op.getOutputs())
- size += graph->operands().at(ind).info().total_size();
- return size;
-}
-
-// Set execution operation time. This method is needed since ExecutionTime has only
-// 'updateOperationExecTime' method.
-void setOperationExecTime(ExecTime &et, const Backend *backend, const std::string &operation,
- bool quant, uint32_t op_size, int64_t time)
-{
- // You shouldn't set negative time with this method since nnfw JSON deserializer can't read it
- assert(time > 0);
- int64_t prev_time = et.getOperationExecTime(backend, operation, quant, op_size);
- int64_t time_to_set = prev_time == ExecTime::NOT_FOUND ? time : 2 * time - prev_time;
- et.updateOperationExecTime(backend, operation, quant, op_size, time_to_set);
- assert(et.getOperationExecTime(backend, operation, quant, op_size) == time);
-}
-
-// Set same execution time for all given backends/operations
-void setOperationsExecutionTime(const std::vector<const Backend *> &backends,
- const std::vector<std::string> &op_names,
- const std::vector<uint32_t> &op_sizes, int64_t exec_time)
-{
- assert(op_names.size() == op_sizes.size());
- ExecTime et(backends);
- for (int i = 0; i < op_names.size(); ++i)
- {
- for (auto &backend : backends)
- setOperationExecTime(et, backend, op_names[i], false, op_sizes[i], exec_time);
- }
- et.uploadOperationsExecTime();
-}
-
-// Set permute time from one backend to another. This method is needed since ExecutionTime has only
-// 'updatePermuteTime' method.
-void setPermutationTime(ExecTime &et, const Backend *from_backend, const Backend *to_backend,
- bool quant, uint32_t op_size, int64_t time)
-{
- // You shouldn't set negative time with this method since nnfw JSON deserializer can't read it
- assert(time > 0);
- int64_t prev_time = et.getPermuteTime(from_backend, to_backend, quant, op_size);
- int64_t time_to_set = prev_time == ExecTime::NOT_FOUND ? time : 2 * time - prev_time;
- et.updatePermuteTime(from_backend, to_backend, quant, op_size, time_to_set);
- assert(et.getPermuteTime(from_backend, to_backend, quant, op_size) == time);
-}
-
-// Set same permutation time between all given backends
-void setPermutationsExecutionTime(const std::vector<const Backend *> &backends,
- const int operand_size, const int64_t exec_time)
-{
- ExecTime et(backends);
- for (const auto &backend : backends)
- {
- for (auto &other_backend : backends)
- {
- if (backend == other_backend)
- continue;
- setPermutationTime(et, backend, other_backend, false, operand_size, exec_time);
- }
- }
- et.uploadOperationsExecTime();
-}
-
-//
-// Functions for creating graphs
-//
-
-using OIS = OperandIndexSequence;
-
-template <typename NodeT, typename... Types>
-OperationIndex create(std::shared_ptr<Graph> graph, Types &&... args)
-{
- auto op = std::make_unique<NodeT>(std::forward<Types>(args)...);
- auto op_idx = graph->addOperation(std::move(op));
- // For now in scheduler test all operations in tested graphs has same size (for simplicity)
- assert(calcOpSize(graph, op_idx) == OPERATION_SIZE);
- return op_idx;
-}
-
-// Create straight graph: Add->Sub->Mul
-std::shared_ptr<Graph> createStraightGraph()
-{
- auto graph = std::make_shared<Graph>();
- const TypeInfo float_op(DataType::FLOAT32);
-
- // Create add node
- auto add_lhs_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto add_rhs_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto add_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- BinaryArithmetic::Param add_op_params{BinaryArithmetic::ArithmeticType::ADD, Activation::NONE};
- create<BinaryArithmetic>(graph, OIS{add_lhs_idx, add_rhs_idx}, OIS{add_out_idx}, add_op_params);
-
- // Create sub node
- auto sub_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto sub_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- BinaryArithmetic::Param sub_op_params{BinaryArithmetic::ArithmeticType::SUB, Activation::NONE};
- create<BinaryArithmetic>(graph, OIS{add_out_idx, sub_const_idx}, OIS{sub_out_idx}, sub_op_params);
-
- // Create mul node
- auto mul_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto mul_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- BinaryArithmetic::Param mul_op_params{BinaryArithmetic::ArithmeticType::MUL, Activation::NONE};
- create<BinaryArithmetic>(graph, OIS{sub_out_idx, mul_const_idx}, OIS{mul_out_idx}, mul_op_params);
-
- graph->finishBuilding();
- return graph;
-}
-
-/* Create branched graph:
- * [Add]
- * // \\
- * [Mul1] [FC2]
- * || ||
- * [Mul2] [FC2]
- * \\ //
- * [Sub]
- */
-std::shared_ptr<Graph> createBranchedGraph()
-{
- auto graph = std::make_shared<Graph>();
- const TypeInfo float_op(DataType::FLOAT32);
-
- // Create add node
- auto add_lhs_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto add_rhs_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto add_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- BinaryArithmetic::Param add_op_params{BinaryArithmetic::ArithmeticType::ADD, Activation::NONE};
- create<BinaryArithmetic>(graph, OIS{add_lhs_idx, add_rhs_idx}, OIS{add_out_idx}, add_op_params);
-
- // Create mul1 node
- auto mul1_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto mul1_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- BinaryArithmetic::Param mul1_op_params{BinaryArithmetic::ArithmeticType::MUL, Activation::NONE};
- create<BinaryArithmetic>(graph, OIS{add_out_idx, mul1_const_idx}, OIS{mul1_out_idx},
- mul1_op_params);
-
- // Create mul2 node
- auto mul2_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto mul2_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- BinaryArithmetic::Param mul2_op_params{BinaryArithmetic::ArithmeticType::MUL, Activation::NONE};
- create<BinaryArithmetic>(graph, OIS{mul1_out_idx, mul2_const_idx}, OIS{mul2_out_idx},
- mul2_op_params);
-
- // Create fc1 node
- auto fc1_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto fc1_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- FullyConnected::Param fc1_op_params{Activation::NONE};
- create<FullyConnected>(graph, OIS{add_out_idx, fc1_const_idx}, OIS{fc1_out_idx}, fc1_op_params);
-
- // Create fc2 node
- auto fc2_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- auto fc2_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- FullyConnected::Param fc2_op_params{Activation::NONE};
- create<FullyConnected>(graph, OIS{fc1_out_idx, fc2_const_idx}, OIS{fc2_out_idx}, fc2_op_params);
-
- // Create sub node
- auto sub_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
- BinaryArithmetic::Param sub_op_params{BinaryArithmetic::ArithmeticType::SUB, Activation::NONE};
- create<BinaryArithmetic>(graph, OIS{mul2_out_idx, fc2_out_idx}, OIS{sub_out_idx}, sub_op_params);
-
- graph->finishBuilding();
- return graph;
-}
-
-//
-// Tests setup/teardown
-//
-
-// SetUp/TearDown methods runs before/after each test and performs actions common for each test
-class SchedulerTest : public ::testing::Test
-{
-protected:
- void SetUp() override
- {
- // Initialize mock backends
- _cpu_backend = new MockBackendCPU();
- _gpu_backend = new MockBackendGPU();
- _npu_backend = new MockBackendNPU();
- _mock_backends = {_cpu_backend, _gpu_backend, _npu_backend};
-
- // Remove previous profile data if it exists
- if (!remove("exec_time.json"))
- {
- // DO NOTHING (no profile data)
- }
-
- // Remember original value of 'EXECUTOR' environment variable
- char *executor = std::getenv("EXECUTOR");
- _original_executor = executor == nullptr ? "" : executor;
-
- // Remember original value of 'PROFILING_MODE' environment variable
- char *profiling_mode = std::getenv("PROFILING_MODE");
- _original_profiling_mode = profiling_mode == nullptr ? "" : profiling_mode;
- }
-
- void TearDown() override
- {
- delete _cpu_backend;
- delete _gpu_backend;
- delete _npu_backend;
- EXPECT_EQ(remove("exec_time.json"), 0);
- setenv("EXECUTOR", _original_executor.c_str(), true);
- setenv("PROFILING_MODE", _original_profiling_mode.c_str(), true);
- }
-
- backend::BackendContexts buildBackendContexts(const Graph &graph)
- {
- backend::BackendContexts contexts;
- for (auto backend : _mock_backends)
- {
- contexts.emplace(backend, backend->newContext(graph, nullptr, false));
- }
- return contexts;
- }
-
- const MockBackendCPU *_cpu_backend{nullptr};
- const MockBackendGPU *_gpu_backend{nullptr};
- const MockBackendNPU *_npu_backend{nullptr};
- std::vector<const Backend *> _mock_backends;
-
- std::string _original_executor;
- std::string _original_profiling_mode;
-};
-
-class SchedulerTestWithExecutorParam : public SchedulerTest,
- public testing::WithParamInterface<std::string>
-{
-};
-
-//
-// HEScheduler tests
-//
-
-// Test scheduler behavior for straight graph with known execution time of all nodes and permutes.
-TEST_P(SchedulerTestWithExecutorParam, straight_graph_known_exec_time)
-{
- setExecutor(GetParam());
-
- // Prepare graph
- ir::Subgraphs subgs;
- auto graph(createStraightGraph());
- subgs.push(ir::SubgraphIndex{0}, graph);
- OperationIndex add_op_idx(0), sub_op_idx(1), mul_op_idx(2);
-
- // Set default execution and transfer time
- setPermutationsExecutionTime(_mock_backends, OPERAND_SIZE, 1);
- setOperationsExecutionTime(_mock_backends, {"Add", "Sub", "Mul"},
- {OPERATION_SIZE, OPERATION_SIZE, OPERATION_SIZE}, 1e4);
-
- // Test 1
- // Expected behaviour: scheduler assigns different backend to each node
- {
- // For each backend reduce execution time of one node
- ExecTime et(_mock_backends);
- setOperationExecTime(et, _cpu_backend, "Add", false, OPERATION_SIZE, 1);
- setOperationExecTime(et, _gpu_backend, "Sub", false, OPERATION_SIZE, 1);
- setOperationExecTime(et, _npu_backend, "Mul", false, OPERATION_SIZE, 1);
- et.uploadOperationsExecTime();
-
- // Test scheduler
- auto backend_contexts = buildBackendContexts(*graph);
- auto scheduler = compiler::HEScheduler(backend_contexts,
- compiler::fetchCompilerOptionsFromGlobalConfig(subgs));
- const auto br = scheduler.schedule(*graph);
- ASSERT_EQ(br->getBackend(add_op_idx)->config()->id(), "cpu");
- ASSERT_EQ(br->getBackend(sub_op_idx)->config()->id(), "gpu");
- ASSERT_EQ(br->getBackend(mul_op_idx)->config()->id(), "npu");
- }
-
- // Test 2
- // Expected behaviour: scheduler assigns single backend to all nodes because of big transfer time
- {
- // Increase transfer time
- setPermutationsExecutionTime(_mock_backends, OPERAND_SIZE, 1e5);
-
- // Test scheduler
- auto backend_contexts = buildBackendContexts(*graph);
- auto scheduler = compiler::HEScheduler(backend_contexts,
- compiler::fetchCompilerOptionsFromGlobalConfig(subgs));
- const auto br = scheduler.schedule(*graph);
- ASSERT_EQ(br->getBackend(add_op_idx)->config()->id(), "cpu");
- ASSERT_EQ(br->getBackend(sub_op_idx)->config()->id(), "cpu");
- ASSERT_EQ(br->getBackend(mul_op_idx)->config()->id(), "cpu");
- }
-}
-
-// Test scheduler behavior for branched graph with known execution time of all nodes and permutes
-TEST_P(SchedulerTestWithExecutorParam, branched_graph_known_exec_time)
-{
- const int64_t NPU_ET = 5000;
- setExecutor(GetParam());
-
- // Prepare graph
- ir::Subgraphs subgs;
- auto graph(createBranchedGraph());
- subgs.push(ir::SubgraphIndex{0}, graph);
- OperationIndex add_op_idx(0), mul1_op_idx(1), mul2_op_idx(2), fc1_op_idx(3), fc2_op_idx(4),
- sub_op_idx(5);
-
- // Set default execution and transfer time
- setPermutationsExecutionTime(_mock_backends, OPERAND_SIZE, 1000);
- setOperationsExecutionTime(_mock_backends, {"Add", "Sub", "Mul", "FullyConnected"},
- {OPERATION_SIZE, OPERATION_SIZE, OPERATION_SIZE, OPERATION_SIZE}, 1e4);
-
- // Test 1
- // Expected behaviour: for dataflow and linear executors scheduler assigns fastest backend to all
- // nodes, in case of parallel executor scheduler assigns different backends to branches.
- {
- // Reduce execution time
- ExecTime et(_mock_backends);
- setOperationExecTime(et, _npu_backend, "Add", false, OPERATION_SIZE, NPU_ET);
- setOperationExecTime(et, _npu_backend, "Mul", false, OPERATION_SIZE, NPU_ET);
- setOperationExecTime(et, _npu_backend, "Sub", false, OPERATION_SIZE, NPU_ET);
- setOperationExecTime(et, _npu_backend, "FullyConnected", false, OPERATION_SIZE, NPU_ET);
- setOperationExecTime(et, _gpu_backend, "Mul", false, OPERATION_SIZE, NPU_ET + 1000);
- setOperationExecTime(et, _gpu_backend, "FullyConnected", false, OPERATION_SIZE, NPU_ET + 1000);
- et.uploadOperationsExecTime();
-
- // Test scheduler
- auto backend_contexts = buildBackendContexts(*graph);
- auto scheduler = compiler::HEScheduler(backend_contexts,
- compiler::fetchCompilerOptionsFromGlobalConfig(subgs));
- const auto br = scheduler.schedule(*graph);
-
- std::string branch1_expected_backend("npu"), branch2_expected_backend("npu");
- if (GetParam() == PARALLEL)
- {
- branch1_expected_backend =
- br->getBackend(mul1_op_idx)->config()->id() == "npu" ? "npu" : "gpu";
- branch2_expected_backend = branch1_expected_backend == "npu" ? "gpu" : "npu";
- }
-
- ASSERT_EQ(br->getBackend(add_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(mul1_op_idx)->config()->id(), branch1_expected_backend);
- ASSERT_EQ(br->getBackend(mul2_op_idx)->config()->id(), branch1_expected_backend);
- ASSERT_EQ(br->getBackend(fc1_op_idx)->config()->id(), branch2_expected_backend);
- ASSERT_EQ(br->getBackend(fc2_op_idx)->config()->id(), branch2_expected_backend);
- ASSERT_EQ(br->getBackend(sub_op_idx)->config()->id(), "npu");
- }
-
- // Test 2
- // Expected behaviour: scheduler assigns single backend to all nodes
- {
- // Increase execution time for GPU backend
- ExecTime et(_mock_backends);
- /* for parallel executor: set a time, that is larger than sum_of_other_branches_nodes_cnt *
- * npu_exec_time so that npu is prefered: the ith branch will wait for npu until it finishes the
- * [0;i-1] branches nodes in DFS order. In each branch it goes deep intul doesn't encounter
- * branching or scheduler assigns another backend to a node*/
- setOperationExecTime(et, _gpu_backend, "Mul", false, OPERATION_SIZE, NPU_ET * 3 + 1);
- setOperationExecTime(et, _gpu_backend, "FullyConnected", false, OPERATION_SIZE, NPU_ET * 3 + 1);
- et.uploadOperationsExecTime();
-
- // Test scheduler
- auto backend_contexts = buildBackendContexts(*graph);
- auto scheduler = compiler::HEScheduler(backend_contexts,
- compiler::fetchCompilerOptionsFromGlobalConfig(subgs));
- const auto br = scheduler.schedule(*graph);
- ASSERT_EQ(br->getBackend(add_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(mul1_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(mul2_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(fc1_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(fc2_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(sub_op_idx)->config()->id(), "npu");
- }
-}
-
-// SchedulerTestWithExecutorParam tests are parameterized with executor name and runs three times -
-// one time for each executor
-INSTANTIATE_TEST_CASE_P(AllExecutors, SchedulerTestWithExecutorParam,
- testing::Values(LINEAR, DATAFLOW, PARALLEL));
-
-// Test scheduler behavior for branched graph and enabled profiling mode
-TEST_F(SchedulerTest, branched_graph_profiling_mode)
-{
- const int ET = 1e5;
-
- // Turn on profiling mode
- setProfilingMode(true);
- setExecutor(DATAFLOW);
-
- // Prepare graph
- ir::Subgraphs subgs;
- auto graph(createBranchedGraph());
- subgs.push(ir::SubgraphIndex{0}, graph);
- OperationIndex add_op_idx(0), mul1_op_idx(1), mul2_op_idx(2), fc1_op_idx(3), fc2_op_idx(4),
- sub_op_idx(5);
-
- // Test 1
- // Expected behaviour: scheduler assigns backends to nodes with unknown execution time
- {
- // Set execution time for all backends/nodes except for cpu/Sub, npu/Mul, gpu/FC
- ExecTime et(_mock_backends);
- setOperationExecTime(et, _cpu_backend, "Add", false, OPERATION_SIZE, ET);
- setOperationExecTime(et, _cpu_backend, "Mul", false, OPERATION_SIZE, ET + 1);
- setOperationExecTime(et, _cpu_backend, "FullyConnected", false, OPERATION_SIZE, ET);
- setOperationExecTime(et, _npu_backend, "Add", false, OPERATION_SIZE, ET);
- setOperationExecTime(et, _npu_backend, "FullyConnected", false, OPERATION_SIZE, ET);
- setOperationExecTime(et, _npu_backend, "Sub", false, OPERATION_SIZE, ET);
- setOperationExecTime(et, _gpu_backend, "Add", false, OPERATION_SIZE, ET);
- setOperationExecTime(et, _gpu_backend, "Mul", false, OPERATION_SIZE, ET + 1);
- setOperationExecTime(et, _gpu_backend, "Sub", false, OPERATION_SIZE, ET);
- et.uploadOperationsExecTime();
-
- // Test scheduler
- auto backend_contexts = buildBackendContexts(*graph);
- auto scheduler = compiler::HEScheduler(backend_contexts,
- compiler::fetchCompilerOptionsFromGlobalConfig(subgs));
- const auto br = scheduler.schedule(*graph);
- ASSERT_EQ(br->getBackend(mul1_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(mul2_op_idx)->config()->id(), "npu");
- ASSERT_EQ(br->getBackend(fc1_op_idx)->config()->id(), "gpu");
- ASSERT_EQ(br->getBackend(fc2_op_idx)->config()->id(), "gpu");
- ASSERT_EQ(br->getBackend(sub_op_idx)->config()->id(), "cpu");
- }
-
- // Test 2
- // Expected behaviour: scheduler shuffling backends, so different backends are assigned to
- // neighbor nodes
- {
- // Set execution time for rest backends/nodes (cpu/Sub, npu/Mul, gpu/FC)
- ExecTime et(_mock_backends);
- setOperationExecTime(et, _cpu_backend, "Sub", false, OPERATION_SIZE, ET);
- setOperationExecTime(et, _npu_backend, "Mul", false, OPERATION_SIZE, ET + 1);
- setOperationExecTime(et, _gpu_backend, "FullyConnected", false, OPERATION_SIZE, ET);
- et.uploadOperationsExecTime();
-
- // Test scheduler
- auto backend_contexts = buildBackendContexts(*graph);
- auto scheduler = compiler::HEScheduler(backend_contexts,
- compiler::fetchCompilerOptionsFromGlobalConfig(subgs));
- const auto br = scheduler.schedule(*graph);
- ASSERT_NE(br->getBackend(add_op_idx)->config()->id(),
- br->getBackend(mul1_op_idx)->config()->id());
- ASSERT_NE(br->getBackend(add_op_idx)->config()->id(),
- br->getBackend(fc1_op_idx)->config()->id());
- ASSERT_NE(br->getBackend(mul1_op_idx)->config()->id(),
- br->getBackend(mul2_op_idx)->config()->id());
- ASSERT_NE(br->getBackend(fc1_op_idx)->config()->id(),
- br->getBackend(fc2_op_idx)->config()->id());
- ASSERT_NE(br->getBackend(mul2_op_idx)->config()->id(),
- br->getBackend(sub_op_idx)->config()->id());
- ASSERT_NE(br->getBackend(fc2_op_idx)->config()->id(),
- br->getBackend(sub_op_idx)->config()->id());
- }
-}
-
-// TODO: Add tests with unknown execution and permutation time
-
-} // unnamed namespace
diff --git a/runtime/onert/test/core/exec/ExecInstance.cc b/runtime/onert/test/core/exec/ExecInstance.cc
deleted file mode 100644
index 806b47ecc..000000000
--- a/runtime/onert/test/core/exec/ExecInstance.cc
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-#include <thread>
-
-#include "ir/Graph.h"
-#include "compiler/Compiler.h"
-#include "exec/Execution.h"
-#include "ir/operation/BinaryArithmetic.h"
-
-namespace
-{
-
-using namespace onert::ir;
-
-class CompiledMockUpModel
-{
-public:
- CompiledMockUpModel()
- {
- // Model: two elementwise add operation
- // model input: lhs, rhs1
- // model output: second add result (result2)
- // constant: rhs2
- // result1 <= (lhs + rhs)
- // result2 <= (result1 + rhs2)
- // lhs, rhs1, rh2, result1, result2 shape: {1, 2, 2, 1}
- // activation: none (constant)
- graph = std::make_shared<Graph>();
- // 1st add operands (result1 <= lhs + rhs1)
- Shape shape{1, 2, 2, 1};
- TypeInfo type{DataType::FLOAT32};
- static float rhs2_data[4] = {3, 1, -1, 5};
- auto operand_lhs = graph->addOperand(shape, type);
- auto operand_rhs1 = graph->addOperand(shape, type);
- auto operand_result1 = graph->addOperand(shape, type);
- auto operand_rhs2 = graph->addOperand(shape, type);
- auto operand_result2 = graph->addOperand(shape, type);
- graph->operands()
- .at(operand_rhs2)
- .data(std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data), 16));
- // 2nd add operations (result2 <= result1 + rhs2)
- operation::BinaryArithmetic::Param param1;
- param1.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
- param1.activation = Activation::NONE;
- auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
- auto output_set1 = OperandIndexSequence{operand_result1};
- graph->addOperation(
- std::make_unique<operation::BinaryArithmetic>(input_set1, output_set1, param1));
- operation::BinaryArithmetic::Param param2;
- param2.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
- param2.activation = Activation::NONE;
- auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
- auto output_set2 = OperandIndexSequence{operand_result2};
- graph->addOperation(
- std::make_unique<operation::BinaryArithmetic>(input_set2, output_set2, param2));
- // Identify model inputs and outputs
- graph->addInput(operand_lhs);
- graph->addInput(operand_rhs1);
- graph->addOutput(operand_result2);
- graph->finishBuilding();
-
- // Compile
- auto subgs = std::make_shared<onert::ir::Subgraphs>();
- subgs->push(onert::ir::SubgraphIndex{0}, graph);
- onert::compiler::Compiler compiler{subgs};
- executors = compiler.compile();
- }
-
-public:
- std::shared_ptr<Graph> graph;
- std::shared_ptr<onert::exec::ExecutorMap> executors;
-};
-
-TEST(ExecInstance, simple)
-{
- auto mockup = CompiledMockUpModel();
- auto graph = mockup.graph;
- auto executors = mockup.executors;
-
- auto input1 = IOIndex{0};
- auto input2 = IOIndex{1};
- auto output = IOIndex{0};
-
- const float input1_buffer[4] = {1, 0, -1, -2};
- const float input2_buffer[4] = {1, -3, 2, -4};
- float output_buffer[4] = {};
- const float output_expected[4] = {5, -2, 0, -1};
-
- onert::exec::Execution execution{executors};
-
- execution.setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
- execution.setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
- execution.setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
- execution.execute();
-
- for (auto i = 0; i < 4; i++)
- {
- EXPECT_EQ(output_buffer[i], output_expected[i]);
- }
-}
-
-TEST(ExecInstance, twoCompile)
-{
- auto mockup = CompiledMockUpModel();
- auto graph = mockup.graph;
- auto executors1 = mockup.executors;
- onert::exec::Execution execution1{executors1};
-
- auto input1 = IOIndex{0};
- auto input2 = IOIndex{1};
- auto output = IOIndex{0};
-
- const float exe1_input1_buffer[4] = {1, 0, -1, -2};
- const float exe1_input2_buffer[4] = {1, -3, 2, -4};
- float exe1_output_buffer[4] = {};
- const float exe1_output_expected[4] = {5, -2, 0, -1};
-
- execution1.setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
- execution1.setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
- execution1.setOutput(output, reinterpret_cast<void *>(exe1_output_buffer), 16);
-
- // Make new executor: compile again
- auto subgs = std::make_shared<onert::ir::Subgraphs>();
- subgs->push(onert::ir::SubgraphIndex{0}, graph);
- onert::compiler::Compiler compiler{subgs};
- std::shared_ptr<onert::exec::ExecutorMap> executors2 = compiler.compile();
- onert::exec::Execution execution2{executors2};
-
- const float exe2_input1_buffer[4] = {2, 1, -2, 0};
- const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
- float exe2_output_buffer[4] = {};
- const float exe2_output_expected[4] = {2, 5, -2, 7};
-
- execution2.setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
- execution2.setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
- execution2.setOutput(output, reinterpret_cast<void *>(exe2_output_buffer), 16);
-
- execution1.execute();
- execution2.execute();
-
- for (auto i = 0; i < 4; i++)
- {
- EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
- EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
- }
-}
-
-// Support two initialized execution instance then ordered execution
-TEST(ExecInstance, twoExecution)
-{
- auto mockup = CompiledMockUpModel();
- auto executors = mockup.executors;
- auto input1 = IOIndex{0};
- auto input2 = IOIndex{1};
- auto output1 = IOIndex{0};
-
- const float exe1_input1_buffer[4] = {1, 0, -1, -2};
- const float exe1_input2_buffer[4] = {1, -3, 2, -4};
- float exe1_output_buffer[4] = {};
- const float exe1_output_expected[4] = {5, -2, 0, -1};
- const float exe2_output_expected[4] = {2, 5, -2, 7};
-
- onert::exec::Execution execution1{executors};
- execution1.setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
- execution1.setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
- execution1.setOutput(output1, reinterpret_cast<void *>(exe1_output_buffer), 16);
-
- const float exe2_input1_buffer[4] = {2, 1, -2, 0};
- const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
- float exe2_output_buffer[4] = {};
-
- // Make new execution
- onert::exec::Execution execution2{executors};
- execution2.setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
- execution2.setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
- execution2.setOutput(output1, reinterpret_cast<void *>(exe2_output_buffer), 16);
-
- execution1.execute();
- execution2.execute();
-
- for (auto i = 0; i < 4; i++)
- {
- EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
- EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
- }
-}
-
-class Inference
-{
-public:
- Inference(const float (&input1)[4], const float (&input2)[4], float (&output)[4],
- std::shared_ptr<onert::exec::ExecutorMap> &executors)
- : _input1{input1}, _input2{input2}, _output{output}, _executors{executors}
- {
- // DO NOTHING
- }
-
- void inference(void)
- {
- auto input1 = IOIndex{0};
- auto input2 = IOIndex{1};
- auto output1 = IOIndex{0};
-
- onert::exec::Execution execution{_executors};
- execution.setInput(input1, reinterpret_cast<const void *>(_input1), 16);
- execution.setInput(input2, reinterpret_cast<const void *>(_input2), 16);
- execution.setOutput(output1, reinterpret_cast<void *>(_output), 16);
-
- execution.execute();
- }
-
-private:
- const float (&_input1)[4];
- const float (&_input2)[4];
- float (&_output)[4];
- std::shared_ptr<onert::exec::ExecutorMap> &_executors;
-};
-
-// Support multi-thread execution
-TEST(ExecInstance, twoThreads)
-{
- auto mockup = CompiledMockUpModel();
- auto executors = mockup.executors;
-
- const float exe1_input1_buffer[4] = {1, 0, -1, -2};
- const float exe1_input2_buffer[4] = {1, -3, 2, -4};
- float exe1_output_buffer[4] = {};
- const float exe1_output_expected[4] = {5, -2, 0, -1};
-
- Inference execution1{exe1_input1_buffer, exe1_input2_buffer, exe1_output_buffer, executors};
-
- const float exe2_input1_buffer[4] = {2, 1, -2, 0};
- const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
- float exe2_output_buffer[4] = {};
- const float exe2_output_expected[4] = {2, 5, -2, 7};
-
- Inference execution2{exe2_input1_buffer, exe2_input2_buffer, exe2_output_buffer, executors};
-
- std::thread t1{&Inference::inference, &execution1};
- std::thread t2{&Inference::inference, &execution2};
-
- t1.join();
- t2.join();
-
- for (auto i = 0; i < 4; i++)
- {
- EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
- EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
- }
-}
-
-// Support asynchronous execution
-TEST(ExecInstance, async)
-{
- auto mockup = CompiledMockUpModel();
- auto graph = mockup.graph;
- auto executors = mockup.executors;
-
- auto input1 = IOIndex{0};
- auto input2 = IOIndex{1};
- auto output = IOIndex{0};
-
- const float input1_buffer[4] = {1, 0, -1, -2};
- const float input2_buffer[4] = {1, -3, 2, -4};
- float output_buffer[4] = {};
- const float output_expected[4] = {5, -2, 0, -1};
-
- onert::exec::Execution execution{executors};
-
- execution.setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
- execution.setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
- execution.setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
- execution.startExecute();
- execution.waitFinish();
-
- for (auto i = 0; i < 4; i++)
- {
- EXPECT_EQ(output_buffer[i], output_expected[i]);
- }
-}
-
-} // namespace
diff --git a/runtime/onert/test/core/exec/ExecTime.test.cc b/runtime/onert/test/core/exec/ExecTime.test.cc
deleted file mode 100644
index 8c2e34df8..000000000
--- a/runtime/onert/test/core/exec/ExecTime.test.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exec/ExecTime.h"
-#include "backend/IConfig.h"
-#include "backend/Backend.h"
-#include <gtest/gtest.h>
-#include <string>
-
-namespace
-{
-using namespace onert;
-using namespace exec;
-using namespace backend;
-
-struct MockConfig : public IConfig
-{
- std::string id() override { return "b1"; }
- bool initialize() override { return true; };
- bool supportPermutation() override { return false; }
- ir::Layout supportLayout(const ir::Operation &, ir::Layout) override
- {
- return ir::Layout::UNKNOWN;
- }
- bool supportDynamicTensor() override { return false; }
- bool supportFP16() override { return false; }
-};
-
-struct MockBackend : public ::onert::backend::Backend
-{
- std::shared_ptr<onert::backend::IConfig> config() const override
- {
- return std::make_shared<MockConfig>();
- }
- std::unique_ptr<BackendContext> newContext(const ir::Graph &,
- const std::shared_ptr<custom::IKernelBuilder> &kb,
- bool) const override
- {
- return nullptr;
- }
-};
-
-TEST(ExecTime, roundtrip_ok)
-{
- const auto *b = new MockBackend();
- std::vector<const Backend *> bs = {b};
- {
- ExecTime et(bs);
- et.updateOperationExecTime(b, "op1", true, 100, 100);
- et.updateOperationExecTime(b, "op1", true, 200, 200);
- et.updateOperationExecTime(b, "op1", false, 100, 888);
- et.uploadOperationsExecTime();
- }
- {
- ExecTime et(bs);
- auto time = et.getOperationExecTime(b, "op1", true, 100);
- ASSERT_EQ(time, 100);
- // Check interpolation
- time = et.getOperationExecTime(b, "op1", true, 150);
- ASSERT_EQ(time, 150);
- time = et.getOperationExecTime(b, "op1", false, 100);
- ASSERT_EQ(time, 888);
- et.uploadOperationsExecTime();
- }
- // clean up
- EXPECT_EQ(remove("exec_time.json"), 0);
-}
-
-TEST(ExecTime, structure)
-{
-
- const auto *b = new MockBackend();
- std::vector<const Backend *> bs = {b};
- {
- ExecTime et(bs);
- et.updateOperationExecTime(b, "op1", true, 100, 100);
- et.updateOperationExecTime(b, "op1", true, 200, 200);
- et.uploadOperationsExecTime();
- }
- {
- ExecTime et(bs);
- auto time = et.getOperationExecTime(b, "op1", true, 100);
- ASSERT_EQ(time, 100);
- // Check interpolation
- time = et.getOperationExecTime(b, "op1", true, 200);
- ASSERT_EQ(time, 200);
- et.uploadOperationsExecTime();
- }
- // clean up
- EXPECT_EQ(remove("exec_time.json"), 0);
-}
-} // unnamed namespace
diff --git a/runtime/onert/test/core/interp/ExecManager.cc b/runtime/onert/test/core/interp/ExecManager.cc
deleted file mode 100644
index 09190bc58..000000000
--- a/runtime/onert/test/core/interp/ExecManager.cc
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include <memory>
-
-#include "ir/Graph.h"
-#include "interp/InterpExecutor.h"
-#include "exec/Execution.h"
-#include "ir/operation/BinaryArithmetic.h"
-
-namespace
-{
-
-using namespace onert::ir;
-using InterpExecutor = onert::interp::InterpExecutor;
-using Execution = onert::exec::Execution;
-using ExecutorMap = onert::exec::ExecutorMap;
-
-class InterpExecutorTest : public ::testing::Test
-{
-protected:
- virtual void SetUp() {}
- void CreateSimpleModel()
- {
- // Model: one elementwise add operation
- // model input: lhs, rhs
- // model output: add result
- // lhs, rhs, result shape: {1, 2, 2, 1}
- // activation: none (constant)
- _graph = std::make_unique<Graph>();
-
- // Add operands
-
- Shape shape{1, 2, 2, 1};
- TypeInfo type{DataType::INT32};
- Shape shape_scalar(0);
- TypeInfo type_scalar{DataType::INT32};
-
- auto operand_lhs = _graph->addOperand(shape, type);
- auto operand_rhs = _graph->addOperand(shape, type);
- auto operand_result = _graph->addOperand(shape, type);
-
- // Add operations
-
- operation::BinaryArithmetic::Param param;
- param.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
- param.activation = Activation::NONE;
- auto input_set = OperandIndexSequence{operand_lhs, operand_rhs};
- auto output_set = OperandIndexSequence{operand_result};
- _graph->addOperation(
- std::make_unique<operation::BinaryArithmetic>(input_set, output_set, param));
-
- // Identify model inputs and outputs
-
- _graph->getInputs().append(operand_lhs);
- _graph->getInputs().append(operand_rhs);
- _graph->getOutputs().append(operand_result);
-
- _graph->finishBuilding();
-
- auto subgs = std::make_shared<onert::ir::Subgraphs>();
- subgs->push(onert::ir::SubgraphIndex{0}, _graph);
- _graph->setSubgraphs(subgs);
-
- _executors = std::make_shared<ExecutorMap>();
- _executors->insert(
- std::make_pair(onert::ir::SubgraphIndex{0}, std::make_unique<InterpExecutor>(*_graph)));
- }
-
- void CreateTwoStepModel()
- {
- // Model: two elementwise add operation
- // model input: lhs, rhs1
- // model output: second add result (result2)
- // constant: rhs2
- // result1 <= (lhs + rhs)
- // result2 <= (result1 + rhs2)
- // lhs, rhs1, rh2, result1, result2 shape: {1, 2, 2, 1}
- // activation: none (constant)
- _graph = std::make_unique<Graph>();
-
- // 1st add operands (result1 <= lhs + rhs1)
-
- Shape shape{1, 2, 2, 1};
- TypeInfo type{DataType::INT32};
- Shape shape_scalar(0);
- TypeInfo type_scalar{DataType::INT32};
-
- static int32_t rhs2_data[4] = {3, 1, -1, 5};
-
- auto operand_lhs = _graph->addOperand(shape, type);
- auto operand_rhs1 = _graph->addOperand(shape, type);
- auto operand_result1 = _graph->addOperand(shape, type);
- auto operand_rhs2 = _graph->addOperand(shape, type);
- auto operand_result2 = _graph->addOperand(shape, type);
- _graph->operands()
- .at(operand_rhs2)
- .data(std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data), 16));
-
- // 2nd add operations (result2 <= result1 + rhs2)
-
- operation::BinaryArithmetic::Param param1;
- param1.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
- param1.activation = Activation::NONE;
- auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
- auto output_set1 = OperandIndexSequence{operand_result1};
- _graph->addOperation(
- std::make_unique<operation::BinaryArithmetic>(input_set1, output_set1, param1));
-
- operation::BinaryArithmetic::Param param2;
- param2.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
- param2.activation = Activation::NONE;
- auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
- auto output_set2 = OperandIndexSequence{operand_result2};
- _graph->addOperation(
- std::make_unique<operation::BinaryArithmetic>(input_set2, output_set2, param2));
-
- // Identify model inputs and outputs
-
- _graph->getInputs().append(operand_lhs);
- _graph->getInputs().append(operand_rhs1);
- _graph->getOutputs().append(operand_result2);
-
- _graph->finishBuilding();
-
- auto subgs = std::make_shared<onert::ir::Subgraphs>();
- subgs->push(onert::ir::SubgraphIndex{0}, _graph);
- _graph->setSubgraphs(subgs);
-
- _executors = std::make_shared<ExecutorMap>();
- _executors->insert(
- std::make_pair(onert::ir::SubgraphIndex{0}, std::make_unique<InterpExecutor>(*_graph)));
- }
-
- void CreateUnspecifiedDimensionsModel()
- {
- // Model: one elementwise add operation
- // model input: lhs, rhs
- // model output: add result
- // lhs, rhs, result shape: {1, unknown, 2, 1}
- // activation: none (constant)
- _graph = std::make_unique<Graph>();
-
- // Add operands
-
- Shape shape{1, 0, 2, 1};
- TypeInfo type{DataType::INT32};
- Shape shape_scalar(0);
- TypeInfo type_scalar{DataType::INT32};
-
- auto operand_lhs = _graph->addOperand(shape, type);
- auto operand_rhs = _graph->addOperand(shape, type);
-
- auto operand_activation = _graph->addOperand(shape_scalar, type_scalar);
- _graph->operands()
- .at(operand_activation)
- .data(
- std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&_activation_value), 4));
-
- auto operand_result = _graph->addOperand(shape, type);
-
- // Add operations
-
- operation::BinaryArithmetic::Param param;
- param.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
- param.activation = Activation::NONE;
- auto input_set = OperandIndexSequence{operand_lhs, operand_rhs};
- auto output_set = OperandIndexSequence{operand_result};
- _graph->addOperation(
- std::make_unique<operation::BinaryArithmetic>(input_set, output_set, param));
-
- // Identify model inputs and outputs
-
- _graph->getInputs().append(operand_lhs);
- _graph->getInputs().append(operand_rhs);
- _graph->getOutputs().append(operand_result);
-
- _graph->finishBuilding();
-
- auto subgs = std::make_shared<onert::ir::Subgraphs>();
- subgs->push(onert::ir::SubgraphIndex{0}, _graph);
- _graph->setSubgraphs(subgs);
-
- _executors = std::make_shared<ExecutorMap>();
- _executors->insert(
- std::make_pair(onert::ir::SubgraphIndex{0}, std::make_unique<InterpExecutor>(*_graph)));
- }
-
- void createExecution() { _execution = std::make_unique<Execution>(_executors); }
-
- virtual void TearDown() { _executors = nullptr; }
-
- std::shared_ptr<Graph> _graph{nullptr};
- std::shared_ptr<ExecutorMap> _executors{nullptr};
- std::unique_ptr<Execution> _execution{nullptr};
- const int32_t _activation_value{0};
-};
-
-TEST_F(InterpExecutorTest, create_empty)
-{
- Graph graph;
- graph.finishBuilding();
- auto executor = std::make_unique<InterpExecutor>(graph);
- ASSERT_NE(executor, nullptr);
-}
-
-TEST_F(InterpExecutorTest, create_simple)
-{
- CreateSimpleModel();
- ASSERT_NE(_executors, nullptr);
- ASSERT_NE(_executors->at(onert::ir::SubgraphIndex{0}), nullptr);
-}
-
-TEST_F(InterpExecutorTest, setInput)
-{
- CreateSimpleModel();
- createExecution();
-
- auto input1 = IOIndex{0};
- const int32_t input1_buffer[4] = {1, 0, -1, -2};
-
- EXPECT_THROW(_execution->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 4),
- std::runtime_error);
- EXPECT_THROW(_execution->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 12),
- std::runtime_error);
- EXPECT_NO_THROW(_execution->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16));
-}
-
-TEST_F(InterpExecutorTest, setOutput)
-{
- CreateSimpleModel();
- createExecution();
-
- auto output = IOIndex{0};
- auto output_idx = _graph->getOutputs().at(output);
-
- int32_t output_buffer[4] = {};
-
- EXPECT_THROW(_execution->setOutput(output, reinterpret_cast<void *>(output_buffer), 4),
- std::runtime_error);
- EXPECT_THROW(_execution->setOutput(output, reinterpret_cast<void *>(output_buffer), 12),
- std::runtime_error);
- EXPECT_NO_THROW(_execution->setOutput(output, reinterpret_cast<void *>(output_buffer), 16));
-}
-
-TEST_F(InterpExecutorTest, setInputForUnspecifiedDimensions)
-{
- CreateUnspecifiedDimensionsModel();
- createExecution();
-
- auto input1 = IOIndex{0};
- const int32_t input1_buffer[4] = {1, 0, -1, -2};
-
- TypeInfo operand_type{DataType::INT32};
- Shape operand_shape{1, 2, 2, 1};
-
- EXPECT_THROW(_execution->setInput(input1, operand_type, operand_shape,
- reinterpret_cast<const void *>(input1_buffer), 4),
- std::runtime_error);
- EXPECT_THROW(_execution->setInput(input1, operand_type, operand_shape,
- reinterpret_cast<const void *>(input1_buffer), 12),
- std::runtime_error);
- EXPECT_NO_THROW(_execution->setInput(input1, operand_type, operand_shape,
- reinterpret_cast<const void *>(input1_buffer), 16));
-}
-
-TEST_F(InterpExecutorTest, setOutputForUnspecifiedDimensions)
-{
- CreateUnspecifiedDimensionsModel();
- createExecution();
-
- auto output = IOIndex{0};
- auto output_idx = _graph->getOutputs().at(output);
-
- TypeInfo operand_type{DataType::INT32};
- Shape operand_shape{1, 2, 2, 1};
-
- int32_t output_buffer[4] = {};
-
- EXPECT_THROW(_execution->setOutput(output, operand_type, operand_shape,
- reinterpret_cast<void *>(output_buffer), 4),
- std::runtime_error);
- EXPECT_THROW(_execution->setOutput(output, operand_type, operand_shape,
- reinterpret_cast<void *>(output_buffer), 12),
- std::runtime_error);
- EXPECT_NO_THROW(_execution->setOutput(output, operand_type, operand_shape,
- reinterpret_cast<void *>(output_buffer), 16));
-}
-
-TEST_F(InterpExecutorTest, execute)
-{
- CreateSimpleModel();
- createExecution();
-
- auto input1 = IOIndex{0};
- auto input2 = IOIndex{1};
- auto input1_idx = _graph->getInputs().at(input1);
- auto input2_idx = _graph->getInputs().at(input2);
-
- const int32_t input1_buffer[4] = {1, 0, -1, -2};
- const int32_t input2_buffer[4] = {1, -3, 2, -4};
-
- auto output = IOIndex{0};
- auto output_idx = _graph->getOutputs().at(output);
-
- int32_t output_buffer[4] = {};
-
- EXPECT_NO_THROW(_execution->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16));
- EXPECT_NO_THROW(_execution->setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16));
- EXPECT_NO_THROW(_execution->setOutput(output, reinterpret_cast<void *>(output_buffer), 16));
- EXPECT_NO_THROW(_execution->execute());
- EXPECT_EQ(output_buffer[0], 2);
- EXPECT_EQ(output_buffer[1], -3);
- EXPECT_EQ(output_buffer[2], 1);
- EXPECT_EQ(output_buffer[3], -6);
-}
-
-TEST_F(InterpExecutorTest, executeTwoStep)
-{
- CreateTwoStepModel();
- createExecution();
-
- auto input1 = IOIndex{0};
- auto input2 = IOIndex{1};
- auto input1_idx = _graph->getInputs().at(input1);
- auto input2_idx = _graph->getInputs().at(input2);
-
- const int32_t input1_buffer[4] = {1, 0, -1, -2};
- const int32_t input2_buffer[4] = {1, -3, 2, -4};
-
- auto output = IOIndex{0};
- auto output_idx = _graph->getOutputs().at(output);
-
- int32_t output_buffer[4] = {};
-
- EXPECT_NO_THROW(_execution->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16));
- EXPECT_NO_THROW(_execution->setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16));
- EXPECT_NO_THROW(_execution->setOutput(output, reinterpret_cast<void *>(output_buffer), 16));
- EXPECT_NO_THROW(_execution->execute());
- EXPECT_EQ(output_buffer[0], 5);
- EXPECT_EQ(output_buffer[1], -2);
- EXPECT_EQ(output_buffer[2], 0);
- EXPECT_EQ(output_buffer[3], -1);
-}
-
-} // namespace
diff --git a/runtime/onert/test/graph/Graph.cc b/runtime/onert/test/graph/Graph.cc
deleted file mode 100644
index 34e9fe002..000000000
--- a/runtime/onert/test/graph/Graph.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/Graph.h"
-
-TEST(Graph, inputs_and_outputs)
-{
- onert::ir::Graph graph;
-
- onert::ir::OperandIndex index0{0u};
- onert::ir::OperandIndex index1{1u};
-
- graph.addInput({index0});
- graph.addInput({index1});
-
- onert::ir::OperandIndex index10{10u};
- onert::ir::OperandIndex index11{11u};
- onert::ir::OperandIndex index12{12u};
-
- graph.addOutput({index10});
- graph.addOutput({index11});
- graph.addOutput({index12});
-
- ASSERT_EQ(graph.getInputs().size(), 2);
- ASSERT_EQ(graph.getOutputs().size(), 3);
-
- onert::ir::IOIndex io_index0{0};
- onert::ir::IOIndex io_index1{1};
- onert::ir::IOIndex io_index2{2};
-
- ASSERT_EQ(graph.getInputs().at(io_index0), 0);
- ASSERT_EQ(graph.getInputs().at(io_index1), 1);
-
- ASSERT_EQ(graph.getOutputs().at(io_index0), 10);
- ASSERT_EQ(graph.getOutputs().at(io_index1), 11);
- ASSERT_EQ(graph.getOutputs().at(io_index2), 12);
-}
diff --git a/runtime/onert/test/graph/Index.cc b/runtime/onert/test/graph/Index.cc
deleted file mode 100644
index 2d110e326..000000000
--- a/runtime/onert/test/graph/Index.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "util/Index.h"
-
-using Index = ::onert::util::Index<uint32_t, struct TestTag>;
-
-TEST(Index, neg_index_test)
-{
- Index idx1{1u};
- Index idx2{2u};
- Index idx3{idx1};
-
- ASSERT_EQ(idx1, 1);
- ASSERT_EQ(idx1, 1u);
- ASSERT_EQ(idx1.value(), 1u);
- ASSERT_NE(idx1, idx2);
- ASSERT_EQ(idx1, idx3);
-}
diff --git a/runtime/onert/test/graph/MockNode.h b/runtime/onert/test/graph/MockNode.h
deleted file mode 100644
index 60b4719ed..000000000
--- a/runtime/onert/test/graph/MockNode.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ONERT_TEST_GRAPH_MOCK_NODE_H__
-#define __ONERT_TEST_GRAPH_MOCK_NODE_H__
-
-#include "ir/Operation.h"
-#include "ir/OperandIndexSequence.h"
-
-namespace onert_test
-{
-namespace ir
-{
-
-class SimpleMock : public onert::ir::Operation
-{
-public:
- SimpleMock(const onert::ir::OperandIndexSequence &inputs,
- const onert::ir::OperandIndexSequence &outputs)
- : Operation{onert::ir::OperandConstraint::createAny()}
- {
- setInputs(inputs);
- setOutputs(outputs);
- }
-
-public:
- void accept(onert::ir::OperationVisitor &) const override {}
- onert::ir::OpCode opcode() const final { return onert::ir::OpCode::Invalid; }
-};
-
-} // namespace ir
-} // namespace onert_test
-
-#endif // __ONERT_TEST_GRAPH_MOCK_NODE_H__
diff --git a/runtime/onert/test/graph/operand/IndexSet.cc b/runtime/onert/test/graph/operand/IndexSet.cc
deleted file mode 100644
index 6ef425a2d..000000000
--- a/runtime/onert/test/graph/operand/IndexSet.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/OperandIndexSequence.h"
-
-using onert::ir::OperandIndex;
-using onert::ir::OperandIndexSequence;
-
-TEST(graph_OperandIndexSequence, neg_append)
-{
- OperandIndexSequence iset{0, 2, 4, 8};
-
- ASSERT_EQ(iset.size(), 4);
-
- iset.append(OperandIndex{10});
-
- ASSERT_EQ(iset.size(), 5);
-
- onert::ir::IOIndex index1{1};
- onert::ir::IOIndex index2{4};
-
- ASSERT_EQ(iset.at(index1), 2);
- ASSERT_EQ(iset.at(index2), 10);
-
- ASSERT_TRUE(iset.contains(OperandIndex{2}));
- ASSERT_TRUE(iset.contains(OperandIndex{10}));
- ASSERT_FALSE(iset.contains(OperandIndex{11}));
-}
-
-TEST(graph_OperandIndexSequence, neg_replace)
-{
- OperandIndexSequence iset{0, 1, 2, 3};
-
- iset.replace(OperandIndex{1}, OperandIndex{9});
- ASSERT_FALSE(iset.contains(OperandIndex{1}));
- ASSERT_TRUE(iset.contains(OperandIndex{9}));
-}
diff --git a/runtime/onert/test/graph/operand/LayoutSet.cc b/runtime/onert/test/graph/operand/LayoutSet.cc
deleted file mode 100644
index ef965a41e..000000000
--- a/runtime/onert/test/graph/operand/LayoutSet.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/LayoutSet.h"
-
-using onert::ir::Layout;
-using onert::ir::LayoutSet;
-
-TEST(graph_operand_LayoutSet, neg_add_remove)
-{
- LayoutSet set{Layout::NCHW};
- set.remove(Layout::NHWC);
- ASSERT_EQ(set.size(), 1);
- set.add(Layout::NHWC);
- ASSERT_EQ(set.size(), 2);
- set.remove(Layout::NHWC);
- ASSERT_EQ(set.size(), 1);
- set.remove(Layout::NCHW);
- ASSERT_EQ(set.size(), 0);
- set.remove(Layout::NCHW);
- ASSERT_EQ(set.size(), 0);
-}
-
-TEST(graph_operand_LayoutSet, set_operators)
-{
- LayoutSet set1{Layout::NCHW};
- LayoutSet set2{Layout::NHWC};
- LayoutSet set3 = set1 | set2;
-
- ASSERT_EQ(set3.size(), 2);
-
- ASSERT_EQ((set3 - set1).size(), 1);
- ASSERT_EQ((set3 - set1).contains(Layout::NHWC), true);
- ASSERT_EQ((set3 - set2).size(), 1);
- ASSERT_EQ((set3 - set2).contains(Layout::NCHW), true);
- ASSERT_EQ((set3 - set3).size(), 0);
-
- ASSERT_EQ((set3 & set1).size(), 1);
- ASSERT_EQ((set3 & set1).contains(Layout::NCHW), true);
- ASSERT_EQ((set3 & set2).size(), 1);
- ASSERT_EQ((set3 & set2).contains(Layout::NHWC), true);
- ASSERT_EQ((set1 & set2).size(), 0);
-}
diff --git a/runtime/onert/test/graph/operand/Set.cc b/runtime/onert/test/graph/operand/Set.cc
deleted file mode 100644
index ffee417b8..000000000
--- a/runtime/onert/test/graph/operand/Set.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/Operands.h"
-
-TEST(graph_operand_Set, neg_set_test)
-{
- onert::ir::Operands set;
-
- onert::ir::Shape shape0{1, 2, 3};
-
- onert::ir::Shape shape1(4);
- shape1.dim(0) = 10;
- shape1.dim(1) = 20;
- shape1.dim(2) = 30;
- shape1.dim(3) = 40;
-
- onert::ir::TypeInfo type{onert::ir::DataType::INT32};
-
- set.emplace(shape0, type);
- set.emplace(shape1, type);
-
- ASSERT_EQ(set.exist(onert::ir::OperandIndex{0u}), true);
- ASSERT_EQ(set.exist(onert::ir::OperandIndex{1u}), true);
- ASSERT_EQ(set.exist(onert::ir::OperandIndex{2u}), false);
-
- ASSERT_EQ(set.at(onert::ir::OperandIndex{0u}).shape().dim(0), 1);
- ASSERT_EQ(set.at(onert::ir::OperandIndex{0u}).shape().dim(1), 2);
- ASSERT_EQ(set.at(onert::ir::OperandIndex{0u}).shape().dim(2), 3);
-}
diff --git a/runtime/onert/test/graph/operand/UseDef.cc b/runtime/onert/test/graph/operand/UseDef.cc
deleted file mode 100644
index a8686eb18..000000000
--- a/runtime/onert/test/graph/operand/UseDef.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/Graph.h"
-#include "ir/verifier/Verifier.h"
-#include <memory>
-#include "../MockNode.h"
-
-#include <typeindex>
-
-namespace
-{
-
-using IndexSet = onert::ir::OperandIndexSequence;
-using Mock = onert_test::ir::SimpleMock;
-
-} // namespace
-
-TEST(graph_operand_usedef, neg_usedef_test)
-{
- onert::ir::Graph graph;
- onert::ir::verifier::DAGChecker verifier;
-
- onert::ir::Shape shape(3);
- onert::ir::TypeInfo type{onert::ir::DataType::INT32};
-
- // Model Input/Output
- auto input_operand = graph.addOperand(shape, type);
- auto output_operand = graph.addOperand(shape, type);
-
- graph.addInput(input_operand);
- graph.addOutput(output_operand);
-
- // MockNode1
- auto operand_index1 = graph.addOperand(shape, type);
- auto mocknode_index1 =
- graph.addOperation(std::make_unique<Mock>(IndexSet{input_operand}, IndexSet{operand_index1}));
-
- // MockNode2
- auto operand_index2 = graph.addOperand(shape, type);
- auto mocknode_index2 =
- graph.addOperation(std::make_unique<Mock>(IndexSet{input_operand}, IndexSet{operand_index2}));
-
- // MockNode3(two input)
- auto multiinput_index = graph.addOperation(
- std::make_unique<Mock>(IndexSet{operand_index1, operand_index2}, IndexSet{output_operand}));
-
- graph.finishBuilding();
-
- ASSERT_TRUE(verifier.verify(graph));
-
- // Check def
- ASSERT_EQ(graph.operands().at(operand_index1).getDef(), mocknode_index1);
- ASSERT_EQ(graph.operands().at(operand_index2).getDef(), mocknode_index2);
- ASSERT_EQ(graph.operands().at(output_operand).getDef(), multiinput_index);
-
- ASSERT_NE(graph.operands().at(operand_index1).getDef(), mocknode_index2);
- ASSERT_NE(graph.operands().at(operand_index1).getDef(), multiinput_index);
-
- // Check use
- ASSERT_EQ(graph.operands().at(input_operand).getUses().contains(mocknode_index1), true);
- ASSERT_EQ(graph.operands().at(input_operand).getUses().contains(mocknode_index2), true);
- ASSERT_EQ(graph.operands().at(input_operand).getUses().contains(multiinput_index), false);
- ASSERT_EQ(graph.operands().at(operand_index1).getUses().contains(multiinput_index), true);
- ASSERT_EQ(graph.operands().at(operand_index2).getUses().contains(multiinput_index), true);
-
- ASSERT_EQ(graph.operands().at(input_operand).getUses().size(), 2);
- ASSERT_EQ(graph.operands().at(operand_index1).getUses().size(), 1);
- ASSERT_EQ(graph.operands().at(output_operand).getUses().size(), 0);
-}
diff --git a/runtime/onert/test/graph/operation/Set.cc b/runtime/onert/test/graph/operation/Set.cc
deleted file mode 100644
index 088c44b8c..000000000
--- a/runtime/onert/test/graph/operation/Set.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "../MockNode.h"
-#include "ir/Operations.h"
-
-using onert::ir::Operation;
-using onert::ir::OperationIndex;
-using onert::ir::Operations;
-
-TEST(graph_operation_Set, operation_test)
-{
- Operations ops;
- ops.push(std::unique_ptr<Operation>(new onert_test::ir::SimpleMock({1, 2, 3, 4}, {5, 6, 7})));
- OperationIndex idx{0u};
- ASSERT_EQ(ops.at(idx).getInputs().size(), 4);
- ASSERT_EQ(ops.at(idx).getOutputs().size(), 3);
-}
diff --git a/runtime/onert/test/graph/operation/SetIO.cc b/runtime/onert/test/graph/operation/SetIO.cc
deleted file mode 100644
index 22068ff58..000000000
--- a/runtime/onert/test/graph/operation/SetIO.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/Graph.h"
-#include "ir/Index.h"
-#include "ir/OperandIndexSequence.h"
-#include "ir/operation/Conv2D.h"
-#include "ir/operation/Concat.h"
-
-#include <memory>
-
-#include <stdexcept>
-
-using Index = onert::ir::IOIndex;
-using IndexSet = onert::ir::OperandIndexSequence;
-
-TEST(graph_operation_setIO, operation_setIO_conv)
-{
- onert::ir::Graph graph;
-
- onert::ir::Shape shape{3};
- onert::ir::TypeInfo type{onert::ir::DataType::INT32};
-
- // Add Conv
- using Graph = onert::ir::operation::Conv2D;
-
- auto input_operand = graph.addOperand(shape, type);
- auto kernel_operand = graph.addOperand(shape, type);
- auto bias_operand = graph.addOperand(shape, type);
- IndexSet inputs{input_operand, kernel_operand, bias_operand};
-
- Graph::Param conv_params;
- conv_params.padding.type = onert::ir::PaddingType::SAME;
- conv_params.stride.horizontal = 1;
- conv_params.stride.vertical = 1;
- conv_params.activation = onert::ir::Activation::NONE;
-
- auto output_operand = graph.addOperand(shape, type).value();
- IndexSet outputs{output_operand};
-
- auto conv = std::make_unique<Graph>(inputs, outputs, conv_params);
-
- ASSERT_NE(conv, nullptr);
- ASSERT_EQ(conv->getInputs().at(Index{0}).value(), inputs.at(0).value());
- conv->setInputs({8, 9, 10});
- ASSERT_NE(conv->getInputs().at(Index{0}).value(), inputs.at(0).value());
- ASSERT_EQ(conv->getInputs().at(Index{0}).value(), 8);
-}
-
-TEST(graph_operation_setIO, neg_operation_setIO_concat)
-{
- onert::ir::Graph graph;
-
- onert::ir::Shape shape{3};
-
- onert::ir::TypeInfo type{onert::ir::DataType::INT32};
-
- using Graph = onert::ir::operation::Concat;
-
- // Add Concat
- IndexSet inputs;
- for (int i = 0; i < 6; ++i)
- {
- inputs.append(graph.addOperand(shape, type));
- }
-
- Graph::Param concat_params{0};
-
- auto output_operand = graph.addOperand(shape, type).value();
- IndexSet outputs{output_operand};
-
- auto concat = std::make_unique<Graph>(inputs, outputs, concat_params);
-
- ASSERT_NE(concat, nullptr);
- ASSERT_EQ(concat->getInputs().size(), 6);
- ASSERT_EQ(concat->getInputs().at(Index{0}).value(), inputs.at(0).value());
-
- concat->setInputs({80, 6, 9, 11});
- ASSERT_EQ(concat->getInputs().size(), 4);
- ASSERT_NE(concat->getInputs().at(Index{0}).value(), inputs.at(0).value());
- ASSERT_EQ(concat->getInputs().at(Index{0}).value(), 80);
- ASSERT_EQ(concat->getInputs().at(Index{2}).value(), 9);
- ASSERT_THROW(concat->getInputs().at(Index{5}), std::out_of_range);
-}
diff --git a/runtime/onert/test/graph/verifier/Verifier.cc b/runtime/onert/test/graph/verifier/Verifier.cc
deleted file mode 100644
index 3bce2746c..000000000
--- a/runtime/onert/test/graph/verifier/Verifier.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/Operation.h"
-#include "ir/Graph.h"
-#include "ir/verifier/Verifier.h"
-#include <memory>
-#include "ir/Operand.h"
-#include "../MockNode.h"
-
-using IndexSet = onert::ir::OperandIndexSequence;
-using Mock = onert_test::ir::SimpleMock;
-
-TEST(Verifier, dag_checker)
-{
- onert::ir::Graph graph;
-
- onert::ir::Shape shape{3};
- onert::ir::TypeInfo type{onert::ir::DataType::INT32};
-
- auto operand1 = graph.addOperand(shape, type);
- auto operand2 = graph.addOperand(shape, type);
-
- graph.addInput(operand1);
- graph.addOutput(operand2);
-
- graph.addOperation(std::make_unique<Mock>(IndexSet{operand1}, IndexSet{operand2}));
-
- graph.finishBuilding();
-
- onert::ir::verifier::DAGChecker verifier;
-
- ASSERT_TRUE(verifier.verify(graph));
-}
-
-TEST(Verifier, neg_edge_consistency_checker_1)
-{
- onert::ir::Graph graph;
-
- onert::ir::Shape shape{3};
- onert::ir::TypeInfo type{onert::ir::DataType::INT32};
-
- auto operand1 = graph.addOperand(shape, type);
- auto operand2 = graph.addOperand(shape, type);
-
- graph.addInput(operand1);
- graph.addOutput(operand2);
-
- auto mock_op = std::make_unique<Mock>(IndexSet{operand1}, IndexSet{operand2});
- auto op_ind = graph.addOperation(std::move(mock_op));
-
- graph.finishBuilding();
-
- graph.operands().at(operand1).removeUse(op_ind); // Manipulate the operand alone
-
- onert::ir::verifier::EdgeConsistencyChecker verifier;
- ASSERT_FALSE(verifier.verify(graph));
-}
-
-TEST(Verifier, neg_edge_consistency_checker_2)
-{
- onert::ir::Graph graph;
-
- onert::ir::Shape shape{3};
- onert::ir::TypeInfo type{onert::ir::DataType::INT32};
-
- auto operand1 = graph.addOperand(shape, type);
- auto operand2 = graph.addOperand(shape, type);
-
- graph.addInput(operand1);
- graph.addOutput(operand2);
-
- auto mock_op = std::make_unique<Mock>(IndexSet{operand1}, IndexSet{operand2});
- auto mock_op_ptr = mock_op.get();
- auto op_ind = graph.addOperation(std::move(mock_op));
-
- graph.finishBuilding();
-
- mock_op_ptr->setInputs({operand2}); // Manipulate the operation alone
-
- onert::ir::verifier::EdgeConsistencyChecker verifier;
- ASSERT_FALSE(verifier.verify(graph));
-}
diff --git a/runtime/onert/test/ir/Shape.cc b/runtime/onert/test/ir/Shape.cc
deleted file mode 100644
index c24aeda8d..000000000
--- a/runtime/onert/test/ir/Shape.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <ir/Shape.h>
-
-#include <gtest/gtest.h>
-
-TEST(ShapeTest, basic_test)
-{
- {
- onert::ir::Shape shape(3);
-
- shape.dim(0) = 1;
- shape.dim(1) = 2;
- shape.dim(2) = 3;
-
- ASSERT_EQ(shape.rank(), 3);
- ASSERT_EQ(shape.num_elements(), 6);
- ASSERT_EQ(onert::ir::rankMaybeUnspecified(shape), false);
- ASSERT_EQ(shape.hasUnspecifiedDims(), false);
- }
- {
- onert::ir::Shape shape; // scalar or rank is unspecified
-
- ASSERT_EQ(shape.rank(), 0);
- ASSERT_EQ(shape.num_elements(), 1);
- ASSERT_EQ(onert::ir::rankMaybeUnspecified(shape), true);
- ASSERT_EQ(shape.hasUnspecifiedDims(), false);
- }
-}
-
-TEST(ShapeTest, neg_basic_test)
-{
- {
- onert::ir::Shape shape(2);
-
- shape.dim(0) = 1;
- shape.dim(1) = onert::ir::Shape::UNSPECIFIED_DIM;
-
- ASSERT_EQ(shape.rank(), 2);
- ASSERT_EQ(onert::ir::rankMaybeUnspecified(shape), false);
- ASSERT_EQ(shape.hasUnspecifiedDims(), true);
- EXPECT_ANY_THROW(shape.num_elements());
- }
-}
diff --git a/runtime/onert/test/util/ObjectManager.cc b/runtime/onert/test/util/ObjectManager.cc
deleted file mode 100644
index 5051bcfa6..000000000
--- a/runtime/onert/test/util/ObjectManager.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "util/ObjectManager.h"
-#include "util/Index.h"
-
-using namespace onert;
-
-struct TestTag;
-using Index = typename util::Index<uint32_t, TestTag>;
-
-TEST(ObjectManager, emplace)
-{
- util::ObjectManager<Index, int> man;
-
- auto index = man.emplace(100);
- ASSERT_EQ(man.at(index), 100);
-}
-
-TEST(ObjectManager, remove_1)
-{
- util::ObjectManager<Index, int> man;
-
- Index index = man.emplace(100);
- ASSERT_TRUE(man.exist(index));
- ASSERT_EQ(man.at(index), 100);
-
- man.remove(index);
- ASSERT_FALSE(man.exist(index));
-}
-
-TEST(ObjectManager, remove_2)
-{
- util::ObjectManager<Index, int> man;
-
- auto index0 = man.emplace(100);
- auto index1 = man.emplace(200);
- ASSERT_TRUE(man.exist(index0));
- ASSERT_EQ(man.at(index0), 100);
- ASSERT_TRUE(man.exist(index1));
- ASSERT_EQ(man.at(index1), 200);
-
- man.remove(index0);
- ASSERT_FALSE(man.exist(index0));
- ASSERT_TRUE(man.exist(index1));
- ASSERT_EQ(man.at(index1), 200);
-}
-
-TEST(ObjectManager, push)
-{
- util::ObjectManager<Index, int> man;
-
- auto index = man.push(std::unique_ptr<int>{new int{100}});
- ASSERT_EQ(man.at(index), 100);
-}
-
-TEST(ObjectManager, const_iterate)
-{
- util::ObjectManager<Index, int> man;
-
- auto index0 = man.emplace(100);
- auto index1 = man.emplace(200);
- auto index2 = man.emplace(300);
-
- int sum = 0;
- man.iterate([&](const Index &index, const int &val) { sum += val; });
- ASSERT_EQ(sum, 600);
-}
-
-TEST(ObjectManager, non_const_iterate)
-{
- util::ObjectManager<Index, int> man;
-
- auto index0 = man.emplace(100);
- auto index1 = man.emplace(200);
- auto index2 = man.emplace(300);
-
- man.iterate([&](const Index &index, int &val) { val += 1; });
- ASSERT_EQ(man.at(index0), 101);
- ASSERT_EQ(man.at(index1), 201);
- ASSERT_EQ(man.at(index2), 301);
-}
diff --git a/runtime/onert/test/util/ShapeInference.cc b/runtime/onert/test/util/ShapeInference.cc
deleted file mode 100644
index a5f0af5ee..000000000
--- a/runtime/onert/test/util/ShapeInference.cc
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "ir/Layout.h"
-#include "util/ShapeInference.h"
-
-using namespace onert::ir;
-
-TEST(ShapeInference, Elementwise)
-{
- Shape lhs_shape{1, 299, 299, 3};
- Shape rhs_shape{3};
- auto infered_out_shape = onert::shape_inference::inferEltwiseShape(lhs_shape, rhs_shape);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.dim(0), 1);
- ASSERT_EQ(infered_out_shape.dim(1), 299);
- ASSERT_EQ(infered_out_shape.dim(2), 299);
- ASSERT_EQ(infered_out_shape.dim(3), 3);
-}
-
-TEST(ShapeInference, neg_Elementwise)
-{
- Shape lhs_shape{1, 299, 299, 3};
- Shape rhs_shape{5, 3};
- ASSERT_THROW(onert::shape_inference::inferEltwiseShape(lhs_shape, rhs_shape), std::runtime_error);
-}
-
-TEST(ShapeInference, Pool2DNodeSame)
-{
- Shape in_shape{10, 6, 12, 20};
- Stride stride{3, 7};
- Padding padding{PaddingType::SAME};
-
- operation::Pool2D::Param avg_pool_param{
- operation::Pool2D::PoolType::AVG, 3, 6, stride, padding, Activation::NONE};
- auto infered_out_shape = onert::shape_inference::inferPoolShape(in_shape, avg_pool_param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 20);
-
- operation::Pool2D::Param max_pool_param{
- operation::Pool2D::PoolType::MAX, 3, 6, stride, padding, Activation::NONE};
- infered_out_shape = onert::shape_inference::inferPoolShape(in_shape, max_pool_param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 20);
-}
-
-TEST(ShapeInference, Pool2DNodeValid)
-{
- Shape in_shape{10, 6, 12, 20};
- Stride stride{3, 7};
- Padding padding{PaddingType::VALID};
-
- operation::Pool2D::Param avg_pool_param{
- operation::Pool2D::PoolType::AVG, 3, 6, stride, padding, Activation::NONE};
- auto infered_out_shape = onert::shape_inference::inferPoolShape(in_shape, avg_pool_param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 1);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 20);
-
- operation::Pool2D::Param max_pool_param{
- operation::Pool2D::PoolType::MAX, 3, 6, stride, padding, Activation::NONE};
- infered_out_shape = onert::shape_inference::inferPoolShape(in_shape, max_pool_param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 1);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 20);
-}
-
-TEST(ShapeInference, Pool2DNodeExplicit)
-{
- Shape in_shape{10, 3, 5, 20};
-
- Stride stride{3, 7};
- Padding padding{4, 3, 2, 1};
-
- operation::Pool2D::Param avg_pool_param{
- operation::Pool2D::PoolType::AVG, 3, 6, stride, padding, Activation::NONE};
- auto infered_out_shape = onert::shape_inference::inferPoolShape(in_shape, avg_pool_param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 1);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 20);
-
- operation::Pool2D::Param max_pool_param{
- operation::Pool2D::PoolType::MAX, 3, 6, stride, padding, Activation::NONE};
- infered_out_shape = onert::shape_inference::inferPoolShape(in_shape, max_pool_param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 1);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 20);
-}
-
-TEST(ShapeInference, neg_Pool2DNode_InvalidStride)
-{
- Shape in_shape{10, 6, 12, 20};
- Stride stride{0, 7};
- Padding padding{PaddingType::SAME};
-
- operation::Pool2D::Param avg_pool_param{
- operation::Pool2D::PoolType::AVG, 3, 6, stride, padding, Activation::NONE};
- ASSERT_THROW(onert::shape_inference::inferPoolShape(in_shape, avg_pool_param),
- std::runtime_error);
-}
-
-TEST(ShapeInference, Conv2D)
-{
- Shape in_shape{10, 6, 12, 20};
- Shape ker_shape{30, 3, 6, 20};
-
- operation::Conv2D::Param param{Stride{3, 7}, Padding{PaddingType::VALID}, Activation::NONE,
- Dilation{1, 1}};
- auto infered_out_shape = onert::shape_inference::inferConv2DShape(in_shape, ker_shape, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 1);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 30);
-
- param = operation::Conv2D::Param{Stride{3, 7}, Padding{PaddingType::SAME}, Activation::NONE,
- Dilation{1, 1}};
- infered_out_shape = onert::shape_inference::inferConv2DShape(in_shape, ker_shape, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 30);
-
- param =
- operation::Conv2D::Param{Stride{3, 7}, Padding{4, 3, 2, 1}, Activation::NONE, Dilation{1, 1}};
- infered_out_shape = onert::shape_inference::inferConv2DShape(in_shape, ker_shape, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 3);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 30);
-}
-
-TEST(ShapeInference, neg_Conv2D_InvalidStride)
-{
- Shape in_shape{10, 6, 12, 20};
- Shape ker_shape{30, 3, 6, 20};
-
- operation::Conv2D::Param param{Stride{0, 0}, Padding{PaddingType::VALID}, Activation::NONE,
- Dilation{1, 1}};
- ASSERT_THROW(onert::shape_inference::inferConv2DShape(in_shape, ker_shape, param),
- std::runtime_error);
-}
-
-TEST(ShapeInference, DepthwiseConv2D)
-{
- Shape in_shape{10, 6, 12, 20};
- Shape ker_shape{1, 3, 6, 60};
-
- operation::DepthwiseConv2D::Param param{Stride{3, 7}, Padding{PaddingType::VALID}, 3,
- Activation::NONE};
- auto infered_out_shape =
- onert::shape_inference::inferDepthwiseConv2DShape(in_shape, ker_shape, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 1);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 60);
-
- param = operation::DepthwiseConv2D::Param{Stride{3, 7}, Padding{PaddingType::SAME}, 3,
- Activation::NONE};
- infered_out_shape = onert::shape_inference::inferDepthwiseConv2DShape(in_shape, ker_shape, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 60);
-
- param = operation::DepthwiseConv2D::Param{Stride{3, 7}, Padding{4, 3, 2, 1}, 3, Activation::NONE};
- infered_out_shape = onert::shape_inference::inferDepthwiseConv2DShape(in_shape, ker_shape, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).N, 10);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).H, 3);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).W, 2);
- ASSERT_EQ(infered_out_shape.asFeature(Layout::NHWC).C, 60);
-}
-
-TEST(ShapeInference, neg_DepthwiseConv2D_InvalidSride)
-{
- Shape in_shape{10, 6, 12, 20};
- Shape ker_shape{1, 3, 6, 60};
-
- operation::DepthwiseConv2D::Param param{Stride{3, 0}, Padding{PaddingType::VALID}, 3,
- Activation::NONE};
- ASSERT_THROW(onert::shape_inference::inferDepthwiseConv2DShape(in_shape, ker_shape, param),
- std::runtime_error);
-}
-
-TEST(ShapeInference, Concat)
-{
- {
- Shape in1{10, 20, 30, 3, 50};
- Shape in2{10, 20, 30, 2, 50};
- Shape in3{10, 20, 30, 2, 50};
-
- operation::Concat::Param param{3};
- auto infered_out_shape = onert::shape_inference::inferConcatShape({in1, in2, in3}, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 5);
- ASSERT_EQ(infered_out_shape.dim(0), 10);
- ASSERT_EQ(infered_out_shape.dim(1), 20);
- ASSERT_EQ(infered_out_shape.dim(2), 30);
- ASSERT_EQ(infered_out_shape.dim(3), 7);
- ASSERT_EQ(infered_out_shape.dim(4), 50);
- }
- {
- // case 1. when axis < 0
- Shape in1{10, 20, 2};
- Shape in2{10, 20, 3};
-
- operation::Concat::Param param{-1};
- auto infered_out_shape = onert::shape_inference::inferConcatShape({in1, in2}, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 3);
- ASSERT_EQ(infered_out_shape.dim(0), 10);
- ASSERT_EQ(infered_out_shape.dim(1), 20);
- ASSERT_EQ(infered_out_shape.dim(2), 5);
- }
- {
- // case 2. when axis < 0
- Shape in1{2, 20, 2};
- Shape in2{3, 20, 2};
-
- operation::Concat::Param param{-3};
- auto infered_out_shape = onert::shape_inference::inferConcatShape({in1, in2}, param);
-
- ASSERT_EQ(infered_out_shape.rank(), 3);
- ASSERT_EQ(infered_out_shape.dim(0), 5);
- ASSERT_EQ(infered_out_shape.dim(1), 20);
- ASSERT_EQ(infered_out_shape.dim(2), 2);
- }
-}
-
-TEST(ShapeInference, neg_Concat)
-{
- {
- operation::Concat::Param param{2};
- Shape in1{10, 1, 3};
- Shape in2{10, 2, 4}; // dim[1] should be 1 but 2
-
- EXPECT_ANY_THROW(onert::shape_inference::inferConcatShape({in1, in2}, param));
- }
- { // wrong rank
- operation::Concat::Param param{2};
- Shape in1{10, 2, 3, 4};
- Shape in2{10, 2, 4}; // rank should be 4
-
- EXPECT_ANY_THROW(onert::shape_inference::inferConcatShape({in1, in2}, param));
- }
-}
-
-TEST(ShapeInference, ExpandDims)
-{
- Shape in_shape{30, 40};
-
- auto check = [&](int32_t axis, Shape &expected) {
- auto actual = onert::shape_inference::inferExpandDimsShape(in_shape, axis);
-
- ASSERT_EQ(actual.rank(), 3);
- for (int32_t dim = 0; dim < expected.rank(); dim++)
- ASSERT_EQ(actual.dim(dim), expected.dim(dim));
- };
-
- { // boundary
- int32_t axis = 0;
- Shape expected{1, 30, 40};
- check(axis, expected);
- }
- { // boundary
- int32_t axis = 2;
- Shape expected{30, 40, 1};
- check(axis, expected);
- }
- { // inside
- int32_t axis = 1;
- Shape expected{30, 1, 40};
- check(axis, expected);
- }
- { // negative boundary
- int32_t axis = -1;
- Shape expected{30, 40, 1};
- check(axis, expected);
- }
- { // negative boundary
- int32_t axis = -3;
- Shape expected{1, 30, 40};
- check(axis, expected);
- }
-}
-
-TEST(ShapeInference, neg_ExpandDims)
-{
- Shape in_shape{30, 40};
-
- { // over boundary
- int32_t axis = 3;
- ASSERT_THROW(onert::shape_inference::inferExpandDimsShape(in_shape, axis), std::runtime_error);
- }
- { // over boundary
- int32_t axis = -4;
- ASSERT_THROW(onert::shape_inference::inferExpandDimsShape(in_shape, axis), std::runtime_error);
- }
-}
-
-TEST(ShapeInference, FullyConnected)
-{
- Shape in_shape{3, 4, 5, 6};
- Shape ker_shape{3, 10};
- auto infered_out_shape = onert::shape_inference::inferFullyConnectedShape(in_shape, ker_shape);
-
- ASSERT_EQ(infered_out_shape.rank(), 2);
- ASSERT_EQ(infered_out_shape.dim(0), 36);
- ASSERT_EQ(infered_out_shape.dim(1), 3);
-}
-
-TEST(ShapeInference, Transpose)
-{
- auto check = [&](Shape &in_shape, std::vector<int> perm, Shape &expected) {
- // pre-conditions
- ASSERT_EQ(in_shape.rank(), perm.size());
- ASSERT_EQ(expected.rank(), perm.size());
- auto inferred_out_shape =
- onert::shape_inference::inferTransposeShape(in_shape, perm.data(), perm.size());
- // post-conditions
- ASSERT_EQ(inferred_out_shape.rank(), perm.size());
- for (int32_t dim = 0; dim < expected.rank(); dim++)
- {
- ASSERT_EQ(inferred_out_shape.dim(dim), expected.dim(dim));
- }
- };
- // check for 2-D
- {
- Shape in_shape{2, 3};
- std::vector<int> perm = {1, 0};
- Shape expected{3, 2};
- // int32_t rank = 2;
- check(in_shape, perm, expected);
- }
- // check for 3-D
- {
- Shape in_shape{1, 2, 3};
- std::vector<int> perm = {2, 0, 1};
- Shape expected{3, 1, 2};
- // int32_t rank = 3;
- check(in_shape, perm, expected);
- }
- // check for 4-D
- {
- Shape in_shape{1, 2, 3, 4};
- std::vector<int> perm = {1, 3, 0, 2};
- Shape expected{2, 4, 1, 3};
- // int32_t rank = 4;
- check(in_shape, perm, expected);
- }
-}
-
-TEST(ShapeInference, neg_Transpose)
-{
- Shape in_shape{1, 2, 3};
- // Invalid parameter size
- {
- std::vector<int> perm = {2, 0, 1, 0};
- // int32_t rank = 3;
- ASSERT_THROW(onert::shape_inference::inferTransposeShape(in_shape, perm.data(), perm.size()),
- std::runtime_error);
- }
- // Invalid parameter value
- {
- std::vector<int> perm = {2, 0, 3};
- // int32_t rank = 3;
- ASSERT_THROW(onert::shape_inference::inferTransposeShape(in_shape, perm.data(), perm.size()),
- std::runtime_error);
- }
-}
-
-TEST(ShapeInference, Gather)
-{
- auto check = [&](Shape &input, Shape &indices, Shape &expected, int32_t axis) {
- int rank = input.rank();
- auto actual = onert::shape_inference::inferGatherShape(input, indices, axis, rank);
-
- ASSERT_EQ(actual.rank(), expected.rank());
-
- for (int32_t dim = 0; dim < expected.rank(); dim++)
- ASSERT_EQ(actual.dim(dim), expected.dim(dim));
- };
-
- // check for 2-D, 3-D, axis 0
- {
- Shape input{3, 4};
- Shape indices{1, 1, 2};
- int32_t axis = 0;
- Shape expected{1, 1, 2, 4};
- check(input, indices, expected, axis);
- }
-
- // check for 2-D, 3-D, axis 1
- {
- Shape input{3, 4};
- Shape indices{1, 2, 1};
- int32_t axis = 1;
- Shape expected{3, 1, 2, 1};
- check(input, indices, expected, axis);
- }
-
- // check for 3-D, 2-D, axis 0
- {
- Shape input{2, 3, 4};
- Shape indices{1, 2};
- int32_t axis = 0;
- Shape expected{1, 2, 3, 4};
- check(input, indices, expected, axis);
- }
-
- // check for 3-D, 2-D, axis 2
- {
- Shape input{2, 3, 4};
- Shape indices{2, 1};
- int32_t axis = 2;
- Shape expected{2, 3, 2, 1};
- check(input, indices, expected, axis);
- }
-
- // check for 4D, axis 0
- {
- Shape input{1, 2, 3, 4};
- Shape indices{2};
- int32_t axis = 0;
- Shape expected{2, 2, 3, 4};
- check(input, indices, expected, axis);
- }
-}
-
-TEST(ShapeInference, BCQFullyConnected)
-{
- auto check = [&](Shape &in_shape, Shape &cluster_shape, std::vector<int> cluster,
- Shape &expected) {
- auto actual = onert::shape_inference::inferBCQFullyConnectedShape(in_shape, cluster_shape,
- cluster.data());
- ASSERT_EQ(actual.rank(), expected.rank());
-
- for (int32_t dim = 0; dim < expected.rank(); dim++)
- ASSERT_EQ(actual.dim(dim), expected.dim(dim));
- };
-
- {
- Shape in_shape{10, 1};
- Shape cluster_shape{3, 2};
- std::vector<int> cluster = {1, 10, 2, 10, 3, 10};
-
- Shape expected{30, 1};
- check(in_shape, cluster_shape, cluster, expected);
- }
-
- {
- Shape in_shape{1, 1};
- Shape cluster_shape{1, 2};
- std::vector<int> cluster = {3, 50};
-
- Shape expected{50, 1};
- check(in_shape, cluster_shape, cluster, expected);
- }
-}
-
-TEST(ShapeInference, BCQGather)
-{
- auto check = [&](Shape &indices_shape, Shape &cluster_shape, std::vector<int> cluster,
- uint32_t hidden_size, uint32_t axis, int rank, Shape &expected) {
- operation::BCQGather::Param param{hidden_size, axis};
- auto actual = onert::shape_inference::inferBCQGatherShape(indices_shape, cluster_shape,
- cluster.data(), rank, param);
- ASSERT_EQ(actual.rank(), expected.rank());
-
- for (int32_t dim = 0; dim < expected.rank(); dim++)
- ASSERT_EQ(actual.dim(dim), expected.dim(dim));
- };
-
- {
- Shape indices_shape{5, 1};
- Shape cluster_shape{3, 2};
- std::vector<int> cluster = {1, 10, 2, 10, 3, 10};
- uint32_t hidden_size = 10;
- uint32_t axis = 0;
- int rank = 2;
-
- Shape expected{5, 1, 10};
- check(indices_shape, cluster_shape, cluster, hidden_size, axis, rank, expected);
- }
-
- {
- Shape indices_shape{5, 1};
- Shape cluster_shape{3, 2};
- std::vector<int> cluster = {1, 10, 2, 10, 3, 10};
- uint32_t hidden_size = 10;
- uint32_t axis = 1;
- int rank = 2;
-
- Shape expected{30, 5, 1};
- check(indices_shape, cluster_shape, cluster, hidden_size, axis, rank, expected);
- }
-}