Diffstat (limited to 'runtimes/neurun/core')
-rw-r--r-- runtimes/neurun/core/CMakeLists.txt | 18
-rw-r--r-- runtimes/neurun/core/include/backend/Backend.h | 65
-rw-r--r-- runtimes/neurun/core/include/backend/CustomKernel.h | 82
-rw-r--r-- runtimes/neurun/core/include/backend/CustomKernelRegistry.h | 50
-rw-r--r-- runtimes/neurun/core/include/backend/ExecTime.h | 111
-rw-r--r-- runtimes/neurun/core/include/backend/IConfig.h | 45
-rw-r--r-- runtimes/neurun/core/include/backend/IConstantInitializer.h | 260
-rw-r--r-- runtimes/neurun/core/include/backend/IKernelGenerator.h | 63
-rw-r--r-- runtimes/neurun/core/include/backend/IMemoryManager.h | 49
-rw-r--r-- runtimes/neurun/core/include/backend/IShapeFixer.h | 56
-rw-r--r-- runtimes/neurun/core/include/backend/ITensorBuilder.h | 94
-rw-r--r-- runtimes/neurun/core/include/backend/ITensorManager.h | 56
-rw-r--r-- runtimes/neurun/core/include/backend/JSONExecTime.h | 96
-rw-r--r-- runtimes/neurun/core/include/backend/operand/IObject.h | 42
-rw-r--r-- runtimes/neurun/core/include/backend/operand/ITensor.h | 52
-rw-r--r-- runtimes/neurun/core/include/backend/operand/Object.h | 57
-rw-r--r-- runtimes/neurun/core/include/compiler/Compiler.h | 91
-rw-r--r-- runtimes/neurun/core/include/compiler/IExecutionBuilder.h | 39
-rw-r--r-- runtimes/neurun/core/include/compiler/SubTensorInfo.h | 83
-rw-r--r-- runtimes/neurun/core/include/exec/Execution.h | 118
-rw-r--r-- runtimes/neurun/core/include/exec/ExecutionObservers.h | 65
-rw-r--r-- runtimes/neurun/core/include/exec/IExecutor.h | 72
-rw-r--r-- runtimes/neurun/core/include/exec/IFunction.h | 37
-rw-r--r-- runtimes/neurun/core/include/exec/IODescription.h | 64
-rw-r--r-- runtimes/neurun/core/include/exec/NopFunction.h | 54
-rw-r--r-- runtimes/neurun/core/include/graph/BackendSet.h | 40
-rw-r--r-- runtimes/neurun/core/include/graph/Graph.h | 204
-rw-r--r-- runtimes/neurun/core/include/graph/LowerInfoMap.h | 42
-rw-r--r-- runtimes/neurun/core/include/graph/operand/LowerInfo.h | 93
-rw-r--r-- runtimes/neurun/core/include/graph/operand/ParentInfo.h | 79
-rw-r--r-- runtimes/neurun/core/include/graph/operand/PermuteFactor.h | 133
-rw-r--r-- runtimes/neurun/core/include/graph/operation/LowerInfo.h | 54
-rw-r--r-- runtimes/neurun/core/include/model/Data.h | 75
-rw-r--r-- runtimes/neurun/core/include/model/DataType.h | 57
-rw-r--r-- runtimes/neurun/core/include/model/Index.h | 42
-rw-r--r-- runtimes/neurun/core/include/model/InternalType.h | 68
-rw-r--r-- runtimes/neurun/core/include/model/Layout.h | 67
-rw-r--r-- runtimes/neurun/core/include/model/Model.h | 40
-rw-r--r-- runtimes/neurun/core/include/model/Operand.h | 121
-rw-r--r-- runtimes/neurun/core/include/model/OperandConstraint.h | 61
-rw-r--r-- runtimes/neurun/core/include/model/OperandIndexMap.h | 34
-rw-r--r-- runtimes/neurun/core/include/model/OperandIndexSequence.h | 59
-rw-r--r-- runtimes/neurun/core/include/model/OperandInfo.h | 98
-rw-r--r-- runtimes/neurun/core/include/model/Operands.h | 39
-rw-r--r-- runtimes/neurun/core/include/model/Operation.h | 88
-rw-r--r-- runtimes/neurun/core/include/model/OperationIndexList.h | 52
-rw-r--r-- runtimes/neurun/core/include/model/OperationIndexMap.h | 34
-rw-r--r-- runtimes/neurun/core/include/model/OperationVisitor.h | 51
-rw-r--r-- runtimes/neurun/core/include/model/Operations.Include.h | 75
-rw-r--r-- runtimes/neurun/core/include/model/Operations.h | 36
-rw-r--r-- runtimes/neurun/core/include/model/Operations.lst | 81
-rw-r--r-- runtimes/neurun/core/include/model/Shape.h | 83
-rw-r--r-- runtimes/neurun/core/include/model/Subgraph.h | 86
-rw-r--r-- runtimes/neurun/core/include/model/Subgraphs.h | 81
-rw-r--r-- runtimes/neurun/core/include/model/TypeInfo.h | 59
-rw-r--r-- runtimes/neurun/core/include/model/operation/AbsNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/AddNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/ArgMaxNode.h | 61
-rw-r--r-- runtimes/neurun/core/include/model/operation/AvgPool2DNode.h | 69
-rw-r--r-- runtimes/neurun/core/include/model/operation/CastNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/ComparisonNode.h | 72
-rw-r--r-- runtimes/neurun/core/include/model/operation/ConcatNode.h | 58
-rw-r--r-- runtimes/neurun/core/include/model/operation/Conv2DNode.h | 68
-rw-r--r-- runtimes/neurun/core/include/model/operation/CustomNode.h | 65
-rw-r--r-- runtimes/neurun/core/include/model/operation/DepthToSpaceNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/DepthwiseConv2DNode.h | 69
-rw-r--r-- runtimes/neurun/core/include/model/operation/DequantizeNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/DivNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/EmbeddingLookupNode.h | 50
-rw-r--r-- runtimes/neurun/core/include/model/operation/ExpNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/FloorNode.h | 51
-rw-r--r-- runtimes/neurun/core/include/model/operation/FullyConnectedNode.h | 66
-rw-r--r-- runtimes/neurun/core/include/model/operation/GatherNode.h | 64
-rw-r--r-- runtimes/neurun/core/include/model/operation/HashtableLookupNode.h | 57
-rw-r--r-- runtimes/neurun/core/include/model/operation/L2NormalizationNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/L2Pool2DNode.h | 68
-rw-r--r-- runtimes/neurun/core/include/model/operation/LSTMNode.h | 90
-rw-r--r-- runtimes/neurun/core/include/model/operation/LocalResponseNormalizationNode.h | 66
-rw-r--r-- runtimes/neurun/core/include/model/operation/LogicalAndNode.h | 50
-rw-r--r-- runtimes/neurun/core/include/model/operation/LogicalNotNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/LogicalOrNode.h | 50
-rw-r--r-- runtimes/neurun/core/include/model/operation/LogisticNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/MaxPool2DNode.h | 68
-rw-r--r-- runtimes/neurun/core/include/model/operation/MeanNode.h | 62
-rw-r--r-- runtimes/neurun/core/include/model/operation/MulNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/NegNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/PReLUNode.h | 50
-rw-r--r-- runtimes/neurun/core/include/model/operation/PadNode.h | 51
-rw-r--r-- runtimes/neurun/core/include/model/operation/PermuteNode.h | 78
-rw-r--r-- runtimes/neurun/core/include/model/operation/RNNNode.h | 71
-rw-r--r-- runtimes/neurun/core/include/model/operation/RSQRTNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/ReLU1Node.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/ReLU6Node.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/ReLUNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/ReduceMaxNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/ReduceMinNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/ReduceSumNode.h | 61
-rw-r--r-- runtimes/neurun/core/include/model/operation/ReshapeNode.h | 51
-rw-r--r-- runtimes/neurun/core/include/model/operation/ResizeBilinearNode.h | 64
-rw-r--r-- runtimes/neurun/core/include/model/operation/SQRTNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/SoftmaxNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/SpaceToDepthNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/SplitNode.h | 58
-rw-r--r-- runtimes/neurun/core/include/model/operation/SquaredDifferenceNode.h | 50
-rw-r--r-- runtimes/neurun/core/include/model/operation/SqueezeNode.h | 59
-rw-r--r-- runtimes/neurun/core/include/model/operation/StridedSliceNode.h | 68
-rw-r--r-- runtimes/neurun/core/include/model/operation/SubNode.h | 63
-rw-r--r-- runtimes/neurun/core/include/model/operation/TanhNode.h | 49
-rw-r--r-- runtimes/neurun/core/include/model/operation/TopKV2Node.h | 69
-rw-r--r-- runtimes/neurun/core/include/model/operation/TransposeConvNode.h | 67
-rw-r--r-- runtimes/neurun/core/include/model/operation/TransposeNode.h | 66
-rw-r--r-- runtimes/neurun/core/include/model/operation/UnpackNode.h | 58
-rw-r--r-- runtimes/neurun/core/include/util/Config.lst | 40
-rw-r--r-- runtimes/neurun/core/include/util/ConfigSource.h | 55
-rw-r--r-- runtimes/neurun/core/include/util/Coordinates.h | 103
-rw-r--r-- runtimes/neurun/core/include/util/GeneralConfigSource.h | 44
-rw-r--r-- runtimes/neurun/core/include/util/IConfigSource.h | 46
-rw-r--r-- runtimes/neurun/core/include/util/ITimer.h | 59
-rw-r--r-- runtimes/neurun/core/include/util/Index.h | 158
-rw-r--r-- runtimes/neurun/core/include/util/ObjectManager.h | 144
-rw-r--r-- runtimes/neurun/core/include/util/Padding.h | 42
-rw-r--r-- runtimes/neurun/core/include/util/Set.h | 166
-rw-r--r-- runtimes/neurun/core/include/util/ShapeInference.h | 61
-rw-r--r-- runtimes/neurun/core/include/util/Utils.h | 52
-rw-r--r-- runtimes/neurun/core/include/util/feature/Coordinate4D.h | 111
-rw-r--r-- runtimes/neurun/core/include/util/feature/nchw/View.h | 106
-rw-r--r-- runtimes/neurun/core/include/util/feature/nhwc/Reader.h | 73
-rw-r--r-- runtimes/neurun/core/include/util/feature/nhwc/Utils.h | 63
-rw-r--r-- runtimes/neurun/core/include/util/feature/nhwc/View.h | 91
-rw-r--r-- runtimes/neurun/core/include/util/logging.h | 61
-rw-r--r-- runtimes/neurun/core/src/backend/Backend.cc | 30
-rw-r--r-- runtimes/neurun/core/src/backend/BackendManager.cc | 124
-rw-r--r-- runtimes/neurun/core/src/backend/BackendManager.h | 81
-rw-r--r-- runtimes/neurun/core/src/backend/CustomKernel.cc | 97
-rw-r--r-- runtimes/neurun/core/src/backend/CustomKernelRegistry.cc | 46
-rw-r--r-- runtimes/neurun/core/src/backend/ExecTime.cc | 133
-rw-r--r-- runtimes/neurun/core/src/backend/JSONExecTime.cc | 231
-rw-r--r-- runtimes/neurun/core/src/compiler/BackendResolver.cc | 47
-rw-r--r-- runtimes/neurun/core/src/compiler/BackendResolver.h | 102
-rw-r--r-- runtimes/neurun/core/src/compiler/Compiler.cc | 122
-rw-r--r-- runtimes/neurun/core/src/compiler/ExecutorFactory.cc | 351
-rw-r--r-- runtimes/neurun/core/src/compiler/ExecutorFactory.h | 52
-rw-r--r-- runtimes/neurun/core/src/compiler/HEScheduler.cc | 577
-rw-r--r-- runtimes/neurun/core/src/compiler/HEScheduler.h | 164
-rw-r--r-- runtimes/neurun/core/src/compiler/IScheduler.h | 38
-rw-r--r-- runtimes/neurun/core/src/compiler/Linear.cc | 355
-rw-r--r-- runtimes/neurun/core/src/compiler/Linear.h | 108
-rw-r--r-- runtimes/neurun/core/src/compiler/ManualScheduler.cc | 111
-rw-r--r-- runtimes/neurun/core/src/compiler/ManualScheduler.h | 36
-rw-r--r-- runtimes/neurun/core/src/compiler/OperandContext.cc | 45
-rw-r--r-- runtimes/neurun/core/src/compiler/OperandContext.h | 63
-rw-r--r-- runtimes/neurun/core/src/compiler/OperationValidator.cc | 879
-rw-r--r-- runtimes/neurun/core/src/compiler/OperationValidator.h | 77
-rw-r--r-- runtimes/neurun/core/src/compiler/ParamChecker.cc | 33
-rw-r--r-- runtimes/neurun/core/src/compiler/ParamChecker.h | 73
-rw-r--r-- runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc | 78
-rw-r--r-- runtimes/neurun/core/src/compiler/SubTensorAnalyzer.h | 70
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/DotBuilder.cc | 83
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/DotBuilder.h | 62
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/DotDumper.cc | 198
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/DotDumper.h | 60
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.cc | 56
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.h | 59
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/Node.cc | 56
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/Node.h | 127
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/OperandNode.cc | 60
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/OperandNode.h | 79
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/OperationNode.cc | 47
-rw-r--r-- runtimes/neurun/core/src/dumper/dot/OperationNode.h | 62
-rw-r--r-- runtimes/neurun/core/src/exec/DataflowExecutor.cc | 206
-rw-r--r-- runtimes/neurun/core/src/exec/DataflowExecutor.h | 110
-rw-r--r-- runtimes/neurun/core/src/exec/Execution.cc | 108
-rw-r--r-- runtimes/neurun/core/src/exec/ExecutionObservers.cc | 77
-rw-r--r-- runtimes/neurun/core/src/exec/ExecutorBase.cc | 140
-rw-r--r-- runtimes/neurun/core/src/exec/ExecutorBase.h | 124
-rw-r--r-- runtimes/neurun/core/src/exec/FunctionSequence.cc | 62
-rw-r--r-- runtimes/neurun/core/src/exec/FunctionSequence.h | 56
-rw-r--r-- runtimes/neurun/core/src/exec/Job.cc | 36
-rw-r--r-- runtimes/neurun/core/src/exec/Job.h | 77
-rw-r--r-- runtimes/neurun/core/src/exec/LinearExecutor.cc | 27
-rw-r--r-- runtimes/neurun/core/src/exec/LinearExecutor.h | 69
-rw-r--r-- runtimes/neurun/core/src/exec/ParallelExecutor.cc | 140
-rw-r--r-- runtimes/neurun/core/src/exec/ParallelExecutor.h | 73
-rw-r--r-- runtimes/neurun/core/src/exec/ParallelScheduler.cc | 170
-rw-r--r-- runtimes/neurun/core/src/exec/ParallelScheduler.h | 158
-rw-r--r-- runtimes/neurun/core/src/exec/Sink.h | 182
-rw-r--r-- runtimes/neurun/core/src/exec/Source.h | 187
-rw-r--r-- runtimes/neurun/core/src/exec/interp/Buffer.h | 94
-rw-r--r-- runtimes/neurun/core/src/exec/interp/ExecEnv.h | 168
-rw-r--r-- runtimes/neurun/core/src/exec/interp/ExecManager.cc | 125
-rw-r--r-- runtimes/neurun/core/src/exec/interp/ExecManager.h | 72
-rw-r--r-- runtimes/neurun/core/src/exec/interp/Interpreter.cc | 202
-rw-r--r-- runtimes/neurun/core/src/exec/interp/Interpreter.h | 67
-rw-r--r-- runtimes/neurun/core/src/exec/interp/Registration.h | 52
-rw-r--r-- runtimes/neurun/core/src/exec/interp/Tensor.cc | 54
-rw-r--r-- runtimes/neurun/core/src/exec/interp/Tensor.h | 179
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/Add.cc | 146
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc | 129
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/Concat.cc | 152
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc | 154
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc | 159
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/FullyConnected.cc | 137
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc | 128
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h | 110
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/Reshape.cc | 66
-rw-r--r-- runtimes/neurun/core/src/exec/interp/operations/SoftMax.cc | 164
-rw-r--r-- runtimes/neurun/core/src/graph/Graph.cc | 589
-rw-r--r-- runtimes/neurun/core/src/graph/dumper/Dumper.cc | 583
-rw-r--r-- runtimes/neurun/core/src/graph/dumper/Dumper.h | 96
-rw-r--r-- runtimes/neurun/core/src/graph/operand/LowerInfo.cc | 30
-rw-r--r-- runtimes/neurun/core/src/graph/operand/Shape4DConvert.h | 57
-rw-r--r-- runtimes/neurun/core/src/graph/operation/LowerInfo.cc | 34
-rw-r--r-- runtimes/neurun/core/src/graph/pass/OperandPass.cc | 36
-rw-r--r-- runtimes/neurun/core/src/graph/pass/OperandPass.h | 53
-rw-r--r-- runtimes/neurun/core/src/graph/pass/OperationPass.cc | 36
-rw-r--r-- runtimes/neurun/core/src/graph/pass/OperationPass.h | 71
-rw-r--r-- runtimes/neurun/core/src/graph/pass/Pass.cc | 28
-rw-r--r-- runtimes/neurun/core/src/graph/pass/Pass.h | 55
-rw-r--r-- runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.cc | 199
-rw-r--r-- runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.h | 87
-rw-r--r-- runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc | 210
-rw-r--r-- runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.h | 59
-rw-r--r-- runtimes/neurun/core/src/graph/verifier/Verifier.cc | 97
-rw-r--r-- runtimes/neurun/core/src/graph/verifier/Verifier.h | 68
-rw-r--r-- runtimes/neurun/core/src/library_info.cc | 17
-rw-r--r-- runtimes/neurun/core/src/model/LayoutSet.cc | 66
-rw-r--r-- runtimes/neurun/core/src/model/LayoutSet.h | 58
-rw-r--r-- runtimes/neurun/core/src/model/Operand.cc | 80
-rw-r--r-- runtimes/neurun/core/src/model/OperandConstraint.cc | 28
-rw-r--r-- runtimes/neurun/core/src/model/OperandIndexSequence.cc | 58
-rw-r--r-- runtimes/neurun/core/src/model/Operation.cc | 57
-rw-r--r-- runtimes/neurun/core/src/model/OperationIndexList.cc | 37
-rw-r--r-- runtimes/neurun/core/src/model/Shape.cc | 86
-rw-r--r-- runtimes/neurun/core/src/model/Subgraph.cc | 58
-rw-r--r-- runtimes/neurun/core/src/model/Subgraphs.cc | 73
-rw-r--r-- runtimes/neurun/core/src/model/TypeInfo.cc | 47
-rw-r--r-- runtimes/neurun/core/src/model/operation/AbsNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/AddNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/ArgMaxNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/AvgPool2DNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/CastNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/ComparisonNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/ConcatNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/Conv2DNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/CustomNode.cc | 47
-rw-r--r-- runtimes/neurun/core/src/model/operation/DepthToSpaceNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/DepthwiseConv2DNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/DequantizeNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/DivNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/EmbeddingLookupNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/ExpNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/FloorNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/FullyConnectedNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/GatherNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/HashtableLookupNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/L2NormalizationNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/L2Pool2DNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/LSTMNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/LocalResponseNormalizationNode.cc | 41
-rw-r--r-- runtimes/neurun/core/src/model/operation/LogicalAndNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/LogicalNotNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/LogicalOrNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/LogisticNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/MaxPool2DNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/MeanNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/MulNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/NegNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/PReLUNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/PadNode.cc | 37
-rw-r--r-- runtimes/neurun/core/src/model/operation/PermuteNode.cc | 45
-rw-r--r-- runtimes/neurun/core/src/model/operation/RNNNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/RSQRTNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/ReLU1Node.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/ReLU6Node.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/ReLUNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/ReduceMaxNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/ReduceMinNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/ReduceSumNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/ReshapeNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/ResizeBilinearNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/SQRTNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/SoftmaxNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/SpaceToDepthNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/SplitNode.cc | 33
-rw-r--r-- runtimes/neurun/core/src/model/operation/SquaredDifferenceNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/SqueezeNode.cc | 37
-rw-r--r-- runtimes/neurun/core/src/model/operation/StridedSliceNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/SubNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/TanhNode.cc | 39
-rw-r--r-- runtimes/neurun/core/src/model/operation/TopKV2Node.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/TransposeConvNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/TransposeNode.cc | 40
-rw-r--r-- runtimes/neurun/core/src/model/operation/UnpackNode.cc | 33
-rw-r--r-- runtimes/neurun/core/src/util/ConfigSource.cc | 115
-rw-r--r-- runtimes/neurun/core/src/util/EnvConfigSource.cc | 40
-rw-r--r-- runtimes/neurun/core/src/util/EnvConfigSource.h | 41
-rw-r--r-- runtimes/neurun/core/src/util/GeneralConfigSource.cc | 45
-rw-r--r-- runtimes/neurun/core/src/util/Padding.cc | 120
-rw-r--r-- runtimes/neurun/core/src/util/ShapeInference.cc | 202
-rw-r--r-- runtimes/neurun/core/src/util/Utils.cc | 68
300 files changed, 24179 insertions, 0 deletions
diff --git a/runtimes/neurun/core/CMakeLists.txt b/runtimes/neurun/core/CMakeLists.txt
new file mode 100644
index 000000000..d5449a987
--- /dev/null
+++ b/runtimes/neurun/core/CMakeLists.txt
@@ -0,0 +1,18 @@
+file(GLOB_RECURSE SOURCES "src/*.cc")
+
+add_library(neurun_core STATIC ${SOURCES})
+set_target_properties(neurun_core PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(neurun_core PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+target_include_directories(neurun_core PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
+target_link_libraries(neurun_core PUBLIC nnfw_lib_misc)
+target_link_libraries(neurun_core PUBLIC nnfw_lib_cpp14)
+target_link_libraries(neurun_core PRIVATE nnfw_lib_cker)
+target_link_libraries(neurun_core PRIVATE nnfw_common)
+target_link_libraries(neurun_core PRIVATE nnfw_coverage)
+target_link_libraries(neurun_core PRIVATE dl)
+
+if(ENVVAR_NEURUN_CONFIG)
+ target_compile_definitions(neurun_core PRIVATE ENVVAR_FOR_DEFAULT_CONFIG)
+endif(ENVVAR_NEURUN_CONFIG)
+
+target_link_libraries(neurun_core PUBLIC nnfw-header) # To be removed later
diff --git a/runtimes/neurun/core/include/backend/Backend.h b/runtimes/neurun/core/include/backend/Backend.h
new file mode 100644
index 000000000..e8bfac25c
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/Backend.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_BACKEND_H__
+#define __NEURUN_BACKEND_BACKEND_H__
+
+#include <memory>
+
+#include "model/Operands.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+class Backend;
+struct IConfig;
+class IConstantInitializer;
+class IKernelGenerator;
+class IShapeFixer;
+struct ITensorBuilder;
+
+namespace custom
+{
+class KernelRegistry;
+}
+
+class BackendContext
+{
+public:
+ const Backend *backend;
+ std::shared_ptr<ITensorBuilder> tensor_builder;
+ std::shared_ptr<IConstantInitializer> constant_initializer;
+ std::shared_ptr<IKernelGenerator> kernel_gen;
+ std::shared_ptr<IShapeFixer> shape_fixer;
+};
+
+class Backend
+{
+public:
+ virtual ~Backend() = default;
+ virtual std::shared_ptr<neurun::backend::IConfig> config() const = 0;
+
+ virtual std::unique_ptr<BackendContext>
+ newContext(const model::Operands &operands,
+ const std::shared_ptr<custom::KernelRegistry> &registry) const = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_BACKEND_H__
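
For orientation, a backend plugin is expected to subclass Backend and hand out its per-model builders through a BackendContext. A minimal sketch under stated assumptions: MyBackend, the myplugin namespace, and the empty _config member are invented for illustration; a real backend would also populate the four builder fields.

#include "backend/Backend.h"
#include "backend/IConfig.h"

namespace myplugin
{

class MyBackend : public neurun::backend::Backend
{
public:
  std::shared_ptr<neurun::backend::IConfig> config() const override { return _config; }

  std::unique_ptr<neurun::backend::BackendContext>
  newContext(const neurun::model::Operands &operands,
             const std::shared_ptr<neurun::backend::custom::KernelRegistry> &registry) const override
  {
    std::unique_ptr<neurun::backend::BackendContext> ctx{new neurun::backend::BackendContext};
    ctx->backend = this;
    // A real backend would create tensor_builder, constant_initializer,
    // kernel_gen and shape_fixer here from `operands` and `registry`.
    (void)operands;
    (void)registry;
    return ctx;
  }

private:
  std::shared_ptr<neurun::backend::IConfig> _config; // would hold a concrete IConfig
};

} // namespace myplugin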
diff --git a/runtimes/neurun/core/include/backend/CustomKernel.h b/runtimes/neurun/core/include/backend/CustomKernel.h
new file mode 100644
index 000000000..db0c91e46
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/CustomKernel.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CUSTOM_KERNEL_H__
+#define __NEURUN_BACKEND_CUSTOM_KERNEL_H__
+
+#include "nnfw_dev.h"
+
+#include "exec/IFunction.h"
+
+#include "misc/tensor/Shape.h"
+#include "model/DataType.h"
+
+#include <vector>
+
+namespace neurun
+{
+namespace backend
+{
+namespace custom
+{
+
+using Shape = nnfw::misc::tensor::Shape;
+
+struct TypeInfo
+{
+ Shape shape;
+ model::DataType dtype;
+};
+
+class Kernel : public ::neurun::exec::IFunction
+{
+public:
+ explicit Kernel(nnfw_custom_eval evalFunction);
+
+ nnfw_custom_kernel_params _params;
+ char *_userdata;
+ size_t _userdata_size;
+
+ nnfw_custom_eval _evalFunction;
+ // nnfw_custom_type_infer _type_infer_function; //Unused for now
+
+ struct CustomKernelConfigParams
+ {
+ std::vector<void *> input_allocations;
+ std::vector<TypeInfo> input_types;
+
+ std::vector<void *> output_allocations;
+ std::vector<TypeInfo> output_types;
+
+ char *userdata;
+ size_t userdata_size;
+ };
+
+ /**
+ * Fills _params field used later by user specified eval function
+ * @param inParams custom kernel parameters
+ */
+ virtual void configure(CustomKernelConfigParams &&inParams);
+
+ void run() override;
+ void runSync() override { run(); }
+};
+
+} // namespace custom
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CUSTOM_KERNEL_H__
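
A hedged sketch of the call flow this header implies: wrap a user-supplied nnfw_custom_eval in a Kernel, fill CustomKernelConfigParams, then configure() and run(). The function name runCustomOp and the userdata arguments are placeholders, not part of this commit.

#include "backend/CustomKernel.h"
#include <utility>

void runCustomOp(nnfw_custom_eval user_eval, char *userdata, size_t userdata_size)
{
  using neurun::backend::custom::Kernel;

  Kernel kernel{user_eval};

  Kernel::CustomKernelConfigParams params{};
  // A real kernel generator would point the allocation vectors at operand
  // buffers and fill the matching TypeInfo entries; they stay empty here so
  // the sketch is self-contained.
  params.userdata = userdata;
  params.userdata_size = userdata_size;

  kernel.configure(std::move(params));
  kernel.run(); // invokes user_eval with the _params filled by configure()
}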
diff --git a/runtimes/neurun/core/include/backend/CustomKernelRegistry.h b/runtimes/neurun/core/include/backend/CustomKernelRegistry.h
new file mode 100644
index 000000000..3eb218e11
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/CustomKernelRegistry.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
+#define __NEURUN_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
+
+#include "CustomKernel.h"
+
+#include <unordered_map>
+#include <functional>
+#include <memory>
+
+#include <iostream>
+
+namespace neurun
+{
+namespace backend
+{
+
+namespace custom
+{
+
+class KernelRegistry
+{
+public:
+ void registerKernel(const std::string &id, nnfw_custom_eval evalFunction);
+ std::unique_ptr<Kernel> buildKernelForOp(const std::string &id);
+
+private:
+ std::unordered_map<std::string, nnfw_custom_eval> _storage;
+};
+
+} // namespace custom
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
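
Usage is then a two-step affair: register an eval function under an id, then build a Kernel for that id. "MyCustomOp" is an illustrative name.

#include "backend/CustomKernelRegistry.h"

std::unique_ptr<neurun::backend::custom::Kernel> buildMyCustomOp(nnfw_custom_eval user_eval)
{
  neurun::backend::custom::KernelRegistry registry;
  registry.registerKernel("MyCustomOp", user_eval);
  return registry.buildKernelForOp("MyCustomOp"); // looks up the eval fn by id
}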
diff --git a/runtimes/neurun/core/include/backend/ExecTime.h b/runtimes/neurun/core/include/backend/ExecTime.h
new file mode 100644
index 000000000..4eaf49fab
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/ExecTime.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_EXEC_TIME_H__
+#define __NEURUN_BACKEND_EXEC_TIME_H__
+
+#include "backend/Backend.h"
+#include "backend/IConfig.h"
+#include "JSONExecTime.h"
+#include <memory>
+#include <limits>
+#include <map>
+#include <unordered_map>
+#include <vector>
+
+namespace neurun
+{
+namespace backend
+{
+class ExecTime
+{
+public:
+ explicit ExecTime(const std::vector<const Backend *> &backends) : _json(backends, _measurements)
+ {
+ }
+
+public:
+ /**
+ * @brief Get exec time of an operation with input size
+ * or linearly interpolated value based on size if there is no record for given size
+ *
+ * @param[in] backend id of a backend
+ * @param[in] operation name of an operation
+ * @param[in] quant if input type quantized
+ * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
+ * @return execution time for given input sizes
+ * -1 if there are no records for given parameters (backend, op, quantization).
+ */
+ int64_t getOperationExecTime(const Backend *backend, const std::string &operation, bool quant,
+ uint32_t op_size) const;
+ /**
+   * @brief Update the exec time of an operation on a backend for the given input size, or
+   * add a new entry if none exists.
+ *
+ * @param[in] backend id of a backend
+ * @param[in] operation name of an operation
+ * @param[in] quant if input type quantized
+ * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
+ * @param[in] time real measured value
+ */
+ void updateOperationExecTime(const Backend *backend, const std::string &operation, bool quant,
+ uint32_t op_size, int64_t time);
+ /**
+ * @brief Get the permute time from one backend to another
+ *
+ * @param[in] from_backend
+ * @param[in] to_backend
+ * @param[in] quant if input type quantized
+ * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
+ * @return permutation time for operation size
+ */
+ int64_t getPermuteTime(const Backend *from_backend, const Backend *to_backend, bool quant,
+ uint32_t op_size) const;
+ /**
+ * @brief Update permute time from one backend to another
+ *
+ * @param[in] from_backend
+ * @param[in] to_backend
+ * @param[in] quant if input type quantized
+ * @param[in] time measured permutation time
+ * @param[in] op_size sum of operation's flattened sizes of inputs and outputs
+ */
+ void updatePermuteTime(const Backend *from_backend, const Backend *to_backend, bool quant,
+ uint32_t op_size, int64_t time);
+ /**
+ * @brief Get the max value of int32_t in int64_t
+ * @return max value
+ */
+ static int64_t getMax() { return _MAX; }
+ /**
+ * @brief Update metrics file with new data.
+ */
+ void uploadOperationsExecTime() const { _json.uploadOperationsExecTime(); }
+ static const int64_t NOT_FOUND = -1;
+
+private:
+ /// @brief Measurement data, which is shared with serializer
+ MeasurementData _measurements;
+ // int64_t::max may cause integer overflow
+ static const int64_t _MAX = std::numeric_limits<int32_t>::max();
+ /// @brief Serializer
+ JSON _json;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_EXEC_TIME_H__
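
A sketch of the feedback loop this class supports, e.g. for a profiling-based scheduler; the backend list and the measured time are assumed to come from the BackendManager and an ITimer respectively, and the function name is invented.

#include "backend/ExecTime.h"

int64_t recordAndQuery(const std::vector<const neurun::backend::Backend *> &backends,
                       const neurun::backend::Backend *backend, uint32_t op_size,
                       int64_t measured_time)
{
  neurun::backend::ExecTime exec_time{backends}; // loads exec_time.json via JSON

  // Record one measurement for a quantized Conv2D of this size...
  exec_time.updateOperationExecTime(backend, "Conv2D", /*quant=*/true, op_size, measured_time);

  // ...then query it back; nearby sizes would be linearly interpolated.
  int64_t t = exec_time.getOperationExecTime(backend, "Conv2D", /*quant=*/true, op_size);
  if (t == neurun::backend::ExecTime::NOT_FOUND)
    t = neurun::backend::ExecTime::getMax(); // pessimistic fallback

  exec_time.uploadOperationsExecTime(); // persist the updated table
  return t;
}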
diff --git a/runtimes/neurun/core/include/backend/IConfig.h b/runtimes/neurun/core/include/backend/IConfig.h
new file mode 100644
index 000000000..0e9572033
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IConfig.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ICONFIG_H__
+#define __NEURUN_BACKEND_ICONFIG_H__
+
+#include "util/ITimer.h"
+#include <memory>
+#include <string>
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IConfig
+{
+ virtual ~IConfig() = default;
+
+ virtual std::string id() = 0;
+ virtual void initialize() = 0;
+ // Support subtensor allocation
+ virtual bool SupportSubTensorAlloc() = 0;
+
+  // Timer is used for backend profiling. With the default (nullptr) timer, the profiler won't work.
+ virtual std::unique_ptr<util::ITimer> timer() { return nullptr; }
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ICONFIG_H__
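
A minimal conforming IConfig might look like the following; the backend id "mybackend" and the myplugin namespace are invented.

#include "backend/IConfig.h"

namespace myplugin
{

struct MyConfig : public neurun::backend::IConfig
{
  std::string id() override { return "mybackend"; }
  void initialize() override { /* one-time backend setup */ }
  bool SupportSubTensorAlloc() override { return false; } // no subtensor aliasing
  // timer() keeps the default nullptr, so this backend cannot be profiled.
};

} // namespace myplugin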
diff --git a/runtimes/neurun/core/include/backend/IConstantInitializer.h b/runtimes/neurun/core/include/backend/IConstantInitializer.h
new file mode 100644
index 000000000..8393e0fd8
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IConstantInitializer.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__
+#define __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__
+
+#include <unordered_map>
+#include <functional>
+
+#include "ITensorBuilder.h"
+#include "model/Layout.h"
+#include "model/Operand.h"
+#include "model/Operands.h"
+#include "model/OperationVisitor.h"
+#include "model/Subgraph.h"
+#include "util/logging.h"
+#include "util/Utils.h"
+
+namespace
+{
+template <typename T>
+static void Init(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj,
+ const bool copy,
+ const neurun::model::Layout frontend_layout = neurun::model::Layout::UNKNOWN)
+{
+ const auto shape = model_obj.shape();
+ auto base = reinterpret_cast<const T *>(model_obj.data().base());
+
+ obj.access([&](::neurun::backend::operand::ITensor &tensor) {
+ switch (shape.rank())
+ {
+ case 1:
+ {
+ auto vec_size = shape.dim(0);
+ for (int32_t n = 0; n < vec_size; ++n)
+ {
+ const T *from = reinterpret_cast<const T *>(base) + n;
+ const auto value = *from;
+
+ T *into = reinterpret_cast<T *>(tensor.buffer()) + n;
+
+ *into = value;
+ }
+ break;
+ }
+ case 2:
+ {
+ const int32_t copy_len = shape.dim(1);
+
+ for (auto i = 0; i < shape.dim(0); ++i)
+ {
+ neurun::util::Coordinates coords{i, 0};
+ memcpy(tensor.buffer() + tensor.calcOffset(coords), base + i * copy_len,
+ copy_len * sizeof(T));
+ }
+ break;
+ }
+ case 3:
+ {
+ const int32_t width = shape.dim(1);
+ const int32_t copy_len = shape.dim(2);
+
+ for (auto i = 0; i < shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < shape.dim(1); ++j)
+ {
+ neurun::util::Coordinates coords{i, j, 0};
+ memcpy(tensor.buffer() + tensor.calcOffset(coords),
+ base + i * width * copy_len + j * copy_len, copy_len * sizeof(T));
+ }
+ }
+ break;
+ }
+ case 4:
+ {
+ const int32_t height = shape.dim(1);
+ const int32_t width = shape.dim(2);
+ const int32_t copy_len = shape.dim(3);
+ for (auto i = 0; i < shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < shape.dim(1); ++j)
+ {
+ for (auto k = 0; k < shape.dim(2); ++k)
+ {
+ if (copy)
+ {
+ neurun::util::Coordinates coords{i, j, k, 0};
+ memcpy(tensor.buffer() + tensor.calcOffset(coords),
+ base + i * height * width * copy_len + j * width * copy_len + k * copy_len,
+ copy_len * sizeof(T));
+ }
+ else
+ {
+ for (auto l = 0; l < shape.dim(3); ++l)
+ {
+ const auto coords = neurun::util::convertCoordinates(
+ {i, j, k, l}, frontend_layout, tensor.layout());
+ T *into = reinterpret_cast<T *>(tensor.buffer() + tensor.calcOffset(coords));
+ T value = *(base + i * height * width * copy_len + j * width * copy_len +
+ k * copy_len + l);
+ *into = value;
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
+ default:
+ throw std::runtime_error{"Not yet supported"};
+ }
+ });
+}
+
+template <typename T>
+void copyInit(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj)
+{
+ Init<T>(model_obj, obj, true);
+}
+
+template <typename T>
+void permuteInit(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj,
+ const neurun::model::Layout frontend_layout)
+{
+ Init<T>(model_obj, obj, false, frontend_layout);
+}
+
+} // namespace
+
+namespace neurun
+{
+namespace backend
+{
+
+class IConstantInitializer : model::OperationVisitor
+{
+public:
+ virtual ~IConstantInitializer() = default;
+
+public:
+ virtual void run() = 0;
+
+public:
+ using Initializer = std::function<void(const model::Operand &, backend::operand::IObject &)>;
+
+ void generate(const model::Subgraph &subg, const model::Operands &operands)
+ {
+ _current_subg_layout = subg.getLayout();
+ subg.accept(*this);
+ for (const auto &e : subg.operations())
+ {
+ for (const auto &ind : e.node->getInputs())
+ {
+ const auto &obj = operands.at(ind);
+ if (obj.isConstant() && !exist(ind))
+ {
+ registerPermuteInitializer(ind, obj);
+ }
+ }
+ }
+ }
+
+protected:
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &) override { /* DO NOTHING */}
+#include "model/Operations.lst"
+#undef OP
+
+protected:
+ void registerCopyInitializer(const model::OperandIndex &index, const model::Operand &obj)
+ {
+ // For only CONSTANTS
+ if (!obj.isConstant())
+ return;
+
+ VERBOSE(FillOperandData) << "Fill data for operand " << index.value() << std::endl;
+
+ const auto type = obj.typeInfo().type();
+ using neurun::model::DataType;
+
+ switch (type)
+ {
+ case DataType::FLOAT32:
+ _init_map[index] = copyInit<float>;
+ break;
+ case DataType::INT32:
+ _init_map[index] = copyInit<int32_t>;
+ break;
+ case DataType::UINT32:
+ _init_map[index] = copyInit<uint32_t>;
+ break;
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ _init_map[index] = copyInit<uint8_t>;
+ break;
+ default:
+ throw std::runtime_error("Not supported, yet");
+ break;
+ }
+ }
+
+protected:
+ void registerPermuteInitializer(const model::OperandIndex &index, const model::Operand &obj)
+ {
+ // For only CONSTANTS
+ if (!obj.isConstant())
+ return;
+
+ VERBOSE(FillOperandData) << "Fill data for operand " << index.value() << std::endl;
+
+ const auto type = obj.typeInfo().type();
+ using neurun::model::DataType;
+ using namespace std::placeholders;
+
+ switch (type)
+ {
+ case DataType::FLOAT32:
+ _init_map[index] = std::bind(permuteInit<float>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::INT32:
+ _init_map[index] = std::bind(permuteInit<int32_t>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::UINT32:
+ _init_map[index] = std::bind(permuteInit<uint32_t>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ _init_map[index] = std::bind(permuteInit<uint8_t>, _1, _2, _current_subg_layout);
+ break;
+ default:
+ throw std::runtime_error("Not supported, yet");
+ break;
+ }
+ }
+
+private:
+ bool exist(const model::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
+
+protected:
+ std::unordered_map<model::OperandIndex, Initializer> _init_map;
+ model::Layout _current_subg_layout;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__
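
A backend subclass then only has to replay _init_map once its tensors are allocated. A sketch, assuming the backend keeps the Operands and its ITensorBuilder (whose wrapTensor() yields the IObject each Initializer expects); the class and member names are illustrative.

#include "backend/IConstantInitializer.h"

namespace myplugin
{

class ConstantInitializer : public neurun::backend::IConstantInitializer
{
public:
  ConstantInitializer(const neurun::model::Operands &operands,
                      const std::shared_ptr<neurun::backend::ITensorBuilder> &tensor_builder)
      : _operands{operands}, _tensor_builder{tensor_builder}
  {
  }

  void run() override
  {
    // Replay every registered Initializer against its allocated tensor object.
    for (const auto &it : _init_map)
    {
      const auto &ind = it.first;
      it.second(_operands.at(ind), *_tensor_builder->wrapTensor(ind));
    }
  }

private:
  const neurun::model::Operands &_operands;
  std::shared_ptr<neurun::backend::ITensorBuilder> _tensor_builder;
};

} // namespace myplugin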
diff --git a/runtimes/neurun/core/include/backend/IKernelGenerator.h b/runtimes/neurun/core/include/backend/IKernelGenerator.h
new file mode 100644
index 000000000..542a55338
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IKernelGenerator.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_IKERNEL_GENERATOR_H__
+#define __NEURUN_BACKEND_IKERNEL_GENERATOR_H__
+
+#include <memory>
+#include <functional>
+
+#include "ITensorBuilder.h"
+#include "compiler/IExecutionBuilder.h"
+#include "model/OperationVisitor.h"
+#include "model/Subgraph.h"
+#include "cpp14/memory.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+class IKernelGenerator : public model::OperationVisitor
+{
+public:
+ virtual ~IKernelGenerator() = default;
+
+ void generate(const model::Operation &node, neurun::compiler::IExecutionBuilder *executionBuilder)
+ {
+ _execution_builder = executionBuilder;
+ node.accept(*this);
+ }
+
+protected:
+ using model::OperationVisitor::visit;
+
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &) override \
+ { \
+ throw std::runtime_error("NYI"); \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+protected:
+ neurun::compiler::IExecutionBuilder *_execution_builder;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_IKERNEL_GENERATOR_H__
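
Backends override visit() only for the operations they implement; anything else throws "NYI" through the OP() macro above. A sketch under two assumptions: that IExecutionBuilder (declared in compiler/IExecutionBuilder.h) exposes an append() taking a unique_ptr<exec::IFunction>, and that NopFunction is default-constructible.

#include "backend/IKernelGenerator.h"
#include "exec/NopFunction.h"

namespace myplugin
{

class KernelGenerator : public neurun::backend::IKernelGenerator
{
public:
  using neurun::backend::IKernelGenerator::visit;

  void visit(const neurun::model::operation::AddNode &) override
  {
    // A real backend would build an Add kernel from the node's operands;
    // a no-op stands in so the sketch stays self-contained.
    _execution_builder->append(nnfw::cpp14::make_unique<neurun::exec::NopFunction>());
  }
};

} // namespace myplugin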
diff --git a/runtimes/neurun/core/include/backend/IMemoryManager.h b/runtimes/neurun/core/include/backend/IMemoryManager.h
new file mode 100644
index 000000000..b06bab872
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IMemoryManager.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_IMEMORY_MANAGER_H__
+#define __NEURUN_BACKEND_IMEMORY_MANAGER_H__
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IMemoryManager
+{
+ virtual ~IMemoryManager() = default;
+
+ virtual void allocate(void) = 0;
+ virtual void deallocate(void) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#include <unordered_set>
+#include <memory>
+
+namespace neurun
+{
+namespace backend
+{
+
+using MemoryManagerSet = std::unordered_set<std::unique_ptr<backend::IMemoryManager>>;
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_IMEMORY_MANAGER_H__
diff --git a/runtimes/neurun/core/include/backend/IShapeFixer.h b/runtimes/neurun/core/include/backend/IShapeFixer.h
new file mode 100644
index 000000000..ad137942c
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IShapeFixer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ISHAPE_FIXER_H__
+#define __NEURUN_BACKEND_ISHAPE_FIXER_H__
+
+#include <memory>
+#include <functional>
+
+#include "ITensorBuilder.h"
+#include "model/OperationVisitor.h"
+#include "model/Subgraph.h"
+#include "cpp14/memory.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+class IShapeFixer : model::OperationVisitor
+{
+public:
+ virtual ~IShapeFixer() = default;
+
+ virtual std::shared_ptr<ITensorBuilder> tensor_builder() = 0;
+
+protected:
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &) override \
+ { \
+ throw std::runtime_error("NYI"); \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+public:
+ void fix(const model::Operation &node) { node.accept(*this); }
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ISHAPE_FIXER_H__
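
IShapeFixer follows the same visitor pattern: a backend overrides only the nodes whose shapes it must adjust before kernel generation. A hypothetical sketch where Reshape needs no adjustment:

#include "backend/IShapeFixer.h"

namespace myplugin
{

class ShapeFixer : public neurun::backend::IShapeFixer
{
public:
  explicit ShapeFixer(const std::shared_ptr<neurun::backend::ITensorBuilder> &tb) : _tb{tb} {}

  std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() override { return _tb; }

  // Overridden nodes are "fixed"; unhandled ones still throw "NYI".
  void visit(const neurun::model::operation::ReshapeNode &) override { /* nothing to fix */ }

private:
  std::shared_ptr<neurun::backend::ITensorBuilder> _tb;
};

} // namespace myplugin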
diff --git a/runtimes/neurun/core/include/backend/ITensorBuilder.h b/runtimes/neurun/core/include/backend/ITensorBuilder.h
new file mode 100644
index 000000000..72079a2fb
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/ITensorBuilder.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ITENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_ITENSOR_BUILDER_H__
+
+#include <map>
+
+#include "model/Index.h"
+#include "model/OperandInfo.h"
+#include "model/Operation.h"
+#include "model/Layout.h"
+#include "operand/IObject.h"
+#include "operand/ITensor.h"
+#include "compiler/SubTensorInfo.h"
+#include "ITensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+struct ITensorBuilder
+{
+ using IterateFunction = std::function<void(const model::OperandIndex &)>;
+
+ virtual ~ITensorBuilder(void) = default;
+
+ // TODO Merge registerTensorInfo and registerSubTensorInfo using abstraction by internal class
+ /**
+ * @brief Register tensor information to allocate on backend
+ */
+ virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &,
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool as_const) = 0;
+ /**
+ * @brief Register subtensor information to allocate on backend
+ */
+ virtual void registerSubTensorInfo(const model::OperandIndex &,
+ const compiler::SubTensorInfo &) = 0;
+
+ virtual void notifyFirstUse(const model::OperandIndex &) = 0;
+ virtual void notifyLastUse(const model::OperandIndex &) = 0;
+
+ virtual void prepare(void) = 0;
+  // TODO Remove this once the newly appended APIs below have landed everywhere
+ virtual void allocate(void) = 0;
+
+ virtual void allocateConsts() = 0;
+ virtual void allocateNonconsts() = 0;
+ virtual void postFunctionPrepare() = 0;
+ virtual void finalize() = 0;
+
+ virtual std::shared_ptr<::neurun::backend::operand::ITensor>
+ tensorAt(const model::OperandIndex &ind) = 0;
+ virtual std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) = 0;
+ virtual void iterate(const IterateFunction &fn) = 0;
+
+ virtual void preVisit(const model::Operation &) = 0;
+ virtual void postVisit(const model::Operation &) = 0;
+
+ virtual std::unique_ptr<ITensorManager> releaseTensorManager(void) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#include <unordered_set>
+#include <memory>
+
+namespace neurun
+{
+namespace backend
+{
+
+using TensorBuilderSet = std::unordered_set<std::shared_ptr<backend::ITensorBuilder>>;
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ITENSOR_BUILDER_H__
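
The call order the interface suggests is registration, lifetime notification, then allocation. A hedged sketch against some concrete builder; the NHWC layouts and the function name are placeholders.

#include "backend/ITensorBuilder.h"

void planOneTensor(neurun::backend::ITensorBuilder &builder,
                   const neurun::model::OperandIndex &ind,
                   const neurun::model::OperandInfo &info)
{
  builder.registerTensorInfo(ind, info, neurun::model::Layout::NHWC,
                             neurun::model::Layout::NHWC, /*as_const=*/false);
  builder.notifyFirstUse(ind); // lifetime begins: the memory planner may place it
  builder.notifyLastUse(ind);  // lifetime ends: the memory may be reused
  builder.prepare();
  builder.allocate(); // to be replaced by allocateConsts()/allocateNonconsts(), per the TODO
}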
diff --git a/runtimes/neurun/core/include/backend/ITensorManager.h b/runtimes/neurun/core/include/backend/ITensorManager.h
new file mode 100644
index 000000000..74506ef59
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/ITensorManager.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ITENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_ITENSOR_MANAGER_H__
+
+namespace neurun
+{
+namespace backend
+{
+
+// NOTE The name ITensorManager is still under discussion;
+// suggestions for a better name are welcome.
+/**
+ * @brief Interface as an abstract tensor manager which has MemoryManager
+ */
+struct ITensorManager
+{
+ virtual ~ITensorManager() = default;
+
+ virtual void allocateConsts(void) = 0;
+ virtual void allocateNonconsts(void) = 0;
+ virtual void deallocateConsts(void) = 0;
+ virtual void deallocateNonconsts(void) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#include <unordered_set>
+#include <memory>
+
+namespace neurun
+{
+namespace backend
+{
+
+using TensorManagerSet = std::unordered_set<std::unique_ptr<backend::ITensorManager>>;
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ITENSOR_MANAGER_H__
diff --git a/runtimes/neurun/core/include/backend/JSONExecTime.h b/runtimes/neurun/core/include/backend/JSONExecTime.h
new file mode 100644
index 000000000..84505e10f
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/JSONExecTime.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_JSON_EXEC_TIME_H__
+#define __NEURUN_BACKEND_JSON_EXEC_TIME_H__
+
+#include <fstream>
+#include <unordered_map>
+#include <map>
+#include <vector>
+#include "backend/Backend.h"
+#include "backend/IConfig.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+/**
+ * @brief Table that contains the execution time of an operation on some backend for different
+ * input sizes, and the transfer time from one backend to another for various input sizes
+ * (permutation time)
+ *
+ * backend -> op -> quant-> size --> time
+ * _measurements[Backend*]["string"][bool][uint32_t] = int64_t
+ */
+using MeasurementData = std::unordered_map<
+ const Backend *,
+ std::unordered_map<std::string, std::unordered_map<bool, std::map<uint32_t, int64_t>>>>;
+
+class JSON
+{
+public:
+ explicit JSON(const std::vector<const Backend *> &backends, MeasurementData &measurements)
+ : _measurement_file("exec_time.json"), _backends(), _measurements(measurements)
+ {
+ for (const auto b : backends)
+ {
+ _backends.emplace(b->config()->id(), b);
+ }
+ loadOperationsExecTime();
+ };
+ /**
+   * @brief Update the measurement file (_measurement_file) with new data.
+ */
+ void uploadOperationsExecTime() const;
+
+private:
+ ///@brief file containing measurements
+ std::string _measurement_file;
+ std::unordered_map<std::string, const Backend *> _backends;
+ std::unordered_map<
+ const Backend *,
+ std::unordered_map<std::string, std::unordered_map<bool, std::map<uint32_t, int64_t>>>>
+ &_measurements;
+ /**
+ * @brief Helper function for inserting data to OperationExecTimes
+ *
+ * @param backend String name of backend
+ * @param operation String name of operation
+ * @param quant if input type quantized
+ * @param stream File stream
+ */
+ void readOperation(const std::string &backend, const std::string &operation, bool quant,
+ std::ifstream &stream);
+
+ /**
+ * @brief Helper function for writing OperationExecTimes to stream
+ *
+ * @param operation_info Map of operations execution information
+ * @param stream File stream
+ */
+ void printOperation(const std::map<uint32_t, int64_t> &operation_info,
+ std::ofstream &stream) const;
+ /**
+   * @brief Parse and load the operation execution times from _measurement_file.
+ */
+ void loadOperationsExecTime();
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_JSON_EXEC_TIME_H__
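
The MeasurementData nesting maps directly onto subscript chains, which is how scheduler-side code would populate it. A small illustration; `cpu` is assumed to come from the BackendManager, and the time unit follows whatever ExecTime records.

#include "backend/JSONExecTime.h"

void recordSample(neurun::backend::MeasurementData &data,
                  const neurun::backend::Backend *cpu)
{
  // backend -> operation -> quant -> op_size -> time
  data[cpu]["Conv2D"][/*quant=*/false][/*op_size=*/1024] = 500;
}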
diff --git a/runtimes/neurun/core/include/backend/operand/IObject.h b/runtimes/neurun/core/include/backend/operand/IObject.h
new file mode 100644
index 000000000..56eea34a8
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/operand/IObject.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
+#define __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
+
+#include <functional>
+
+#include "ITensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace operand
+{
+
+struct IObject
+{
+ virtual ~IObject() = default;
+ virtual operand::ITensor *ptr(void) const = 0;
+ virtual void access(const std::function<void(operand::ITensor &tensor)> &fn) const = 0;
+};
+
+} // namespace operand
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
diff --git a/runtimes/neurun/core/include/backend/operand/ITensor.h b/runtimes/neurun/core/include/backend/operand/ITensor.h
new file mode 100644
index 000000000..f762ad03c
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/operand/ITensor.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
+#define __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
+
+#include <cstring>
+#include <cstdint>
+
+#include "model/Layout.h"
+#include "util/Coordinates.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace operand
+{
+
+class ITensor
+{
+public:
+ virtual ~ITensor() = default;
+
+public:
+ virtual uint8_t *buffer() const = 0;
+ virtual size_t total_size() const = 0;
+ virtual size_t dimension(size_t index) const = 0;
+ virtual size_t num_dimensions() const = 0;
+ virtual size_t calcOffset(const neurun::util::Coordinates &coords) const = 0;
+ virtual model::Layout layout() const = 0;
+ virtual bool has_padding() const = 0;
+};
+
+} // namespace operand
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
diff --git a/runtimes/neurun/core/include/backend/operand/Object.h b/runtimes/neurun/core/include/backend/operand/Object.h
new file mode 100644
index 000000000..e6f6d926d
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/operand/Object.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_OPERAND_OBJECT_H__
+#define __NEURUN_BACKEND_OPERAND_OBJECT_H__
+
+#include <memory>
+#include "ITensor.h"
+
+#include "IObject.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace operand
+{
+
+class Object : public IObject
+{
+public:
+ Object() = default;
+
+public:
+ Object(const std::shared_ptr<ITensor> &tensor) : _tensor{tensor}
+ {
+ // DO NOTHING
+ }
+
+public:
+ ITensor *ptr(void) const override { return _tensor.get(); }
+
+private:
+ std::shared_ptr<ITensor> _tensor;
+
+public:
+ void access(const std::function<void(ITensor &tensor)> &fn) const override { fn(*_tensor); }
+};
+
+} // namespace operand
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_OPERAND_OBJECT_H__
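
Object is a thin owning wrapper: it keeps the tensor alive via shared_ptr and forwards access() straight to the callback. A usage sketch, where make_backend_tensor() stands in for a hypothetical backend-specific factory:

    std::shared_ptr<ITensor> tensor = make_backend_tensor(); // hypothetical factory
    Object object{tensor};
    // The lambda receives the wrapped tensor; here it zero-fills the buffer.
    object.access([](ITensor &t) { std::memset(t.buffer(), 0, t.total_size()); });
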
diff --git a/runtimes/neurun/core/include/compiler/Compiler.h b/runtimes/neurun/core/include/compiler/Compiler.h
new file mode 100644
index 000000000..094ffe853
--- /dev/null
+++ b/runtimes/neurun/core/include/compiler/Compiler.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Compiler.h
+ * @brief This file contains Compiler class to define and run compilation phase
+ */
+
+#ifndef __NEURUN_COMPILER_COMPILE_H_
+#define __NEURUN_COMPILER_COMPILE_H_
+
+#include "graph/Graph.h"
+#include "exec/IExecutor.h"
+
+namespace neurun
+{
+
+namespace compiler
+{
+
+enum class State
+{
+  CREATED, // Before compilation
+  STARTED, // Compilation has started
+  LOWERED, // Backends are decided
+  COMPILED // Compilation succeeded
+};
+
+/**
+ * @brief Class to compile graph model
+ */
+class Compiler
+{
+public:
+  /**
+   * @brief Construct a new Compiler object
+   * @param[in] graph Graph model to compile
+   */
+ Compiler(const std::shared_ptr<graph::Graph> &graph)
+ : _graph{graph}, _executor{nullptr}, _state{State::CREATED}
+ {
+ // DO NOTHING
+ }
+
+public:
+  /**
+   * @brief Run compilation. Compilation result will be saved in _executor
+   */
+  void compile(void);
+  /**
+   * @brief Pass the compiled executor reference
+   * @param[out] executor Executor reference to return\n
+   *             Set to nullptr if compile has not been run yet
+   */
+  void release(std::shared_ptr<exec::IExecutor> &executor) { executor = _executor; }
+
+ void state(State state) { _state = state; }
+ State state(void) const { return _state; }
+
+  /**
+   * @brief Check if the model can be compiled
+   * @return @c true if the model can be compiled, otherwise @c false
+   * @note This method doesn't check model correctness,\n
+   *       so model verification should be done before calling this method
+   */
+ bool checkCompilable();
+
+private:
+ std::shared_ptr<graph::Graph> _graph;
+ std::shared_ptr<exec::IExecutor> _executor;
+ State _state;
+};
+
+} // namespace compiler
+
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_COMPILE_H_
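
The State enum doubles as documentation of the expected call order. A sketch of the intended flow, assuming graph is a std::shared_ptr to a fully built graph::Graph:

    neurun::compiler::Compiler compiler{graph};
    if (compiler.checkCompilable())
    {
      compiler.compile(); // CREATED -> STARTED -> LOWERED -> COMPILED

      std::shared_ptr<neurun::exec::IExecutor> executor;
      compiler.release(executor); // stays nullptr if compile() never ran
    }
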
diff --git a/runtimes/neurun/core/include/compiler/IExecutionBuilder.h b/runtimes/neurun/core/include/compiler/IExecutionBuilder.h
new file mode 100644
index 000000000..c5a06fec0
--- /dev/null
+++ b/runtimes/neurun/core/include/compiler/IExecutionBuilder.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_I_EXECUTION_BUILDER_H__
+#define __NEURUN_COMPILER_I_EXECUTION_BUILDER_H__
+
+#include <memory>
+
+#include "exec/IFunction.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+struct IExecutionBuilder
+{
+ virtual ~IExecutionBuilder() = default;
+
+ virtual void append(std::unique_ptr<::neurun::exec::IFunction> &&f) = 0;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_I_EXECUTION_BUILDER_H__
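
Kernel generators are expected to hand each generated kernel to this interface in execution order. A minimal sketch of an implementation that only collects the functions (real builders may attach extra bookkeeping):

    #include <memory>
    #include <vector>
    #include "compiler/IExecutionBuilder.h"

    class SimpleExecutionBuilder : public neurun::compiler::IExecutionBuilder
    {
    public:
      void append(std::unique_ptr<neurun::exec::IFunction> &&f) override
      {
        _functions.emplace_back(std::move(f)); // preserve submission order
      }

    private:
      std::vector<std::unique_ptr<neurun::exec::IFunction>> _functions;
    };
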
diff --git a/runtimes/neurun/core/include/compiler/SubTensorInfo.h b/runtimes/neurun/core/include/compiler/SubTensorInfo.h
new file mode 100644
index 000000000..92b2759ea
--- /dev/null
+++ b/runtimes/neurun/core/include/compiler/SubTensorInfo.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file SubTensorInfo.h
+ * @brief This file contains SubTensorInfo to represent subsumption between tensors
+ * for backend tensor allocation
+ */
+#ifndef __NEURUN_COMPILER_SUBTENSOR_INFO_H__
+#define __NEURUN_COMPILER_SUBTENSOR_INFO_H__
+
+#include "model/Operand.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+/**
+ * @brief Class to represent information of subtensor
+ */
+class SubTensorInfo
+{
+public:
+ SubTensorInfo() = delete;
+
+  /**
+   * @brief Construct a new SubTensorInfo object
+   * @param[in] obj Operand object that has parent information
+   */
+ SubTensorInfo(const model::Operand &obj)
+ : _parent{obj.parent_info()->parent()}, _shape{obj.shape()}, _type{obj.typeInfo()},
+ _offset{obj.parent_info()->offset()}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Return parent tensor index
+ * @return Parent tensor index
+ */
+ const model::OperandIndex parent(void) const { return _parent; }
+ /**
+ * @brief Return tensor shape
+ * @return Tensor shape
+ */
+ const model::Shape shape(void) const { return _shape; }
+ /**
+ * @brief Return tensor type
+ * @return Tensor type
+ */
+ const model::TypeInfo type(void) const { return _type; }
+ /**
+ * @brief Return tensor's offset in parent tensor
+ * @return Tensor offset
+ */
+ const neurun::util::Coordinates offset(void) const { return _offset; }
+
+private:
+ const model::OperandIndex _parent;
+ const model::Shape _shape;
+ const model::TypeInfo _type;
+ const neurun::util::Coordinates _offset;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_SUBTENSOR_INFO_H__
diff --git a/runtimes/neurun/core/include/exec/Execution.h b/runtimes/neurun/core/include/exec/Execution.h
new file mode 100644
index 000000000..a5b47f039
--- /dev/null
+++ b/runtimes/neurun/core/include/exec/Execution.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Execution.h
+ * @brief This file defines execution
+ */
+#ifndef __NEURUN_EXEC_EXECUTION_H__
+#define __NEURUN_EXEC_EXECUTION_H__
+
+#include "exec/IExecutor.h"
+#include "IODescription.h"
+
+#include <memory>
+#include <thread>
+
+namespace neurun
+{
+namespace exec
+{
+
+/**
+ * @brief Class to define an execution instance that collects input/output information
+ *        for inference and prepares the executor to run (TODO)
+ */
+class Execution
+{
+
+public:
+ /**
+ * @brief Construct a new Execution object
+ * @param[in] executor Model executor
+ */
+ Execution(const std::shared_ptr<IExecutor> &executor);
+
+public:
+ /**
+ * @brief Returns model object
+ * @return Model object
+ */
+ const model::Model &model() const { return _executor->model(); }
+ /**
+ * @brief Set input data's information
+ * @param[in] index Input index
+ * @param[in] buffer Input data's buffer pointer
+ * @param[in] length Input data's length
+ */
+ void setInput(const model::IOIndex &index, const void *buffer, size_t length);
+ /**
+   * @brief Set input data's information, especially to specify dimensions unknown at model
+   * build time.
+ * @param[in] index Input index
+ * @param[in] type Input data's type info
+ * @param[in] shape Input data's shape
+ * @param[in] buffer Input data's buffer pointer
+ * @param[in] length Input data's length
+ */
+ void setInput(const model::IOIndex &index, const model::TypeInfo &type, const model::Shape &shape,
+ const void *buffer, size_t length);
+ /**
+ * @brief Set output data's information
+ * @param[in] index Output index
+ * @param[in] buffer Output data's buffer pointer
+ * @param[in] length Output data's length
+ */
+ void setOutput(const model::IOIndex &index, void *buffer, size_t length);
+ /**
+   * @brief Set output data's information, especially to specify dimensions unknown at model
+   * build time.
+ * @param[in] index Output index
+ * @param[in] type Output data's type info
+ * @param[in] shape Output data's shape
+ * @param[in] buffer Output data's buffer pointer
+ * @param[in] length Output data's length
+ */
+ void setOutput(const model::IOIndex &index, const model::TypeInfo &type,
+ const model::Shape &shape, void *buffer, size_t length);
+ /**
+ * @brief Execution
+ * @note It should be called after setting input and output buffer
+ */
+ void execute();
+
+ /**
+ * @brief Start asynchronous execution
+ * @note It returns after execution thread is started
+ * It should be called after setting input and output buffer
+ */
+ void startExecute(void);
+
+ /**
+ * @brief Return when execution is finished
+ * @note It waits until execution is finished
+ */
+ void waitFinish(void);
+
+private:
+ const std::shared_ptr<IExecutor> _executor;
+ IODescription _io_desc;
+ std::unique_ptr<std::thread> _exec_thread;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_EXECUTION_H__
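
A typical synchronous round-trip through Execution, sketched assuming executor came from Compiler::release() and the model has one float input and one float output (in_elems/out_elems are assumed element counts):

    neurun::exec::Execution execution{executor};

    std::vector<float> input(in_elems), output(out_elems);
    execution.setInput(neurun::model::IOIndex{0}, input.data(),
                       input.size() * sizeof(float));
    execution.setOutput(neurun::model::IOIndex{0}, output.data(),
                        output.size() * sizeof(float));

    execution.execute(); // or: startExecute(); /* other work */ waitFinish();
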
diff --git a/runtimes/neurun/core/include/exec/ExecutionObservers.h b/runtimes/neurun/core/include/exec/ExecutionObservers.h
new file mode 100644
index 000000000..61c8bf1b2
--- /dev/null
+++ b/runtimes/neurun/core/include/exec/ExecutionObservers.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_OBSERVERS_H__
+#define __NEURUN_EXEC_OBSERVERS_H__
+
+#include "exec/IFunction.h"
+#include "model/Operation.h"
+#include "backend/ExecTime.h"
+#include "util/ITimer.h"
+#include "IExecutor.h"
+
+namespace neurun
+{
+namespace exec
+{
+class IExecutionObserver
+{
+public:
+ /// @brief Invoked just before model (not individual operation) execution begins
+ virtual void handleBegin(IExecutor *) { return; }
+
+ virtual void handleBegin(IExecutor *, const model::Operation *, const backend::Backend *) = 0;
+ virtual void handleEnd(IExecutor *, const model::Operation *, const backend::Backend *) = 0;
+
+ /// @brief Invoked just after model (not individual operation) execution ends
+ virtual void handleEnd(IExecutor *) { return; }
+
+ virtual ~IExecutionObserver() = default;
+};
+
+class ProfileObserver : public IExecutionObserver
+{
+public:
+ explicit ProfileObserver(std::shared_ptr<backend::ExecTime> et) : _et(std::move(et)) {}
+ void handleBegin(IExecutor *, const model::Operation *, const backend::Backend *) override;
+ void handleEnd(IExecutor *, const model::Operation *, const backend::Backend *) override;
+
+ void handleEnd(IExecutor *) override { uploadExecTime(); }
+
+private:
+ void uploadExecTime() { _et->uploadOperationsExecTime(); }
+
+private:
+ std::unique_ptr<util::ITimer> _timer;
+ std::shared_ptr<backend::ExecTime> _et;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_OBSERVERS_H__
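
Any observer can hook the four callbacks, not just the ProfileObserver above. A sketch of a trivial observer that counts executed operations (how observers are registered with an executor is not part of this diff):

    class CountingObserver : public neurun::exec::IExecutionObserver
    {
    public:
      void handleBegin(neurun::exec::IExecutor *, const neurun::model::Operation *,
                       const neurun::backend::Backend *) override {}
      void handleEnd(neurun::exec::IExecutor *, const neurun::model::Operation *,
                     const neurun::backend::Backend *) override { ++_count; }
      size_t count() const { return _count; }

    private:
      size_t _count = 0;
    };
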
diff --git a/runtimes/neurun/core/include/exec/IExecutor.h b/runtimes/neurun/core/include/exec/IExecutor.h
new file mode 100644
index 000000000..eb4f5e302
--- /dev/null
+++ b/runtimes/neurun/core/include/exec/IExecutor.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file IExecutor.h
+ * @brief This file defines interface of Executor
+ */
+#ifndef __NEURUN_EXEC_I_EXECUTOR_H_
+#define __NEURUN_EXEC_I_EXECUTOR_H_
+
+#include "model/Model.h"
+#include "IFunction.h"
+#include "IODescription.h"
+#include "model/OperationIndexMap.h"
+
+namespace neurun
+{
+namespace exec
+{
+class IExecutionObserver;
+/**
+ * @brief Struct to define interface of Executor
+ */
+struct IExecutor
+{
+ /**
+ * @brief Construct a new IExecutor object
+ */
+ IExecutor() = default;
+ /**
+ * @brief Destroy the IExecutor object
+ */
+ virtual ~IExecutor() = default;
+
+ /**
+ * @brief Returns model object
+ *
+ * @return Model object
+ */
+ virtual const model::Model &model() = 0;
+
+ /**
+ * @brief Set an ordering on operations
+ * @param[in] ranks The table encoding the ordering
+ */
+ virtual void setIndexedRanks(std::shared_ptr<model::OperationIndexMap<int64_t>>) = 0;
+
+ /**
+ * @brief Start execution
+ * @param[in] desc Input and output description
+ * @note This method should be thread-safe
+ */
+ virtual void execute(const IODescription &desc) = 0;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_I_EXECUTOR_H_
diff --git a/runtimes/neurun/core/include/exec/IFunction.h b/runtimes/neurun/core/include/exec/IFunction.h
new file mode 100644
index 000000000..5cc29ea75
--- /dev/null
+++ b/runtimes/neurun/core/include/exec/IFunction.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_I_FUNCTION_H__
+#define __NEURUN_EXEC_I_FUNCTION_H__
+
+namespace neurun
+{
+namespace exec
+{
+
+class IFunction
+{
+public:
+ virtual ~IFunction() = default;
+ virtual void run() = 0;
+ virtual void runSync() = 0;
+ virtual void prepare() {}
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_I_FUNCTION_H__
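
Every compiled kernel ends up behind this interface. A minimal conforming implementation might look like the sketch below; runSync() simply falls back to run(), just as the NopFunction added later in this diff does:

    class IncrementFunction : public neurun::exec::IFunction
    {
    public:
      explicit IncrementFunction(int *target) : _target{target} {}
      void run() override { ++(*_target); }
      void runSync() override { run(); } // nothing to synchronize here

    private:
      int *_target;
    };
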
diff --git a/runtimes/neurun/core/include/exec/IODescription.h b/runtimes/neurun/core/include/exec/IODescription.h
new file mode 100644
index 000000000..4809f34fe
--- /dev/null
+++ b/runtimes/neurun/core/include/exec/IODescription.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_IO_DESCRIPTION_H__
+#define __NEURUN_EXEC_IO_DESCRIPTION_H__
+
+#include <memory>
+#include <vector>
+
+#include "model/OperandInfo.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+struct InputDesc
+{
+ const model::OperandInfo info;
+ const void *buffer;
+ const size_t size;
+
+ InputDesc(void) = delete;
+ InputDesc(const model::OperandInfo &info, const void *buffer, const size_t size)
+ : info(info), buffer(buffer), size(size)
+ {
+ }
+};
+
+struct OutputDesc
+{
+ const model::OperandInfo info;
+ void *buffer;
+ const size_t size;
+
+ OutputDesc(void) = delete;
+ OutputDesc(const model::OperandInfo &info, void *buffer, const size_t size)
+ : info(info), buffer(buffer), size(size)
+ {
+ }
+};
+
+struct IODescription
+{
+ std::vector<std::unique_ptr<InputDesc>> inputs;
+ std::vector<std::unique_ptr<OutputDesc>> outputs;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_IO_DESCRIPTION_H__
diff --git a/runtimes/neurun/core/include/exec/NopFunction.h b/runtimes/neurun/core/include/exec/NopFunction.h
new file mode 100644
index 000000000..5cbd7e5ce
--- /dev/null
+++ b/runtimes/neurun/core/include/exec/NopFunction.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file NopFunction.h
+ * @brief This file defines NopFunction
+ */
+#ifndef __NEURUN_EXEC_NOP_FUNCTION_H_
+#define __NEURUN_EXEC_NOP_FUNCTION_H_
+
+#include "IFunction.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+/**
+ * @brief A derivative of IFunction that does nothing
+ */
+class NopFunction : public IFunction
+{
+public:
+ NopFunction() = default;
+ void run() override
+ {
+ // DO NOTHING
+ }
+ void runSync() override
+ {
+    // This method is used just for profiling and is meaningful only for
+    // backend::acl_common::AclFunction; here it simply delegates to run()
+ run();
+ }
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_NOP_FUNCTION_H_
diff --git a/runtimes/neurun/core/include/graph/BackendSet.h b/runtimes/neurun/core/include/graph/BackendSet.h
new file mode 100644
index 000000000..a3a508697
--- /dev/null
+++ b/runtimes/neurun/core/include/graph/BackendSet.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_BACKEND_SET_H__
+#define __NEURUN_GRAPH_OPERAND_BACKEND_SET_H__
+
+#include "util/Set.h"
+
+namespace neurun
+{
+namespace backend
+{
+class Backend;
+} // namespace backend
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+
+using BackendSet = util::Set<const backend::Backend *>;
+
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_BACKEND_SET_H__
diff --git a/runtimes/neurun/core/include/graph/Graph.h b/runtimes/neurun/core/include/graph/Graph.h
new file mode 100644
index 000000000..b3e6d54ff
--- /dev/null
+++ b/runtimes/neurun/core/include/graph/Graph.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_GRAPH_H__
+#define __NEURUN_GRAPH_GRAPH_H__
+
+#include <cassert>
+#include <functional>
+
+#include "model/Operation.h"
+#include "model/Model.h"
+#include "graph/LowerInfoMap.h"
+#include "model/Subgraph.h"
+#include "model/Subgraphs.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+class LowerInfo;
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace compiler
+{
+class Linear;
+} // namespace compiler
+} // namespace neurun
+
+namespace neurun
+{
+namespace compiler
+{
+class BackendResolver;
+} // namespace compiler
+} // namespace neurun
+
+namespace neurun
+{
+namespace backend
+{
+namespace custom
+{
+class KernelRegistry;
+} // namespace custom
+} // namespace backend
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+
+class Graph
+{
+private:
+ enum class Phase
+ {
+ BUILDING,
+ MODEL
+ };
+
+public:
+ template <bool is_const> class Iterator
+ {
+ public:
+ using GraphRef = typename std::conditional<is_const, const Graph &, Graph &>::type;
+ using IndexRef = const model::OperationIndex &;
+ using NodeRef =
+ typename std::conditional<is_const, const model::Operation &, model::Operation &>::type;
+ using IterFn = std::function<void(IndexRef, NodeRef)>;
+
+ public:
+ virtual ~Iterator() = default;
+ virtual void iterate(GraphRef graph, const IterFn &fn) const = 0;
+ };
+
+ template <bool is_const = false> class DefaultIterator final : public Iterator<is_const>
+ {
+ public:
+ using GraphRef = typename Iterator<is_const>::GraphRef;
+ using IndexRef = typename Iterator<is_const>::IndexRef;
+ using NodeRef = typename Iterator<is_const>::NodeRef;
+ using IterFn = typename Iterator<is_const>::IterFn;
+
+ public:
+ void iterate(GraphRef graph, const IterFn &fn) const;
+ };
+ using DefaultConstIterator = DefaultIterator<true>;
+
+ template <bool is_const = false> class PostDfsIterator final : public Iterator<is_const>
+ {
+ public:
+ using GraphRef = typename Iterator<is_const>::GraphRef;
+ using IndexRef = typename Iterator<is_const>::IndexRef;
+ using NodeRef = typename Iterator<is_const>::NodeRef;
+ using IterFn = typename Iterator<is_const>::IterFn;
+
+ public:
+ void iterate(GraphRef graph, const IterFn &fn) const;
+ };
+ using PostDfsConstIterator = PostDfsIterator<true>;
+
+public:
+ Graph(void) = delete;
+ Graph(std::unique_ptr<model::Model> &&model);
+ ~Graph(void);
+
+ // Graph Building
+public:
+ model::OperandIndex addOperand(const model::Shape &shape, const model::TypeInfo &type);
+ model::OperationIndex addOperation(std::unique_ptr<model::Operation> &&node);
+ void setOperandValue(const model::OperandIndex &ind, std::unique_ptr<model::Data> &&data);
+ void addInput(const model::OperandIndex &ind);
+ void addOutput(const model::OperandIndex &ind);
+ void finishBuilding(void);
+ void lower(void);
+ void removeOperand(const model::OperandIndex &ind) { _model->operands.remove(ind); }
+ std::unique_ptr<compiler::Linear> linearize(void);
+ bool isBuildingPhase(void) const { return _phase == Phase::BUILDING; }
+ std::shared_ptr<const model::Model> shareModel() { return _model; }
+ std::unique_ptr<graph::LowerInfoMap> releaseLowerInfo() { return std::move(_lower_info_map); }
+ std::unique_ptr<model::Subgraphs> releaseSubgraphs() { return std::move(_subgraphs); }
+
+private:
+ void initializeUseDef();
+
+ // Custom operations support
+public:
+ void bindKernelRegistry(const std::shared_ptr<backend::custom::KernelRegistry> &registry)
+ {
+ _kernel_registry = registry;
+ }
+
+ const std::shared_ptr<backend::custom::KernelRegistry> &getKernelRegistry() const
+ {
+ return _kernel_registry;
+ }
+
+private:
+ std::shared_ptr<backend::custom::KernelRegistry> _kernel_registry;
+
+ // Accessors
+public:
+ const model::OperandIndexSequence &getInputs() const { return _model->inputs; }
+ model::OperandIndexSequence &getInputs() { return _model->inputs; }
+ const model::OperandIndexSequence &getOutputs() const { return _model->outputs; }
+ model::OperandIndexSequence &getOutputs() { return _model->outputs; }
+ const model::Operands &operands() const { return _model->operands; }
+ model::Operands &operands() { return _model->operands; } // TODO Remove this non-const accessor
+ const model::Operations &operations() const { return _model->operations; }
+ model::Operations &operations() { return _model->operations; }
+ const compiler::BackendResolver *backend_resolver() const { return _backend_resolver.get(); }
+
+private:
+ Phase _phase{Phase::BUILDING};
+ std::shared_ptr<model::Model> _model;
+
+ // For LOWERED phase
+public:
+ const operation::LowerInfo *getLowerInfo(const model::SubgraphIndex &subg_index) const;
+ void setLowerInfo(const model::SubgraphIndex &subg_index,
+ std::unique_ptr<operation::LowerInfo> &&lower_info);
+ const operand::LowerInfo *getLowerInfo(const model::OperandIndex &index) const;
+ operand::LowerInfo *getLowerInfo(const model::OperandIndex &index);
+ void setLowerInfo(const model::OperandIndex &index,
+ std::unique_ptr<operand::LowerInfo> &&lower_info);
+ model::Subgraphs &subgraphs()
+ {
+ assert(_subgraphs);
+ return *_subgraphs;
+ }
+ const model::Subgraphs *subgraphs() const { return _subgraphs.get(); }
+ void setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br);
+ std::unique_ptr<compiler::BackendResolver> releaseBackendResolver();
+
+private:
+ std::unique_ptr<compiler::BackendResolver> _backend_resolver;
+ std::unique_ptr<LowerInfoMap> _lower_info_map;
+  // A Pass (for Perm) can only accept a Graph, so Graph holds Subgraphs as a member
+ std::unique_ptr<model::Subgraphs> _subgraphs;
+};
+
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_GRAPH_H__
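
The Phase enum suggests a build-then-freeze protocol: mutation happens while isBuildingPhase() holds, then the graph is finalized and lowered. A sketch of the building sequence, with shape/type as assumed values and MyOp as a hypothetical model::Operation subclass:

    auto model = nnfw::cpp14::make_unique<neurun::model::Model>();
    neurun::graph::Graph graph{std::move(model)};

    auto in = graph.addOperand(shape, type);
    auto out = graph.addOperand(shape, type);
    graph.addInput(in);
    graph.addOutput(out);
    graph.addOperation(nnfw::cpp14::make_unique<MyOp>(/* ... */)); // hypothetical op
    graph.finishBuilding(); // Phase::BUILDING -> Phase::MODEL
    graph.lower();          // decide backends, fill LowerInfo
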
diff --git a/runtimes/neurun/core/include/graph/LowerInfoMap.h b/runtimes/neurun/core/include/graph/LowerInfoMap.h
new file mode 100644
index 000000000..5b755ead3
--- /dev/null
+++ b/runtimes/neurun/core/include/graph/LowerInfoMap.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_LOWER_INFO_MAP_H__
+#define __NEURUN_GRAPH_LOWER_INFO_MAP_H__
+
+#include <memory>
+#include <unordered_map>
+
+#include "graph/operand/LowerInfo.h"
+#include "graph/operation/LowerInfo.h"
+#include "model/OperandIndexMap.h"
+#include "model/Index.h"
+
+namespace neurun
+{
+namespace graph
+{
+
+struct LowerInfoMap
+{
+ std::unordered_map<model::SubgraphIndex, std::unique_ptr<operation::LowerInfo>> operation;
+ model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
+};
+
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_LOWER_INFO_MAP_H__
diff --git a/runtimes/neurun/core/include/graph/operand/LowerInfo.h b/runtimes/neurun/core/include/graph/operand/LowerInfo.h
new file mode 100644
index 000000000..3558f6cc2
--- /dev/null
+++ b/runtimes/neurun/core/include/graph/operand/LowerInfo.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_LOWER_INFO_H__
+#define __NEURUN_GRAPH_OPERAND_LOWER_INFO_H__
+
+#include <functional>
+#include <stdint.h>
+
+#include "graph/operand/PermuteFactor.h"
+#include "util/Set.h"
+
+namespace neurun
+{
+namespace backend
+{
+class Backend;
+} // namespace backend
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+using PermuteFactorSet = util::Set<PermuteFactor>;
+
+class LowerInfo
+{
+public:
+ class Shape4D
+ {
+ public:
+ Shape4D(uint32_t n, uint32_t h, uint32_t w, uint32_t c) : _n{n}, _h{h}, _w{w}, _c{c}
+ {
+ // DO NOTHING
+ }
+
+ public:
+ uint32_t n(void) const { return _n; }
+ uint32_t h(void) const { return _h; }
+ uint32_t w(void) const { return _w; }
+ uint32_t c(void) const { return _c; }
+
+ private:
+ uint32_t _n;
+ uint32_t _h;
+ uint32_t _w;
+ uint32_t _c;
+ };
+
+public:
+ LowerInfo(const Shape4D &shape) : _shape{shape}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const Shape4D &shape(void) const { return _shape; }
+ const PermuteFactorSet &def_factors(void) const { return _def_factors; }
+ const PermuteFactorSet &use_factors(void) const { return _use_factors; }
+
+public:
+ void addDefPermuteFactor(const PermuteFactor &factor) { _def_factors.add(factor); }
+ void addUsePermuteFactor(const PermuteFactor &factor) { _use_factors.add(factor); }
+ void removeDefPermuteFactor(const PermuteFactor &factor) { _def_factors.remove(factor); }
+ void removeUsePermuteFactor(const PermuteFactor &factor) { _use_factors.remove(factor); }
+
+private:
+ Shape4D _shape;
+ PermuteFactorSet _def_factors;
+ PermuteFactorSet _use_factors;
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_LOWER_INFO_H__
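
The def/use factor sets record on which backend/layout combinations an operand is produced and consumed; a mismatch between them is what later justifies inserting a permutation. A sketch, with cpu_backend and acl_backend as assumed backend pointers:

    using neurun::graph::operand::LowerInfo;
    using neurun::graph::operand::PermuteFactor;

    LowerInfo info{LowerInfo::Shape4D{1, 224, 224, 3}};
    info.addDefPermuteFactor(PermuteFactor{cpu_backend, neurun::model::Layout::NHWC});
    info.addUsePermuteFactor(PermuteFactor{acl_backend, neurun::model::Layout::NCHW});
    // def in NHWC, use in NCHW: a permute step is needed between the two
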
diff --git a/runtimes/neurun/core/include/graph/operand/ParentInfo.h b/runtimes/neurun/core/include/graph/operand/ParentInfo.h
new file mode 100644
index 000000000..024925d90
--- /dev/null
+++ b/runtimes/neurun/core/include/graph/operand/ParentInfo.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ParentInfo.h
+ * @brief This file contains the ParentInfo class
+ *        to represent subsumption between operands
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_PARENT_INFO_H__
+#define __NEURUN_GRAPH_OPERAND_PARENT_INFO_H__
+
+#include <stdint.h>
+
+#include "model/Index.h"
+#include "util/Coordinates.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+using neurun::util::Coordinates;
+
+/**
+ * @brief Class to represent parent operand in child operand
+ */
+class ParentInfo
+{
+public:
+  /**
+   * @brief Construct a new ParentInfo object
+   * @param[in] parent Index of parent operand
+   * @param[in] coordinate Offset of child operand in parent operand
+   */
+ ParentInfo(const model::OperandIndex parent, const Coordinates &coordinate)
+ : _parent{parent}, _coordinate{coordinate}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Return parent index
+ * @return Parent index
+ */
+ model::OperandIndex parent(void) const { return _parent; }
+ /**
+   * @brief Return offset in parent
+ * @return Offset
+ */
+ Coordinates offset(void) const { return _coordinate; }
+
+private:
+ model::OperandIndex _parent;
+ Coordinates _coordinate;
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_PARENT_INFO_H__
diff --git a/runtimes/neurun/core/include/graph/operand/PermuteFactor.h b/runtimes/neurun/core/include/graph/operand/PermuteFactor.h
new file mode 100644
index 000000000..480e95c15
--- /dev/null
+++ b/runtimes/neurun/core/include/graph/operand/PermuteFactor.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file PermuteFactor.h
+ * @brief This file contains neurun::graph::operand::PermuteFactor class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_PERMUTE_FACTOR_H__
+#define __NEURUN_GRAPH_OPERAND_PERMUTE_FACTOR_H__
+
+#include <functional>
+
+#include "model/Layout.h"
+
+namespace neurun
+{
+namespace backend
+{
+class Backend;
+} // namespace backend
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+/**
+ * @brief Class that has factors of permutation
+ */
+class PermuteFactor
+{
+public:
+  /**
+   * @brief Construct PermuteFactor object.
+   * @param backend The backend factor
+   * @param layout  The layout factor
+   */
+ PermuteFactor(const backend::Backend *backend, model::Layout layout)
+ : _backend{backend}, _layout{layout}
+ {
+ // DO NOTHING
+ }
+ /**
+ * @brief Construct PermuteFactor object by copy semantics.
+ */
+ PermuteFactor(const PermuteFactor &f) : _backend{f._backend}, _layout{f._layout}
+ {
+ // DO NOTHING
+ }
+ /**
+ * @brief Construct PermuteFactor object by move semantics.
+ */
+ PermuteFactor(PermuteFactor &&) = default;
+
+public:
+ /**
+ * @brief Get backend
+ *
+ * @return Backend factor
+ */
+ const backend::Backend *backend() const { return _backend; }
+ /**
+ * @brief Get layout
+ *
+ * @return Layout factor
+ */
+ model::Layout layout() const { return _layout; }
+
+public:
+ /**
+ * @brief operator overloading function for `==`
+ *
+ * @return Whether two PermuteFactor are the same
+ */
+ bool operator==(const PermuteFactor &other) const
+ {
+ return _backend == other.backend() && _layout == other.layout();
+ }
+ /**
+ * @brief operator overloading function for `!=`
+ *
+   * @return Whether two PermuteFactor are different
+ */
+ bool operator!=(const PermuteFactor &other) const { return !(*this == other); }
+
+private:
+ const backend::Backend *_backend{nullptr};
+ model::Layout _layout{model::Layout::UNKNOWN};
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+namespace std
+{
+
+using PermuteFactor = ::neurun::graph::operand::PermuteFactor;
+
+/**
+ * @brief Structure that provides hash value of PermuteFactor
+ */
+template <> struct hash<PermuteFactor>
+{
+ size_t operator()(const PermuteFactor &factor) const noexcept
+ {
+ hash<const ::neurun::backend::Backend *> b_hash{};
+ hash<::neurun::model::Layout> l_hash{};
+ return b_hash(factor.backend()) ^ (l_hash(factor.layout()) << 1);
+ }
+};
+
+} // namespace std
+
+#endif // __NEURUN_GRAPH_OPERAND_PERMUTE_FACTOR_H__
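
The std::hash specialization is what makes PermuteFactor usable as a key in standard unordered containers. For example, assuming backend is a valid backend pointer:

    std::unordered_set<neurun::graph::operand::PermuteFactor> factors;
    factors.emplace(backend, neurun::model::Layout::NHWC);
    factors.emplace(backend, neurun::model::Layout::NHWC); // duplicate, set keeps one
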
diff --git a/runtimes/neurun/core/include/graph/operation/LowerInfo.h b/runtimes/neurun/core/include/graph/operation/LowerInfo.h
new file mode 100644
index 000000000..fb9f5206c
--- /dev/null
+++ b/runtimes/neurun/core/include/graph/operation/LowerInfo.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_LOWER_INFO_H__
+#define __NEURUN_GRAPH_OPERATION_LOWER_INFO_H__
+
+#include <string>
+
+#include <graph/operand/PermuteFactor.h>
+
+namespace neurun
+{
+namespace backend
+{
+class Backend;
+} // namespace backend
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+class LowerInfo
+{
+public:
+ LowerInfo(const backend::Backend *backend, model::Layout layout);
+ const backend::Backend *backend() const { return _permute_factor.backend(); }
+ model::Layout layout() const { return _permute_factor.layout(); }
+
+private:
+ graph::operand::PermuteFactor _permute_factor;
+};
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_LOWER_INFO_H__
diff --git a/runtimes/neurun/core/include/model/Data.h b/runtimes/neurun/core/include/model/Data.h
new file mode 100644
index 000000000..3316ad874
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Data.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_DATA_H__
+#define __NEURUN_MODEL_DATA_H__
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+namespace neurun
+{
+namespace model
+{
+
+struct Data
+{
+ virtual ~Data() = default;
+
+ virtual size_t size(void) const = 0;
+ virtual const uint8_t *base(void) const = 0;
+};
+
+class CachedData final : public Data
+{
+public:
+ CachedData(const uint8_t *base, size_t size) : _base{new uint8_t[size]}, _size{size}
+ {
+ std::copy(base, base + size, _base);
+ }
+
+public:
+ ~CachedData() { delete[] _base; }
+
+public:
+ size_t size(void) const override { return _size; }
+ const uint8_t *base(void) const override { return _base; }
+
+private:
+ uint8_t *_base;
+ size_t _size;
+};
+
+class ExternalData final : public Data
+{
+public:
+ ExternalData(const uint8_t *base, size_t size) : _base{base}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ size_t size(void) const override { return _size; }
+ const uint8_t *base(void) const override { return _base; }
+
+private:
+ const uint8_t *_base;
+ const size_t _size;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_DATA_H__
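
The two Data implementations encode an ownership decision: CachedData copies the bytes, so it is safe when the source buffer may be freed; ExternalData only borrows, so the caller's memory must outlive the model. A sketch with src/len as assumed inputs:

    // Copying: safe even if `src` is released afterwards.
    std::unique_ptr<neurun::model::Data> owned =
        nnfw::cpp14::make_unique<neurun::model::CachedData>(src, len);

    // Borrowing: zero-copy, but `src` must stay alive as long as the model.
    std::unique_ptr<neurun::model::Data> borrowed =
        nnfw::cpp14::make_unique<neurun::model::ExternalData>(src, len);
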
diff --git a/runtimes/neurun/core/include/model/DataType.h b/runtimes/neurun/core/include/model/DataType.h
new file mode 100644
index 000000000..7b68dabea
--- /dev/null
+++ b/runtimes/neurun/core/include/model/DataType.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_DATATYPE_H__
+#define __NEURUN_MODEL_DATATYPE_H__
+
+#include <stdexcept>
+
+namespace neurun
+{
+namespace model
+{
+
+enum class DataType
+{
+ FLOAT32 = 0,
+ INT32 = 1,
+ UINT32 = 2,
+ QUANT8_ASYMM = 3,
+ BOOL8 = 4,
+};
+
+inline size_t sizeOfDataType(DataType data_type)
+{
+ switch (data_type)
+ {
+ case DataType::FLOAT32:
+ return sizeof(float);
+ case DataType::INT32:
+ return sizeof(int32_t);
+ case DataType::UINT32:
+ return sizeof(uint32_t);
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ return sizeof(uint8_t);
+ default:
+ throw std::runtime_error{"Unsupported type size"};
+ }
+}
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_DATATYPE_H__
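
sizeOfDataType() gives a single source of truth for element sizes, e.g. when sizing a host buffer for a 1x224x224x3 FLOAT32 tensor:

    const size_t elems = 1 * 224 * 224 * 3; // 150528 elements
    const size_t bytes =
        elems * neurun::model::sizeOfDataType(neurun::model::DataType::FLOAT32);
    // bytes == 602112 (150528 * 4)
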
diff --git a/runtimes/neurun/core/include/model/Index.h b/runtimes/neurun/core/include/model/Index.h
new file mode 100644
index 000000000..e4218d51d
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Index.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERAND_INDEX_H__
+#define __NEURUN_MODEL_OPERAND_INDEX_H__
+
+#include "util/Index.h"
+
+namespace neurun
+{
+namespace model
+{
+
+struct OperationIndexTag;
+using OperationIndex = ::neurun::util::Index<uint32_t, OperationIndexTag>;
+
+struct OperandIndexTag;
+using OperandIndex = ::neurun::util::Index<uint32_t, OperandIndexTag>;
+
+struct IOIndexTag;
+using IOIndex = ::neurun::util::Index<uint32_t, IOIndexTag>;
+
+struct SubgraphIndexTag;
+using SubgraphIndex = ::neurun::util::Index<uint32_t, SubgraphIndexTag>;
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERAND_INDEX_H__
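
Each alias carries its own tag type, so these indices are mutually incompatible at compile time even though they all wrap a uint32_t. Assuming util::Index is constructible from its underlying value, as its uses elsewhere in this diff suggest:

    neurun::model::OperandIndex operand_idx{3};
    neurun::model::OperationIndex op_idx{3};
    // bool same = (operand_idx == op_idx); // does not compile: unrelated types
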
diff --git a/runtimes/neurun/core/include/model/InternalType.h b/runtimes/neurun/core/include/model/InternalType.h
new file mode 100644
index 000000000..fccf2fe04
--- /dev/null
+++ b/runtimes/neurun/core/include/model/InternalType.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_INTERNAL_TYPE_H__
+#define __NEURUN_MODEL_INTERNAL_TYPE_H__
+
+#include <cstdint>
+
+namespace neurun
+{
+namespace model
+{
+
+enum class Activation
+{
+ NONE = 0,
+ RELU = 1,
+ RELU1 = 2,
+ RELU6 = 3,
+ TANH = 4,
+ SIGMOID = 5
+};
+
+enum class PaddingType
+{
+ EXPLICIT = 0,
+ SAME = 1,
+ VALID = 2
+};
+
+struct ExplicitPadding
+{
+ uint32_t left;
+ uint32_t right;
+ uint32_t top;
+ uint32_t bottom;
+};
+
+// TODO Resolve explicit padding param at frontend and save in value field
+struct Padding
+{
+ PaddingType type;
+ ExplicitPadding param;
+};
+
+struct Stride
+{
+ uint32_t vertical;
+ uint32_t horizontal;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_INTERNAL_TYPE_H__
diff --git a/runtimes/neurun/core/include/model/Layout.h b/runtimes/neurun/core/include/model/Layout.h
new file mode 100644
index 000000000..db46f42de
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Layout.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_LAYOUT_H__
+#define __NEURUN_MODEL_LAYOUT_H__
+
+#include <functional>
+#include <stdexcept>
+#include <string>
+
+namespace neurun
+{
+namespace model
+{
+
+enum class Layout
+{
+ UNKNOWN = 0,
+ NHWC,
+ NCHW
+};
+
+inline std::string to_string(model::Layout layout)
+{
+ switch (layout)
+ {
+    case Layout::NHWC:
+      return std::string{"NHWC"};
+    case Layout::NCHW:
+      return std::string{"NCHW"};
+    case Layout::UNKNOWN:
+      return std::string{"UNKNOWN"};
+ default:
+ throw std::runtime_error("WRONG LAYOUT");
+ }
+}
+
+} // namespace model
+} // namespace neurun
+
+namespace std
+{
+
+template <> struct hash<::neurun::model::Layout>
+{
+ size_t operator()(::neurun::model::Layout value) const noexcept
+ {
+ using type = typename std::underlying_type<::neurun::model::Layout>::type;
+ return hash<type>()(static_cast<type>(value));
+ }
+};
+
+} // namespace std
+
+#endif // __NEURUN_MODEL_LAYOUT_H__
diff --git a/runtimes/neurun/core/include/model/Model.h b/runtimes/neurun/core/include/model/Model.h
new file mode 100644
index 000000000..365bef198
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Model.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_MODEL_H__
+#define __NEURUN_MODEL_MODEL_H__
+
+#include "model/Operations.h"
+#include "model/OperandIndexSequence.h"
+#include "model/Operands.h"
+
+namespace neurun
+{
+namespace model
+{
+
+struct Model
+{
+ model::Operations operations;
+ model::Operands operands;
+ model::OperandIndexSequence inputs;
+ model::OperandIndexSequence outputs;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_MODEL_H__
diff --git a/runtimes/neurun/core/include/model/Operand.h b/runtimes/neurun/core/include/model/Operand.h
new file mode 100644
index 000000000..6cfe40cb9
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Operand.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERAND_H__
+#define __NEURUN_MODEL_OPERAND_H__
+
+#include <cassert>
+#include <cstdint>
+#include <cpp14/memory.h>
+#include <algorithm>
+
+#include "Data.h"
+#include "DataType.h"
+#include "OperandInfo.h"
+#include "graph/operand/ParentInfo.h" // TODO Remove this dependency
+#include "model/OperationIndexList.h"
+
+namespace neurun
+{
+namespace model
+{
+
+class Operand
+{
+public:
+ explicit Operand(const Shape &shape, const TypeInfo &type) : _info{shape, type}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const Shape &shape(void) const { return _info.shape(); }
+ const TypeInfo &typeInfo(void) const { return _info.typeInfo(); }
+ Layout layout() const { return _info.layout(); }
+ const OperandInfo &info(void) const { return _info; }
+ size_t operandSize(void) const;
+
+ const OperationIndexList &getUses() const { return _uses; }
+ const OperationIndexList &getDef() const { return _def; }
+ void appendUse(const OperationIndex &idx);
+ void removeUse(const OperationIndex &idx);
+ void appendDef(const OperationIndex &idx);
+ void removeDef(const OperationIndex &idx);
+
+public:
+  void type(const DataType &type) { _info.type(type); }
+
+public:
+ void data(std::unique_ptr<Data> &&data) { _data = std::move(data); }
+ const Data &data(void) const
+ {
+ assert(_data);
+ return *_data;
+ }
+
+  /**
+   * @brief Check if Operand has data
+   * @return @c true if Operand has data, otherwise @c false
+   */
+ bool isConstant(void) const { return _data != nullptr; }
+
+public:
+ template <typename T, typename... Args> void data(Args &&... args)
+ {
+ data(nnfw::cpp14::make_unique<T>(std::forward<Args>(args)...));
+ }
+
+public:
+ template <typename T> T asScalar(void) const
+ {
+ assert((shape().rank() == 0) || ((shape().rank() == 1) && (shape().dim(0) == 1)));
+ assert(_data != nullptr);
+ assert((_data->base() != nullptr) && (_data->size() == sizeof(T)));
+
+ return *(reinterpret_cast<const T *>(_data->base()));
+ }
+
+public:
+ /**
+ * @brief Set parent information
+ * @param[in] parent_info Parent information
+ */
+ void parent_info(std::unique_ptr<graph::operand::ParentInfo> &&parent_info);
+ /**
+ * @brief Return parent information pointer as constant
+ * @return Parent information pointer
+ */
+ const graph::operand::ParentInfo *parent_info() const;
+ /**
+ * @brief Return parent information pointer
+   * @return Parent information pointer
+ */
+ graph::operand::ParentInfo *parent_info();
+
+private:
+ OperandInfo _info;
+ std::unique_ptr<Data> _data;
+
+ OperationIndexList _uses;
+ OperationIndexList _def; // size is 0 (constant) or 1 (from def operation)
+
+ std::unique_ptr<graph::operand::ParentInfo> _parent_info;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERAND_H__
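
The templated data<T>() overload together with asScalar() gives a compact path for attaching and reading back constant values. A sketch for an assumed rank-0 INT32 operand:

    int32_t value = 42;
    operand.data<neurun::model::CachedData>(
        reinterpret_cast<const uint8_t *>(&value), sizeof(value));

    assert(operand.isConstant());
    int32_t read_back = operand.asScalar<int32_t>(); // read_back == 42
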
diff --git a/runtimes/neurun/core/include/model/OperandConstraint.h b/runtimes/neurun/core/include/model/OperandConstraint.h
new file mode 100644
index 000000000..c3145d20d
--- /dev/null
+++ b/runtimes/neurun/core/include/model/OperandConstraint.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERAND_CONSTRAINT_H__
+#define __NEURUN_MODEL_OPERAND_CONSTRAINT_H__
+
+#include <stdint.h>
+#include <limits>
+#include <set>
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class OperandConstraint
+{
+private:
+ static const uint32_t INF = std::numeric_limits<uint32_t>::max();
+
+public:
+ static OperandConstraint createAny() { return OperandConstraint{0u, INF}; }
+ static OperandConstraint createExact(uint32_t exact) { return OperandConstraint{exact, exact}; }
+ static OperandConstraint createAtMost(uint32_t end) { return OperandConstraint{0u, end}; }
+ static OperandConstraint createAtLeast(uint32_t begin) { return OperandConstraint{begin, INF}; }
+ static OperandConstraint createInRange(uint32_t begin, uint32_t end)
+ {
+ return OperandConstraint{begin, end};
+ }
+
+private:
+ OperandConstraint(uint32_t begin, uint32_t end) : _begin{begin}, _end{end} {}
+
+public:
+ bool check(uint32_t ind) const { return _begin <= ind && ind <= _end; }
+
+private:
+ uint32_t _begin;
+ uint32_t _end;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERAND_CONSTRAINT_H__
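
The factory methods state an operation's allowed input count declaratively, and check() validates an actual count against it. For example, an operation accepting two or three inputs:

    using neurun::model::operation::OperandConstraint;

    auto constraint = OperandConstraint::createInRange(2u, 3u);
    assert(constraint.check(2));  // ok
    assert(constraint.check(3));  // ok
    assert(!constraint.check(4)); // too many inputs
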
diff --git a/runtimes/neurun/core/include/model/OperandIndexMap.h b/runtimes/neurun/core/include/model/OperandIndexMap.h
new file mode 100644
index 000000000..c3492d4d0
--- /dev/null
+++ b/runtimes/neurun/core/include/model/OperandIndexMap.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERAND_INDEX_MAP_H__
+#define __NEURUN_MODEL_OPERAND_INDEX_MAP_H__
+
+#include <unordered_map>
+
+#include "Index.h"
+
+namespace neurun
+{
+namespace model
+{
+
+template <typename T> using OperandIndexMap = std::unordered_map<model::OperandIndex, T>;
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERAND_INDEX_MAP_H__
diff --git a/runtimes/neurun/core/include/model/OperandIndexSequence.h b/runtimes/neurun/core/include/model/OperandIndexSequence.h
new file mode 100644
index 000000000..2cf060df2
--- /dev/null
+++ b/runtimes/neurun/core/include/model/OperandIndexSequence.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERAND_INDEX_SEQUENCE_H__
+#define __NEURUN_MODEL_OPERAND_INDEX_SEQUENCE_H__
+
+#include <initializer_list>
+#include <vector>
+
+#include "Index.h"
+
+namespace neurun
+{
+namespace model
+{
+
+class OperandIndexSequence
+{
+public:
+ OperandIndexSequence(void) = default;
+ OperandIndexSequence(std::initializer_list<OperandIndex> list);
+ OperandIndexSequence(std::initializer_list<int32_t> list);
+ OperandIndexSequence(std::initializer_list<uint32_t> list);
+
+public:
+ void append(const OperandIndex &index) { _set.emplace_back(index); }
+
+public:
+ uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
+ const OperandIndex &at(IOIndex set_index) const { return _set.at(set_index.value()); }
+ const OperandIndex &at(uint32_t index) const { return _set.at(index); }
+ bool contains(const OperandIndex &index) const;
+ void replace(const OperandIndex &from, const OperandIndex &to);
+
+public:
+ std::vector<OperandIndex>::const_iterator begin(void) const { return _set.begin(); }
+ std::vector<OperandIndex>::const_iterator end(void) const { return _set.end(); }
+
+private:
+ std::vector<OperandIndex> _set;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERAND_INDEX_SEQUENCE_H__
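Despite the `_set` member name, OperandIndexSequence is an ordered container that allows duplicates. A usage sketch, again assuming OperandIndex converts from uint32_t:

    #include "model/OperandIndexSequence.h"

    using namespace neurun::model;

    OperandIndexSequence inputs{0u, 1u}; // initializer_list<uint32_t> overload
    inputs.append(OperandIndex{5u});     // order is preserved: 0, 1, 5

    inputs.replace(OperandIndex{1u}, OperandIndex{7u});
    for (const auto &idx : inputs)
    {
      // visits 0, 7, 5 in insertion order
    }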
diff --git a/runtimes/neurun/core/include/model/OperandInfo.h b/runtimes/neurun/core/include/model/OperandInfo.h
new file mode 100644
index 000000000..036306e76
--- /dev/null
+++ b/runtimes/neurun/core/include/model/OperandInfo.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file OperandInfo.h
+ * @brief This file contains OperandInfo class
+ */
+#ifndef __NEURUN_MODEL_OPERAND_INFO_H__
+#define __NEURUN_MODEL_OPERAND_INFO_H__
+
+#include "Shape.h"
+#include "TypeInfo.h"
+#include "Layout.h"
+
+namespace neurun
+{
+namespace model
+{
+
+/**
+ * @brief Class to save tensor's shape and type
+ */
+class OperandInfo
+{
+public:
+ /**
+ * @brief Construct a new OperandInfo object (deleted)
+ */
+ OperandInfo() = delete;
+ /**
+ * @brief Construct a new OperandInfo object
+ * @param[in] shape Tensor shape
+   * @param[in] typeInfo Tensor data type
+   * @param[in] layout Tensor layout in the model (defaults to Layout::NHWC)
+   */
+ OperandInfo(const Shape &shape, const TypeInfo &typeInfo, Layout layout = Layout::NHWC)
+ : _shape(shape), _typeInfo(typeInfo), _layout(layout)
+ {
+ // DO NOTHING
+ }
+ /**
+   * @brief Construct a new OperandInfo object (copy constructor)
+   * @param[in] origin The OperandInfo object to copy from
+ */
+ OperandInfo(const OperandInfo &origin)
+ : _shape(origin.shape()), _typeInfo(origin.typeInfo()), _layout(origin.layout())
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Return tensor shape
+ * @return Tensor shape
+ */
+ const Shape &shape() const { return _shape; }
+ /**
+ * @brief Return tensor data type info
+ * @return Tensor data type
+ */
+ const TypeInfo &typeInfo() const { return _typeInfo; }
+ /**
+ * @brief Return operand shape layout in model
+ * @return Tensor shape layout
+ */
+ Layout layout() const { return _layout; }
+ /**
+ * @brief Set tensor data type
+ */
+ void type(const DataType &type) { _typeInfo.type(type); }
+ /**
+ * @brief Return size of tensor (bytes)
+ * @return Tensor size
+ */
+ size_t total_size() const { return _shape.num_elements() * sizeOfDataType(_typeInfo.type()); }
+
+private:
+ Shape _shape;
+ TypeInfo _typeInfo;
+ Layout _layout;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERAND_INFO_H__
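total_size() is simply the element count times the element width. A sketch, assuming DataType.h (not shown in this hunk) declares a FLOAT32 enumerator:

    #include "model/OperandInfo.h"

    using namespace neurun::model;

    // 1x3x224x224 float tensor: 150528 elements * 4 bytes = 602112 bytes
    OperandInfo info{Shape{1, 3, 224, 224}, TypeInfo{DataType::FLOAT32}};
    const auto bytes = info.total_size(); // 602112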
diff --git a/runtimes/neurun/core/include/model/Operands.h b/runtimes/neurun/core/include/model/Operands.h
new file mode 100644
index 000000000..517d2ff2b
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Operands.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERANDS_H__
+#define __NEURUN_MODEL_OPERANDS_H__
+
+#include <memory>
+#include <unordered_map>
+
+#include "Operand.h"
+#include "Index.h"
+#include "util/ObjectManager.h"
+
+namespace neurun
+{
+namespace model
+{
+
+class Operands : public util::ObjectManager<OperandIndex, Operand>
+{
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERANDS_H__
diff --git a/runtimes/neurun/core/include/model/Operation.h b/runtimes/neurun/core/include/model/Operation.h
new file mode 100644
index 000000000..029684dbd
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Operation.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_H__
+#define __NEURUN_MODEL_OPERATION_H__
+
+#include <memory>
+
+#include "model/Operand.h"
+#include "model/OperandIndexSequence.h"
+#include "model/OperandConstraint.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+class LowerInfo;
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace model
+{
+
+struct OperationVisitor;
+
+using OperandConstraint = ::neurun::model::operation::OperandConstraint;
+
+class Operation
+{
+public:
+ Operation(OperandConstraint input_constr, const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs);
+ explicit Operation(OperandConstraint input_constr);
+
+ Operation(const Operation &) = delete;
+ Operation(Operation &&) = default;
+ Operation &operator=(const Operation &) = delete;
+ Operation &operator=(Operation &&) = default;
+
+ virtual ~Operation();
+
+public:
+ virtual void accept(OperationVisitor &v) const = 0;
+ virtual std::string getName() const = 0;
+
+public:
+ void replaceInput(const OperandIndex &from, const OperandIndex &to);
+ void replaceOutput(const OperandIndex &from, const OperandIndex &to);
+ const OperandIndexSequence &getInputs() const { return _inputs; }
+ const OperandIndexSequence &getOutputs() const { return _outputs; }
+  // These setters are meant only for graph input/output tensors, not for constant data.
+ void setInputs(const OperandIndexSequence &indexes);
+ void setOutputs(const OperandIndexSequence &indexes);
+
+private:
+ OperandConstraint _input_constr;
+ OperandIndexSequence _inputs;
+ OperandIndexSequence _outputs;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_H__
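Every concrete node pins down an input-count constraint in its constructor and implements the two pure virtuals. A hypothetical node (not part of this patch), mirroring the headers that follow:

    #include "model/Operation.h"

    namespace example // hypothetical, for illustration only
    {

    class SquareNode : public neurun::model::Operation
    {
    public:
      SquareNode(const neurun::model::OperandIndexSequence &inputs,
                 const neurun::model::OperandIndexSequence &outputs)
          : Operation{neurun::model::OperandConstraint::createExact(1u), inputs, outputs}
      {
      }

      // A real node would also be listed in Operations.lst so that
      // OperationVisitor gains a visit() overload for it; accept() is
      // therefore only declared here, to be defined out of line.
      void accept(neurun::model::OperationVisitor &v) const override;
      std::string getName() const override { return "Square"; }
    };

    } // namespace example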
diff --git a/runtimes/neurun/core/include/model/OperationIndexList.h b/runtimes/neurun/core/include/model/OperationIndexList.h
new file mode 100644
index 000000000..924af7925
--- /dev/null
+++ b/runtimes/neurun/core/include/model/OperationIndexList.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_INDEX_LIST_H__
+#define __NEURUN_MODEL_OPERATION_INDEX_LIST_H__
+
+#include <initializer_list>
+#include <list>
+
+#include "model/Index.h"
+
+namespace neurun
+{
+namespace model
+{
+
+class OperationIndexList
+{
+public:
+ OperationIndexList(void) = default;
+ OperationIndexList(std::initializer_list<OperationIndex> list);
+
+public:
+ void append(const OperationIndex &index) { _list.push_back(index); }
+ void remove(const OperationIndex &index) { _list.remove(index); }
+
+public:
+ uint32_t size() const { return static_cast<uint32_t>(_list.size()); }
+ const std::list<OperationIndex> &list() const { return _list; }
+ bool contains(const OperationIndex &index) const;
+
+private:
+ std::list<OperationIndex> _list;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_INDEX_LIST_H__
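The std::list backing makes remove-by-value cheap, which suits the typical use of tracking which operations reference an operand. A short sketch (OperationIndex assumed constructible from uint32_t):

    #include "model/OperationIndexList.h"

    using namespace neurun::model;

    OperationIndexList uses;            // e.g. operations that read one operand
    uses.append(OperationIndex{3u});
    uses.append(OperationIndex{8u});
    uses.remove(OperationIndex{3u});    // std::list::remove erases by value
    const bool alive = uses.size() > 0; // true: {8} remains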
diff --git a/runtimes/neurun/core/include/model/OperationIndexMap.h b/runtimes/neurun/core/include/model/OperationIndexMap.h
new file mode 100644
index 000000000..e0399ef3c
--- /dev/null
+++ b/runtimes/neurun/core/include/model/OperationIndexMap.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_INDEX_MAP_H__
+#define __NEURUN_MODEL_OPERATION_INDEX_MAP_H__
+
+#include <unordered_map>
+
+#include "Index.h"
+
+namespace neurun
+{
+namespace model
+{
+
+template <typename T> using OperationIndexMap = std::unordered_map<model::OperationIndex, T>;
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_INDEX_MAP_H__
diff --git a/runtimes/neurun/core/include/model/OperationVisitor.h b/runtimes/neurun/core/include/model/OperationVisitor.h
new file mode 100644
index 000000000..200e62dd6
--- /dev/null
+++ b/runtimes/neurun/core/include/model/OperationVisitor.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_VISITOR_H__
+#define __NEURUN_MODEL_OPERATION_VISITOR_H__
+
+#include "Operations.Include.h"
+#include "Subgraph.h"
+
+namespace neurun
+{
+namespace model
+{
+
+struct OperationVisitor
+{
+ virtual ~OperationVisitor() = default;
+
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const operation::InternalName &) {}
+#include "model/Operations.lst"
+#undef OP
+
+  // Subgraph is visited specially here because it is intentionally
+  // not listed in Operations.lst
+ virtual void visit(const Subgraph &subgraph)
+ {
+ for (const auto &e : subgraph.operations())
+ {
+ e.node->accept(*this);
+ }
+ }
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_VISITOR_H__
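Because the OP macro stamps out an empty default visit() per operation, a visitor only overrides the nodes it cares about. For example, a sketch that counts Conv2D nodes:

    #include <cstdint>
    #include "model/OperationVisitor.h"

    struct Conv2DCounter : public neurun::model::OperationVisitor
    {
      uint32_t count = 0;
      void visit(const neurun::model::operation::Conv2DNode &) override { ++count; }
      // every other node type falls through to the empty default visit()
    };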
diff --git a/runtimes/neurun/core/include/model/Operations.Include.h b/runtimes/neurun/core/include/model/Operations.Include.h
new file mode 100644
index 000000000..e6790c93b
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Operations.Include.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file has no ifdef guard intentionally
+
+#include "operation/Conv2DNode.h"
+#include "operation/MaxPool2DNode.h"
+#include "operation/AvgPool2DNode.h"
+#include "operation/ConcatNode.h"
+#include "operation/ReshapeNode.h"
+#include "operation/FullyConnectedNode.h"
+#include "operation/SoftmaxNode.h"
+#include "operation/TransposeNode.h"
+#include "operation/PermuteNode.h"
+#include "operation/ReduceSumNode.h"
+#include "operation/AddNode.h"
+#include "operation/SubNode.h"
+#include "operation/DepthwiseConv2DNode.h"
+#include "operation/StridedSliceNode.h"
+#include "operation/MulNode.h"
+#include "operation/SqueezeNode.h"
+#include "operation/TanhNode.h"
+#include "operation/LogisticNode.h"
+#include "operation/CastNode.h"
+#include "operation/DivNode.h"
+#include "operation/ExpNode.h"
+#include "operation/ReduceMaxNode.h"
+#include "operation/ComparisonNode.h"
+#include "operation/LogicalAndNode.h"
+#include "operation/LogicalOrNode.h"
+#include "operation/LogicalNotNode.h"
+#include "operation/LSTMNode.h"
+#include "operation/RSQRTNode.h"
+#include "operation/ReLUNode.h"
+#include "operation/ResizeBilinearNode.h"
+#include "operation/ReLU1Node.h"
+#include "operation/ReLU6Node.h"
+#include "operation/RNNNode.h"
+#include "operation/FloorNode.h"
+#include "operation/SpaceToDepthNode.h"
+#include "operation/L2Pool2DNode.h"
+#include "operation/EmbeddingLookupNode.h"
+#include "operation/L2NormalizationNode.h"
+#include "operation/HashtableLookupNode.h"
+#include "operation/PReLUNode.h"
+#include "operation/TransposeConvNode.h"
+#include "operation/SQRTNode.h"
+#include "operation/SquaredDifferenceNode.h"
+#include "operation/TopKV2Node.h"
+#include "operation/GatherNode.h"
+#include "operation/NegNode.h"
+#include "operation/AbsNode.h"
+#include "operation/ArgMaxNode.h"
+#include "operation/DequantizeNode.h"
+#include "operation/MeanNode.h"
+#include "operation/LocalResponseNormalizationNode.h"
+#include "operation/DepthToSpaceNode.h"
+#include "operation/ReduceMinNode.h"
+#include "operation/SplitNode.h"
+#include "operation/UnpackNode.h"
+#include "operation/PadNode.h"
+#include "operation/CustomNode.h"
diff --git a/runtimes/neurun/core/include/model/Operations.h b/runtimes/neurun/core/include/model/Operations.h
new file mode 100644
index 000000000..4a1b2ca8d
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Operations.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATIONS_H__
+#define __NEURUN_MODEL_OPERATIONS_H__
+
+#include "model/Index.h"
+#include "model/Operation.h"
+#include "util/ObjectManager.h"
+
+namespace neurun
+{
+namespace model
+{
+
+class Operations : public util::ObjectManager<OperationIndex, Operation>
+{
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATIONS_H__
diff --git a/runtimes/neurun/core/include/model/Operations.lst b/runtimes/neurun/core/include/model/Operations.lst
new file mode 100644
index 000000000..ef645dd35
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Operations.lst
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OP
+#error Define OP before including this file
+#endif
+
+// NOTE One internal operation ("Internal Name") may correspond to several NN API operations, i.e. the relation is "1 : N".
+
+// Internal Name | NN API?
+OP(AddNode , true)
+OP(SubNode , true)
+OP(CastNode , true)
+OP(Conv2DNode , true)
+OP(DepthwiseConv2DNode , true)
+OP(AvgPool2DNode , true)
+OP(MaxPool2DNode , true)
+OP(ConcatNode , true)
+OP(FullyConnectedNode , true)
+OP(ReduceSumNode , true)
+OP(ReshapeNode , true)
+OP(MulNode , true)
+OP(SoftmaxNode , true)
+OP(SqueezeNode , true)
+OP(StridedSliceNode , true)
+OP(TanhNode , true)
+OP(LogisticNode , true)
+OP(DivNode , true)
+OP(TransposeNode , true)
+OP(ExpNode , true)
+OP(ReduceMaxNode , true)
+OP(ComparisonNode , true)
+OP(LogicalAndNode , true)
+OP(LogicalOrNode , true)
+OP(LogicalNotNode , true)
+OP(LSTMNode , true)
+OP(RSQRTNode , true)
+OP(ReLUNode , true)
+OP(ResizeBilinearNode , true)
+OP(ReLU1Node , true)
+OP(ReLU6Node , true)
+OP(RNNNode , true)
+OP(FloorNode , true)
+OP(SpaceToDepthNode , true)
+OP(L2Pool2DNode , true)
+OP(EmbeddingLookupNode , true)
+OP(L2NormalizationNode , true)
+OP(HashtableLookupNode , true)
+OP(PReLUNode , true)
+OP(TransposeConvNode , true)
+OP(SQRTNode , true)
+OP(SquaredDifferenceNode , true)
+OP(TopKV2Node , true)
+OP(GatherNode , true)
+OP(NegNode , true)
+OP(AbsNode , true)
+OP(ArgMaxNode , true)
+OP(DequantizeNode , true)
+OP(MeanNode , true)
+OP(LocalResponseNormalizationNode , true)
+OP(DepthToSpaceNode , true)
+OP(ReduceMinNode , true)
+OP(SplitNode , true)
+OP(UnpackNode , true)
+OP(PadNode , true)
+OP(CustomNode , true)
+OP(PermuteNode , false)
+
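This is the classic X-macro pattern: each includer defines OP, includes the list, and undefines OP, stamping out one expansion per operation. For example, a sketch that builds a table of operation names:

    #include <string>
    #include <vector>

    std::vector<std::string> allOperationNames()
    {
      std::vector<std::string> names;
    #define OP(InternalName, IsNnApi) names.emplace_back(#InternalName);
    #include "model/Operations.lst"
    #undef OP
      return names; // "AddNode", "SubNode", ..., "PermuteNode"
    }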
diff --git a/runtimes/neurun/core/include/model/Shape.h b/runtimes/neurun/core/include/model/Shape.h
new file mode 100644
index 000000000..c8d986633
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Shape.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_SHAPE_H__
+#define __NEURUN_MODEL_SHAPE_H__
+
+#include "Layout.h"
+#include "misc/feature/Shape.h"
+
+#include <cstdint>
+#include <vector>
+
+namespace neurun
+{
+namespace model
+{
+
+// TODO Remove this dependency.
+using FeatureShape = nnfw::misc::feature::Shape;
+
+struct Shape
+{
+public:
+ Shape() = default;
+
+ explicit Shape(int rank) : _dimensions(rank) {}
+
+ Shape(std::initializer_list<int32_t> dimensions) : _dimensions(dimensions) {}
+
+ int rank() const { return _dimensions.size(); }
+
+ const std::vector<int32_t> &dims() const { return _dimensions; }
+
+ int32_t dim(int i) const { return _dimensions.at(i); }
+
+ int32_t &dim(int i) { return _dimensions.at(i); }
+
+ uint64_t num_elements() const;
+
+public:
+ FeatureShape asFeature(Layout layout) const;
+
+ /**
+ * @brief Add dimension to the beginning
+ * @param[in] d dimension to add to the beginning
+ */
+ void prepend(int32_t d) { _dimensions.insert(_dimensions.cbegin(), d); }
+
+ /**
+ * @brief Add dimension to the end
+ * @param[in] d dimension to add to the end
+ */
+ void append(int32_t d) { _dimensions.emplace_back(d); }
+
+ /**
+ * @brief Extend rank of Shape object for operand with param.
+ * @param[in] to_rank The rank value to be extended to
+ */
+ void extendRank(int to_rank);
+
+private:
+ std::vector<int32_t> _dimensions;
+};
+
+inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); }
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_SHAPE_H__
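One construction pitfall is worth noting alongside a basic sketch: brace-initialization selects the initializer_list constructor, so Shape{5} is a rank-1 shape with dimension 5, while Shape(5) is a rank-5 shape with default-initialized dimensions.

    #include "model/Shape.h"

    using namespace neurun::model;

    Shape s{2, 3};      // rank 2
    s.append(4);        // dims: {2, 3, 4}
    s.prepend(1);       // dims: {1, 2, 3, 4}
    const uint64_t n = s.num_elements(); // 1*2*3*4 = 24

    Shape r1{5};        // rank 1, single dimension of 5
    Shape r5(5);        // rank 5, dimensions default-initialized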
diff --git a/runtimes/neurun/core/include/model/Subgraph.h b/runtimes/neurun/core/include/model/Subgraph.h
new file mode 100644
index 000000000..70abf6a1c
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Subgraph.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_SUBGRAPH_H__
+#define __NEURUN_MODEL_SUBGRAPH_H__
+
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "Layout.h"
+#include "Index.h"
+#include "Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+
+// To support ValueSwappable, Element must not hold class-type members by
+// value, unless those classes are themselves Swappable
+struct Element
+{
+ OperationIndex index;
+ const Operation *node;
+
+ Element(const OperationIndex *i, const Operation *n) : index{*i}, node{n}
+ {
+ // DO NOTHING
+ }
+};
+
+class Subgraph : public Operation
+{
+public:
+ explicit Subgraph(model::Layout layout);
+ Subgraph(const Subgraph &) = delete;
+
+public:
+ void accept(OperationVisitor &v) const override;
+
+  std::string getName(void) const override { return "Subgraph"; }
+
+public:
+ void appendOperation(const OperationIndex &index, const Operation &node)
+ {
+ _operations.emplace_back(&index, &node);
+ }
+
+ std::vector<Element> &operations(void) { return _operations; }
+
+ const std::vector<Element> &operations(void) const { return _operations; }
+
+  uint32_t size(void) const { return static_cast<uint32_t>(_operations.size()); }
+
+  // TODO Implement a Dumper instead of this method
+  std::string getStr(void) const;
+
+public:
+ Layout getLayout() const { return _layout; }
+
+private:
+  std::vector<Element> _operations;
+  Layout _layout;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_SUBGRAPH_H__
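Element stores the operation by pointer, so appended operations must outlive the Subgraph. A sketch of building and walking one, where op_index and op are hypothetical placeholders for values owned elsewhere (e.g. by Operations):

    using namespace neurun::model;

    Subgraph subg{Layout::NHWC};
    subg.appendOperation(op_index, op); // OperationIndex and Operation& from the model

    for (const auto &elem : subg.operations())
    {
      // elem.index is the OperationIndex, elem.node the non-owned Operation*
    }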
diff --git a/runtimes/neurun/core/include/model/Subgraphs.h b/runtimes/neurun/core/include/model/Subgraphs.h
new file mode 100644
index 000000000..13bc549be
--- /dev/null
+++ b/runtimes/neurun/core/include/model/Subgraphs.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_SUBGRAPHS_H__
+#define __NEURUN_MODEL_SUBGRAPHS_H__
+
+#include "model/Index.h"
+#include "model/Subgraph.h"
+#include "util/ObjectManager.h"
+
+namespace neurun
+{
+namespace model
+{
+
+/**
+ * @brief Class that manages Subgraph objects
+ */
+class Subgraphs : public util::ObjectManager<SubgraphIndex, Subgraph>
+{
+public:
+ /**
+ * @brief Create an instance of Subgraph with given op and push it to objects
+ *
+   * @param[in] op_index Operation index to be emplaced
+   * @param[in] op Operation to be emplaced
+ * @param[in] layout Subgraph's layout
+ * @return SubgraphIndex
+ */
+ SubgraphIndex emplace(const OperationIndex &op_index, const Operation &op, Layout layout);
+
+ /**
+ * @brief Push an instance of Subgraph to objects
+ *
+ * @param[in] subg An instance of Subgraph
+ * @return SubgraphIndex
+ */
+ SubgraphIndex emplace(std::unique_ptr<Subgraph> &&subg);
+
+ /**
+   * @brief Check whether the given operation exists in any subgraph
+   *
+   * @param operation_index Operation index to find
+   * @return true if the operation exists in some subgraph, false otherwise
+ */
+ bool containsOperation(const OperationIndex &operation_index) const;
+ /**
+   * @brief Find the subgraph that contains the given operation
+   *
+   * @param operation_index Operation index to find
+   * @return SubgraphIndex Index of the Subgraph that contains the operation
+ */
+ SubgraphIndex getOperation(const OperationIndex &operation_index) const;
+ /**
+ * @brief Dump subgraphs
+ *
+ * @param msg Message that will be displayed
+ */
+ void dump(const std::string &msg) const;
+
+private:
+ SubgraphIndex findOperation(const OperationIndex &operation_index) const;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_SUBGRAPHS_H__
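A sketch of the intended flow during scheduling, with op_index, op, and other_index standing in for values taken from the model (hypothetical placeholders):

    using namespace neurun::model;

    Subgraphs subgs;
    // Open a new subgraph seeded with one operation
    const SubgraphIndex sidx = subgs.emplace(op_index, op, Layout::NHWC);

    if (!subgs.containsOperation(other_index))
    {
      // other_index is not assigned to any subgraph yet, so calling
      // getOperation(other_index) here would be invalid
    }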
diff --git a/runtimes/neurun/core/include/model/TypeInfo.h b/runtimes/neurun/core/include/model/TypeInfo.h
new file mode 100644
index 000000000..4d6a5458b
--- /dev/null
+++ b/runtimes/neurun/core/include/model/TypeInfo.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_TYPEINFO_H__
+#define __NEURUN_MODEL_TYPEINFO_H__
+
+#include <cstdint>
+
+#include "DataType.h"
+
+namespace neurun
+{
+namespace model
+{
+
+class TypeInfo
+{
+public:
+ TypeInfo() = delete;
+
+ explicit TypeInfo(DataType type, float scale = 0, int32_t offset = 0)
+ : _type(type), _scale(scale), _offset(offset)
+ {
+ }
+
+public:
+ DataType type() const { return _type; }
+ float scale() const { return _scale; }
+ int32_t offset() const { return _offset; }
+
+public:
+ void type(const DataType &type) { _type = type; }
+
+private:
+ DataType _type;
+ float _scale;
+ int32_t _offset;
+};
+
+bool operator==(const TypeInfo &lhs, const TypeInfo &rhs);
+bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs);
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_TYPEINFO_H__
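scale and offset carry the usual NN API affine-quantization parameters, with offset playing the zero-point role. A sketch, assuming DataType.h declares a QUANT8_ASYMM enumerator:

    using namespace neurun::model;

    // real = scale * (quantized - offset)
    TypeInfo qtype{DataType::QUANT8_ASYMM, /*scale=*/0.0078125f, /*offset=*/128};
    const float real = qtype.scale() * (200 - qtype.offset()); // 0.5625f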
diff --git a/runtimes/neurun/core/include/model/operation/AbsNode.h b/runtimes/neurun/core/include/model/operation/AbsNode.h
new file mode 100644
index 000000000..a081d05ba
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/AbsNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_ABS_NODE_H__
+#define __NEURUN_MODEL_OPERATION_ABS_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class AbsNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ AbsNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Abs"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_ABS_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/AddNode.h b/runtimes/neurun/core/include/model/operation/AddNode.h
new file mode 100644
index 000000000..4310cb231
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/AddNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_ADD_NODE_H__
+#define __NEURUN_MODEL_OPERATION_ADD_NODE_H__
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class AddNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ LHS = 0,
+ RHS
+ };
+
+ struct Param
+ {
+ Activation activation;
+ };
+
+public:
+ AddNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Add"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_ADD_NODE_H__
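Nodes with attributes take them as a plain Param struct. A construction sketch; the Activation::RELU enumerator name is assumed from InternalType.h (not shown in this section), and the indices are hypothetical placeholders for operands registered elsewhere:

    using namespace neurun::model;

    operation::AddNode::Param param;
    param.activation = Activation::RELU; // i.e. out = ReLU(lhs + rhs)

    // lhs_index, rhs_index, out_index: OperandIndex values built elsewhere
    operation::AddNode add{OperandIndexSequence{lhs_index, rhs_index},
                           OperandIndexSequence{out_index}, param};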
diff --git a/runtimes/neurun/core/include/model/operation/ArgMaxNode.h b/runtimes/neurun/core/include/model/operation/ArgMaxNode.h
new file mode 100644
index 000000000..1123509ae
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ArgMaxNode.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_ARG_MAX_NODE_H__
+#define __NEURUN_MODEL_OPERATION_ARG_MAX_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ArgMaxNode : public model::Operation
+{
+public:
+ enum Input
+ {
+    INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex axis_index;
+ };
+
+public:
+ ArgMaxNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ArgMax"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_ARG_MAX_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/AvgPool2DNode.h b/runtimes/neurun/core/include/model/operation/AvgPool2DNode.h
new file mode 100644
index 000000000..eb219308e
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/AvgPool2DNode.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_AVGPOOL2D_NODE_H__
+#define __NEURUN_MODEL_OPERATION_AVGPOOL2D_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class AvgPool2DNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ uint32_t kh;
+ uint32_t kw;
+
+ Stride stride;
+ Padding padding;
+ Activation activation;
+ };
+
+public:
+ AvgPool2DNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "AvgPool2D"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_AVGPOOL2D_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/CastNode.h b/runtimes/neurun/core/include/model/operation/CastNode.h
new file mode 100644
index 000000000..7d774dfca
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/CastNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_CAST_NODE_H__
+#define __NEURUN_MODEL_OPERATION_CAST_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class CastNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ CastNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Cast"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_CAST_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ComparisonNode.h b/runtimes/neurun/core/include/model/operation/ComparisonNode.h
new file mode 100644
index 000000000..b8f3074a4
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ComparisonNode.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_COMPARISON_NODE_H__
+#define __NEURUN_MODEL_OPERATION_COMPARISON_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ComparisonNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT0 = 0,
+ INPUT1
+ };
+
+ enum class ComparisonType
+ {
+ Equal,
+ NotEqual,
+ Greater,
+ GreaterEqual,
+ Less,
+ LessEqual
+ };
+
+ struct Param
+ {
+ ComparisonType comparison_type;
+ };
+
+public:
+ ComparisonNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Comparison"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_COMPARISON_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ConcatNode.h b/runtimes/neurun/core/include/model/operation/ConcatNode.h
new file mode 100644
index 000000000..63965f243
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ConcatNode.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_CONCAT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_CONCAT_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ConcatNode : public model::Operation
+{
+public:
+ struct Param
+ {
+ int32_t axis;
+ };
+
+public:
+ ConcatNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Concat"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_CONCAT_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/Conv2DNode.h b/runtimes/neurun/core/include/model/operation/Conv2DNode.h
new file mode 100644
index 000000000..0e7e5b7fb
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/Conv2DNode.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_CONV2D_NODE_H__
+#define __NEURUN_MODEL_OPERATION_CONV2D_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class Conv2DNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ KERNEL,
+ BIAS
+ };
+
+ struct Param
+ {
+ Stride stride;
+ Padding padding;
+ Activation activation;
+ };
+
+public:
+ Conv2DNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Conv2D"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_CONV2D_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/CustomNode.h b/runtimes/neurun/core/include/model/operation/CustomNode.h
new file mode 100644
index 000000000..ea51b9f3e
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/CustomNode.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __NEURUN_MODEL_OPERATION_CUSTOM_NODE_H__
+#define __NEURUN_MODEL_OPERATION_CUSTOM_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class CustomNode : public model::Operation
+{
+public:
+ struct Userdata
+ {
+ char *data;
+ size_t size;
+ };
+
+ CustomNode(OperandConstraint input_constr, const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, std::string id, const Userdata &userdata);
+
+ void accept(OperationVisitor &v) const override;
+
+public:
+ /**
+ * @return unique operation identifier
+ */
+ const std::string &id() const;
+
+ std::string getName() const override;
+
+ /**
+ * @return user-provided data
+ */
+ const Userdata &userdata() const;
+
+ ~CustomNode() override;
+
+private:
+ std::string _id;
+ Userdata _userdata;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+#endif // __NEURUN_MODEL_OPERATION_CUSTOM_NODE_H__
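Unlike the fixed-arity nodes, CustomNode takes its constraint explicitly and carries an opaque, kernel-defined blob; whether Userdata is copied or merely referenced is decided in the implementation file, not in this header. A construction sketch with hypothetical placeholders:

    using namespace neurun::model;

    static char payload[16] = {0}; // opaque bytes interpreted by the custom kernel
    operation::CustomNode::Userdata ud{payload, sizeof(payload)};

    // inputs/outputs: OperandIndexSequence values built elsewhere
    operation::CustomNode node{OperandConstraint::createAny(), inputs, outputs,
                               "MyCustomOp", ud};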
diff --git a/runtimes/neurun/core/include/model/operation/DepthToSpaceNode.h b/runtimes/neurun/core/include/model/operation/DepthToSpaceNode.h
new file mode 100644
index 000000000..eee6ab7a5
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/DepthToSpaceNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_DEPTH_TO_SPACE_NODE_H__
+#define __NEURUN_MODEL_OPERATION_DEPTH_TO_SPACE_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class DepthToSpaceNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex block_size_index;
+ };
+
+public:
+ DepthToSpaceNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "DepthToSpace"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_DEPTH_TO_SPACE_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/DepthwiseConv2DNode.h b/runtimes/neurun/core/include/model/operation/DepthwiseConv2DNode.h
new file mode 100644
index 000000000..45122fa2c
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/DepthwiseConv2DNode.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_DEPTHWISECONV2D_NODE_H__
+#define __NEURUN_MODEL_OPERATION_DEPTHWISECONV2D_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class DepthwiseConv2DNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ KERNEL,
+ BIAS
+ };
+
+ struct Param
+ {
+ Stride stride;
+ Padding padding;
+ uint32_t multiplier;
+ Activation activation;
+ };
+
+public:
+ DepthwiseConv2DNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "DepthwiseConv2D"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_DEPTHWISECONV2D_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/DequantizeNode.h b/runtimes/neurun/core/include/model/operation/DequantizeNode.h
new file mode 100644
index 000000000..1536c0f09
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/DequantizeNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_DEQUANTIZE_NODE_H__
+#define __NEURUN_MODEL_OPERATION_DEQUANTIZE_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class DequantizeNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ DequantizeNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Dequantize"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_DEQUANTIZE_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/DivNode.h b/runtimes/neurun/core/include/model/operation/DivNode.h
new file mode 100644
index 000000000..d30efe116
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/DivNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_DIV_NODE_H__
+#define __NEURUN_MODEL_OPERATION_DIV_NODE_H__
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class DivNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ LHS = 0,
+ RHS
+ };
+
+ struct Param
+ {
+ Activation activation;
+ };
+
+public:
+ DivNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Div"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_DIV_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/EmbeddingLookupNode.h b/runtimes/neurun/core/include/model/operation/EmbeddingLookupNode.h
new file mode 100644
index 000000000..9b61884db
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/EmbeddingLookupNode.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_EMBEDDING_LOOKUP_NODE_H__
+#define __NEURUN_MODEL_OPERATION_EMBEDDING_LOOKUP_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class EmbeddingLookupNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ LOOKUPS = 0,
+ VALUES = 1
+ };
+
+public:
+ EmbeddingLookupNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "EmbeddingLookup"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_EMBEDDING_LOOKUP_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ExpNode.h b/runtimes/neurun/core/include/model/operation/ExpNode.h
new file mode 100644
index 000000000..fa7aa1d68
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ExpNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_EXP_NODE_H__
+#define __NEURUN_MODEL_OPERATION_EXP_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ExpNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ ExpNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Exp"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_EXP_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/FloorNode.h b/runtimes/neurun/core/include/model/operation/FloorNode.h
new file mode 100644
index 000000000..13d87e8a7
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/FloorNode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_FLOOR_NODE_H__
+#define __NEURUN_MODEL_OPERATION_FLOOR_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class FloorNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ FloorNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Floor"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_FLOOR_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/FullyConnectedNode.h b/runtimes/neurun/core/include/model/operation/FullyConnectedNode.h
new file mode 100644
index 000000000..61809b660
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/FullyConnectedNode.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_FULLYCONNECTED_NODE_H__
+#define __NEURUN_MODEL_OPERATION_FULLYCONNECTED_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class FullyConnectedNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ WEIGHT,
+ BIAS
+ };
+
+ struct Param
+ {
+ Activation activation;
+ };
+
+public:
+ FullyConnectedNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "FullyConnected"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_FULLYCONNECTED_NODE_H__
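Nodes that carry a Param struct all follow the shape shown above: the struct is passed at construction and read back through the const param() accessor. A hedged sketch, again with placeholder indices; Activation::NONE is assumed to be an enumerator in InternalType.h, which this hunk includes but does not show.

    #include "model/OperandIndexSequence.h"
    #include "model/operation/FullyConnectedNode.h"

    using namespace neurun::model;

    void fully_connected_sketch()
    {
      operation::FullyConnectedNode::Param param;
      param.activation = Activation::NONE; // assumed enumerator from InternalType.h

      // Inputs must follow the Input enum order: INPUT, WEIGHT, BIAS.
      operation::FullyConnectedNode fc{
          OperandIndexSequence{OperandIndex{0}, OperandIndex{1}, OperandIndex{2}},
          OperandIndexSequence{OperandIndex{3}}, param};
      (void)fc.param(); // parameters are immutable after construction
    }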
diff --git a/runtimes/neurun/core/include/model/operation/GatherNode.h b/runtimes/neurun/core/include/model/operation/GatherNode.h
new file mode 100644
index 000000000..fddeefcf0
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/GatherNode.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_GATHER_NODE_H__
+#define __NEURUN_MODEL_OPERATION_GATHER_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class GatherNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ INDICES,
+ };
+
+ struct Param
+ {
+ OperandIndex axis_index;
+ };
+
+public:
+ GatherNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Gather"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_GATHER_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/HashtableLookupNode.h b/runtimes/neurun/core/include/model/operation/HashtableLookupNode.h
new file mode 100644
index 000000000..bbf3d309b
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/HashtableLookupNode.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_HASHTABLE_LOOKUP_NODE_H__
+#define __NEURUN_MODEL_OPERATION_HASHTABLE_LOOKUP_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class HashtableLookupNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ LOOKUPS = 0,
+ KEYS = 1,
+ VALUES = 2
+ };
+
+ enum Output
+ {
+ OUTPUT = 0,
+ HITS = 1
+ };
+
+public:
+ HashtableLookupNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "HashTableLookup"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_HASHTABLE_LOOKUP_NODE_H__
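HashtableLookupNode is one of the few operations here with more than one result, so the Output enum is what keeps callers honest when unpacking them. A sketch, assuming model::Operation exposes a getOutputs() accessor and OperandIndexSequence an at() lookup; both live in headers outside this hunk.

    #include "model/operation/HashtableLookupNode.h"

    using namespace neurun::model;

    void unpack_outputs(const operation::HashtableLookupNode &node)
    {
      // getOutputs()/at() are assumed from model/Operation.h and
      // model/OperandIndexSequence.h, which are not shown in this hunk.
      const auto output = node.getOutputs().at(operation::HashtableLookupNode::Output::OUTPUT);
      const auto hits = node.getOutputs().at(operation::HashtableLookupNode::Output::HITS);
      (void)output; // tensor holding the looked-up values
      (void)hits;   // per-row hit/miss flags
    }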
diff --git a/runtimes/neurun/core/include/model/operation/L2NormalizationNode.h b/runtimes/neurun/core/include/model/operation/L2NormalizationNode.h
new file mode 100644
index 000000000..3c126de45
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/L2NormalizationNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_L2_NORMALIZATION_NODE_H__
+#define __NEURUN_MODEL_OPERATION_L2_NORMALIZATION_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class L2NormalizationNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ L2NormalizationNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "L2Normalization"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_L2_NORMALIZATION_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/L2Pool2DNode.h b/runtimes/neurun/core/include/model/operation/L2Pool2DNode.h
new file mode 100644
index 000000000..76e80d35a
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/L2Pool2DNode.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
+#define __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class L2Pool2DNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ };
+
+ struct Param
+ {
+ Padding padding;
+ Stride stride;
+ uint32_t kw;
+ uint32_t kh;
+ Activation activation;
+ };
+
+public:
+ L2Pool2DNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "L2Pool2D"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/LSTMNode.h b/runtimes/neurun/core/include/model/operation/LSTMNode.h
new file mode 100644
index 000000000..e453aed6b
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/LSTMNode.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __NEURUN_MODEL_OPERATION_LSTM_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LSTM_NODE_H__
+
+#include "model/InternalType.h"
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class LSTMNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ INPUT_TO_INPUT_WEIGHTS = 1,
+ INPUT_TO_FORGET_WEIGHTS = 2,
+ INPUT_TO_CELL_WEIGHTS = 3,
+ INPUT_TO_OUTPUT_WEIGHTS = 4,
+ RECURRENT_TO_INPUT_WEIGHTS = 5,
+ RECURRENT_TO_FORGET_WEIGHTS = 6,
+ RECURRENT_TO_CELL_WEIGHTS = 7,
+ RECURRENT_TO_OUTPUT_WEIGHTS = 8,
+ CELL_TO_INPUT_WEIGHTS = 9,
+ CELL_TO_FORGET_WEIGHTS = 10,
+ CELL_TO_OUTPUT_WEIGHTS = 11,
+ INPUT_GATE_BIAS = 12,
+ FORGET_GATE_BIAS = 13,
+ CELL_BIAS = 14,
+ OUTPUT_GATE_BIAS = 15,
+ PROJECTION_WEIGHTS = 16,
+ PROJECTION_BIAS = 17,
+ OUTPUT_STATE_IN = 18,
+ CELL_STATE_IN = 19,
+ };
+
+ enum Output
+ {
+ SCRATCH_BUFFER = 0,
+ OUTPUT_STATE_OUT = 1,
+ CELL_STATE_OUT = 2,
+ OUTPUT = 3
+ };
+
+ struct Param
+ {
+ Activation activation;
+ float cell_threshold;
+ float projection_threshold;
+ };
+
+public:
+ LSTMNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "LSTM"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LSTM_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/LocalResponseNormalizationNode.h b/runtimes/neurun/core/include/model/operation/LocalResponseNormalizationNode.h
new file mode 100644
index 000000000..a7c1cd382
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/LocalResponseNormalizationNode.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_LOCAL_RESPONSE_NORMALIZATION_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LOCAL_RESPONSE_NORMALIZATION_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class LocalResponseNormalizationNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex radius_index;
+ OperandIndex bias_index;
+ OperandIndex alpha_index;
+ OperandIndex beta_index;
+ };
+
+public:
+ LocalResponseNormalizationNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "LocalResponseNormalization"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LOCAL_RESPONSE_NORMALIZATION_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/LogicalAndNode.h b/runtimes/neurun/core/include/model/operation/LogicalAndNode.h
new file mode 100644
index 000000000..058f457d2
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/LogicalAndNode.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_LOGICAL_AND_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LOGICAL_AND_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class LogicalAndNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT0 = 0,
+ INPUT1 = 1,
+ };
+
+public:
+ LogicalAndNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "LogicalAnd"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LOGICAL_AND_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/LogicalNotNode.h b/runtimes/neurun/core/include/model/operation/LogicalNotNode.h
new file mode 100644
index 000000000..d694510d7
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/LogicalNotNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_LOGICAL_NOT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LOGICAL_NOT_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class LogicalNotNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ };
+
+public:
+ LogicalNotNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "LogicalNot"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LOGICAL_NOT_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/LogicalOrNode.h b/runtimes/neurun/core/include/model/operation/LogicalOrNode.h
new file mode 100644
index 000000000..220aea2c7
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/LogicalOrNode.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_LOGICAL_OR_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LOGICAL_OR_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class LogicalOrNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT0 = 0,
+ INPUT1 = 1,
+ };
+
+public:
+ LogicalOrNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "LogicalOr"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LOGICAL_OR_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/LogisticNode.h b/runtimes/neurun/core/include/model/operation/LogisticNode.h
new file mode 100644
index 000000000..03577143b
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/LogisticNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_LOGISTIC_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LOGISTIC_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class LogisticNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ LogisticNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Logistic"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LOGISTIC_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/MaxPool2DNode.h b/runtimes/neurun/core/include/model/operation/MaxPool2DNode.h
new file mode 100644
index 000000000..e8afe863d
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/MaxPool2DNode.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_MAXPOOL2D_NODE_H__
+#define __NEURUN_MODEL_OPERATION_MAXPOOL2D_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class MaxPool2DNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ uint32_t kh;
+ uint32_t kw;
+ Stride stride;
+ Padding padding;
+ Activation activation;
+ };
+
+public:
+ MaxPool2DNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "MaxPool2D"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_MAXPOOL2D_NODE_H__
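Unlike the axis-style parameters elsewhere in this patch, the pooling Param holds plain values (kernel size, stride, padding, activation) rather than operand indices. A value-initialization sketch; Stride, Padding and Activation are defined in InternalType.h, which is not part of this hunk, so only the integer fields are filled in here.

    #include "model/operation/MaxPool2DNode.h"

    using namespace neurun::model;

    operation::MaxPool2DNode::Param make_2x2_pool_param()
    {
      operation::MaxPool2DNode::Param param{}; // value-initialize every field
      param.kh = 2;                            // kernel height
      param.kw = 2;                            // kernel width
      // param.stride, param.padding and param.activation come from
      // InternalType.h (outside this hunk) and would be set the same way.
      return param;
    }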
diff --git a/runtimes/neurun/core/include/model/operation/MeanNode.h b/runtimes/neurun/core/include/model/operation/MeanNode.h
new file mode 100644
index 000000000..9d142545d
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/MeanNode.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_MEAN_NODE_H__
+#define __NEURUN_MODEL_OPERATION_MEAN_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class MeanNode : public model::Operation
+{
+public:
+ enum Input
+ {
+    INPUT = 0

+ };
+
+ struct Param
+ {
+ OperandIndex axis_index;
+ bool keep_dims;
+ };
+
+public:
+ MeanNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Mean"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_MEAN_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/MulNode.h b/runtimes/neurun/core/include/model/operation/MulNode.h
new file mode 100644
index 000000000..e76155256
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/MulNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_MUL_NODE_H__
+#define __NEURUN_MODEL_OPERATION_MUL_NODE_H__
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class MulNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ LHS = 0,
+ RHS
+ };
+
+ struct Param
+ {
+ Activation activation;
+ };
+
+public:
+ MulNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Mul"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_MUL_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/NegNode.h b/runtimes/neurun/core/include/model/operation/NegNode.h
new file mode 100644
index 000000000..07f27eab7
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/NegNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_NEG_NODE_H__
+#define __NEURUN_MODEL_OPERATION_NEG_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class NegNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ NegNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Neg"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_NEG_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/PReLUNode.h b/runtimes/neurun/core/include/model/operation/PReLUNode.h
new file mode 100644
index 000000000..e31805d7f
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/PReLUNode.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_PRELU_NODE_H__
+#define __NEURUN_MODEL_OPERATION_PRELU_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class PReLUNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ ALPHA = 1
+ };
+
+public:
+ PReLUNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "PReLU"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_PRELU_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/PadNode.h b/runtimes/neurun/core/include/model/operation/PadNode.h
new file mode 100644
index 000000000..c4cc18c39
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/PadNode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_PAD_NODE_H__
+#define __NEURUN_MODEL_OPERATION_PAD_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class PadNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ PAD = 1,
+    // VALUE = 2: a padding value operand is not supported yet
+ };
+
+public:
+ PadNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Pad"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_PAD_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/PermuteNode.h b/runtimes/neurun/core/include/model/operation/PermuteNode.h
new file mode 100644
index 000000000..2339f35ee
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/PermuteNode.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_PERMUTE_NODE_H__
+#define __NEURUN_MODEL_OPERATION_PERMUTE_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace backend
+{
+class BackendContext;
+} // namespace backend
+} // namespace neurun
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class PermuteNode : public model::Operation
+{
+public:
+ enum class Type
+ {
+ NHWC_TO_NCHW,
+ NCHW_TO_NHWC,
+ COPY
+ };
+
+ struct Param
+ {
+ const backend::BackendContext *input_backend_ctx;
+ const backend::BackendContext *output_backend_ctx;
+ };
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Permute"; }
+
+public:
+ PermuteNode(const OperandIndex &input, const OperandIndex &output,
+ const backend::BackendContext *input_backend_ctx,
+ const backend::BackendContext *output_backend_ctx, Type type,
+ model::DataType data_type = model::DataType::FLOAT32);
+
+public:
+ const Param &param() const { return _param; }
+ model::DataType getDataType() const { return _dataType; }
+ Type getPermuteType() const { return _type; }
+
+private:
+ Param _param;
+ Type _type;
+ model::DataType _dataType;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_PERMUTE_NODE_H__
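PermuteNode stands apart from the other headers in this patch: it is constructed from single operand indices rather than index sequences, and it carries the two backend contexts by raw pointer. A construction sketch, where the nullptr contexts and the indices are placeholders for illustration only.

    #include "model/operation/PermuteNode.h"

    using namespace neurun::model;

    void permute_sketch()
    {
      operation::PermuteNode permute{OperandIndex{0}, OperandIndex{1},
                                     /*input_backend_ctx=*/nullptr,
                                     /*output_backend_ctx=*/nullptr,
                                     operation::PermuteNode::Type::NHWC_TO_NCHW};
      // data_type defaults to DataType::FLOAT32, per the declaration above.
      (void)permute.getPermuteType();
    }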
diff --git a/runtimes/neurun/core/include/model/operation/RNNNode.h b/runtimes/neurun/core/include/model/operation/RNNNode.h
new file mode 100644
index 000000000..fb4c9b325
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/RNNNode.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __NEURUN_MODEL_OPERATION_RNN_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RNN_NODE_H__
+
+#include "model/InternalType.h"
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class RNNNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ WEIGHTS = 1,
+ RECURRENT_WEIGHTS = 2,
+ BIAS = 3,
+ HIDDEN_STATE_IN = 4
+ };
+
+ enum Output
+ {
+ OUTPUT = 0,
+ HIDDEN_STATE_OUT = 1
+ };
+
+ struct Param
+ {
+ Activation activation;
+ };
+
+public:
+ RNNNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "RNN"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RNN_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/RSQRTNode.h b/runtimes/neurun/core/include/model/operation/RSQRTNode.h
new file mode 100644
index 000000000..bd3fe2227
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/RSQRTNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RSQRT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RSQRT_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class RSQRTNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ RSQRTNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "RSQRT"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RSQRT_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ReLU1Node.h b/runtimes/neurun/core/include/model/operation/ReLU1Node.h
new file mode 100644
index 000000000..d8a325f21
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ReLU1Node.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RELU1_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RELU1_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReLU1Node : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ ReLU1Node(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ReLU1"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RELU1_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ReLU6Node.h b/runtimes/neurun/core/include/model/operation/ReLU6Node.h
new file mode 100644
index 000000000..437f1e07b
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ReLU6Node.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RELU6_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RELU6_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReLU6Node : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ ReLU6Node(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ReLU6"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RELU6_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ReLUNode.h b/runtimes/neurun/core/include/model/operation/ReLUNode.h
new file mode 100644
index 000000000..848ca1b5c
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ReLUNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RELU_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RELU_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReLUNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ ReLUNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ReLU"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RELU_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ReduceMaxNode.h b/runtimes/neurun/core/include/model/operation/ReduceMaxNode.h
new file mode 100644
index 000000000..3886ff481
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ReduceMaxNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_REDUCEMAX_NODE_H__
+#define __NEURUN_MODEL_OPERATION_REDUCEMAX_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReduceMaxNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex axis_index;
+ };
+
+public:
+ ReduceMaxNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ReduceMax"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_REDUCEMAX_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ReduceMinNode.h b/runtimes/neurun/core/include/model/operation/ReduceMinNode.h
new file mode 100644
index 000000000..f0de17c07
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ReduceMinNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_REDUCEMIN_NODE_H__
+#define __NEURUN_MODEL_OPERATION_REDUCEMIN_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReduceMinNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex axis_index;
+ };
+
+public:
+ ReduceMinNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ReduceMin"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_REDUCEMIN_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ReduceSumNode.h b/runtimes/neurun/core/include/model/operation/ReduceSumNode.h
new file mode 100644
index 000000000..b70c83cff
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ReduceSumNode.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_REDUCE_SUM_NODE_H__
+#define __NEURUN_MODEL_OPERATION_REDUCE_SUM_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReduceSumNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex axis_index;
+ };
+
+public:
+ ReduceSumNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ReduceSum"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_REDUCE_SUM_NODE_H__
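Note that for the reduce family the reduction axis is itself an operand, referenced by index in Param, rather than an immediate value. A sketch with placeholder indices:

    #include "model/OperandIndexSequence.h"
    #include "model/operation/ReduceSumNode.h"

    using namespace neurun::model;

    void reduce_sum_sketch()
    {
      operation::ReduceSumNode::Param param;
      param.axis_index = OperandIndex{2}; // the axis value lives in the operand table

      operation::ReduceSumNode reduce{OperandIndexSequence{OperandIndex{0}},
                                      OperandIndexSequence{OperandIndex{1}}, param};
      (void)reduce.param().axis_index;
    }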
diff --git a/runtimes/neurun/core/include/model/operation/ReshapeNode.h b/runtimes/neurun/core/include/model/operation/ReshapeNode.h
new file mode 100644
index 000000000..735aa30e0
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ReshapeNode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RESHAPE_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RESHAPE_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReshapeNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ ReshapeNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Reshape"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RESHAPE_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/ResizeBilinearNode.h b/runtimes/neurun/core/include/model/operation/ResizeBilinearNode.h
new file mode 100644
index 000000000..76f0341cc
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/ResizeBilinearNode.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ResizeBilinearNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex height_index;
+ OperandIndex width_index;
+ };
+
+public:
+ ResizeBilinearNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "ResizeBilinear"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/SQRTNode.h b/runtimes/neurun/core/include/model/operation/SQRTNode.h
new file mode 100644
index 000000000..b693dab94
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/SQRTNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_SQRT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SQRT_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class SQRTNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ SQRTNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "SQRT"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_SQRT_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/SoftmaxNode.h b/runtimes/neurun/core/include/model/operation/SoftmaxNode.h
new file mode 100644
index 000000000..0810526f3
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/SoftmaxNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_SOFTMAX_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SOFTMAX_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class SoftmaxNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ float beta;
+ };
+
+public:
+ SoftmaxNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "SoftMax"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_SOFTMAX_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/SpaceToDepthNode.h b/runtimes/neurun/core/include/model/operation/SpaceToDepthNode.h
new file mode 100644
index 000000000..bbf6732f1
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/SpaceToDepthNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_SPACE_TO_DEPTH_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SPACE_TO_DEPTH_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class SpaceToDepthNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex block_size_index;
+ };
+
+public:
+ SpaceToDepthNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "SpaceToDepth"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_SPACE_TO_DEPTH_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/SplitNode.h b/runtimes/neurun/core/include/model/operation/SplitNode.h
new file mode 100644
index 000000000..eee2c4f84
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/SplitNode.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __NEURUN_MODEL_OPERATION_SPLIT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SPLIT_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+class SplitNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex axis_index;
+ OperandIndex num_of_splits_index;
+ };
+
+public:
+ SplitNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Split"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+} // namespace operation
+} // namespace model
+} // namespace neurun
+#endif // __NEURUN_MODEL_OPERATION_SPLIT_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/SquaredDifferenceNode.h b/runtimes/neurun/core/include/model/operation/SquaredDifferenceNode.h
new file mode 100644
index 000000000..180c68731
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/SquaredDifferenceNode.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_SQUARED_DIFFERENCE_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SQUARED_DIFFERENCE_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class SquaredDifferenceNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ LHS = 0,
+ RHS
+ };
+
+public:
+ SquaredDifferenceNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "SquaredDifference"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_SQUARED_DIFFERENCE_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/SqueezeNode.h b/runtimes/neurun/core/include/model/operation/SqueezeNode.h
new file mode 100644
index 000000000..aa2386b94
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/SqueezeNode.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_SQUEEZE_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SQUEEZE_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class SqueezeNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ OperandIndex dims;
+ };
+
+public:
+ SqueezeNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Squeeze"; }
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_SQUEEZE_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/StridedSliceNode.h b/runtimes/neurun/core/include/model/operation/StridedSliceNode.h
new file mode 100644
index 000000000..4de5bc9df
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/StridedSliceNode.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_STRIDED_SLICE_H__
+#define __NEURUN_MODEL_OPERATION_STRIDED_SLICE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class StridedSliceNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+    OperandIndex startData_index;      //!< index where slicing starts from
+    OperandIndex endData_index;        //!< index where slicing ends
+    OperandIndex stridesData_index;    //!< index for stride values
+    OperandIndex beginMask_index;      //!< index for begin mask
+    OperandIndex endMask_index;        //!< index for end mask
+    OperandIndex shrinkAxisMask_index; //!< index for shrink axis mask
+ };
+
+public:
+ StridedSliceNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "StridedSlice"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_STRIDED_SLICE_H__
diff --git a/runtimes/neurun/core/include/model/operation/SubNode.h b/runtimes/neurun/core/include/model/operation/SubNode.h
new file mode 100644
index 000000000..cb930fd95
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/SubNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_SUB_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SUB_NODE_H__
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class SubNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ LHS = 0,
+ RHS
+ };
+
+ struct Param
+ {
+ Activation activation;
+ };
+
+public:
+ SubNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Sub"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_SUB_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/TanhNode.h b/runtimes/neurun/core/include/model/operation/TanhNode.h
new file mode 100644
index 000000000..5af480ab7
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/TanhNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_TANH_NODE_H__
+#define __NEURUN_MODEL_OPERATION_TANH_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class TanhNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+public:
+ TanhNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Tanh"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_TANH_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/TopKV2Node.h b/runtimes/neurun/core/include/model/operation/TopKV2Node.h
new file mode 100644
index 000000000..675c19c58
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/TopKV2Node.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_TOPK_V2_H__
+#define __NEURUN_MODEL_OPERATION_TOPK_V2_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class TopKV2Node : public model::Operation
+{
+public:
+ enum Input
+ {
+    INPUT = 0
+ };
+
+ enum Output
+ {
+ OUTPUT_VALUES = 0,
+ OUTPUT_INDICES,
+ };
+
+ struct Param
+ {
+ OperandIndex k_index;
+ };
+
+public:
+ TopKV2Node(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "TopKV2"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_TOPK_V2_H__
diff --git a/runtimes/neurun/core/include/model/operation/TransposeConvNode.h b/runtimes/neurun/core/include/model/operation/TransposeConvNode.h
new file mode 100644
index 000000000..72443c810
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/TransposeConvNode.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_TRANSPOSE_CONV_NODE_H__
+#define __NEURUN_MODEL_OPERATION_TRANSPOSE_CONV_NODE_H__
+
+#include <memory>
+
+#include "model/Operation.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class TransposeConvNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ OUTPUT_SHAPE = 0,
+ KERNEL,
+ INPUT
+ };
+
+ struct Param
+ {
+ Padding padding;
+ Stride stride;
+ };
+
+public:
+ TransposeConvNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "TransposeConv"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_TRANSPOSE_CONV_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/TransposeNode.h b/runtimes/neurun/core/include/model/operation/TransposeNode.h
new file mode 100644
index 000000000..1a42212e8
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/TransposeNode.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_TRANSPOSE_NODE_H__
+#define __NEURUN_MODEL_OPERATION_TRANSPOSE_NODE_H__
+
+#include "model/Operation.h"
+
+#include <utility>
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class TransposeNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0, // for an n-D tensor, specifying the tensor to be transposed.
+ };
+
+ struct Param
+ {
+    // Index of the permutation vector operand; the permutation vector is
+    // optional. perm is a plain OperandIndex, so an invalid index denotes
+    // that no permutation vector was provided.
+ OperandIndex perm;
+ };
+
+public:
+ TransposeNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Transpose"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_TRANSPOSE_NODE_H__
diff --git a/runtimes/neurun/core/include/model/operation/UnpackNode.h b/runtimes/neurun/core/include/model/operation/UnpackNode.h
new file mode 100644
index 000000000..08d8979bf
--- /dev/null
+++ b/runtimes/neurun/core/include/model/operation/UnpackNode.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __NEURUN_MODEL_OPERATION_UNPACK_NODE_H__
+#define __NEURUN_MODEL_OPERATION_UNPACK_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+class UnpackNode : public model::Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0
+ };
+
+ struct Param
+ {
+ int32_t num;
+ int32_t axis;
+ };
+
+public:
+ UnpackNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ std::string getName() const override { return "Unpack"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+} // namespace operation
+} // namespace model
+} // namespace neurun
+#endif // __NEURUN_MODEL_OPERATION_UNPACK_NODE_H__
diff --git a/runtimes/neurun/core/include/util/Config.lst b/runtimes/neurun/core/include/util/Config.lst
new file mode 100644
index 000000000..c17ac147e
--- /dev/null
+++ b/runtimes/neurun/core/include/util/Config.lst
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CONFIG
+#error Define CONFIG before including this file
+#endif
+
+// Name | Type | Default
+CONFIG(GRAPH_DOT_DUMP , int , "0")
+CONFIG(BACKENDS , std::string , "cpu;acl_cl;acl_neon;srcn")
+CONFIG(OP_BACKEND_ALLOPS , std::string , "acl_cl")
+CONFIG(OP_BACKEND_MAP , std::string , "")
+CONFIG(DISABLE_COMPILE , bool , "0")
+CONFIG(NEURUN_LOG_ENABLE , bool , "0")
+CONFIG(CPU_MEMORY_PLANNER , std::string , "FirstFit")
+CONFIG(EXECUTOR , std::string , "Linear")
+CONFIG(ACL_LAYOUT , std::string , "none")
+CONFIG(PROFILING_MODE , bool , "0")
+CONFIG(USE_SCHEDULER , bool , "0")
+
+// Auto-generate an OP_BACKEND_<InternalName> config key for every operation
+
+#define OP(InternalName, IsNnApi) \
+ CONFIG(OP_BACKEND_ ## InternalName, std::string, "")
+#include "model/Operations.lst"
+#undef OP
+
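Config.lst is an X-macro list: each consumer defines CONFIG before including it, as ConfigSource.h below does to declare the key constants. A minimal sketch of another hypothetical consumer that expands the list into a name-to-default table (the function and map names are illustrative, not part of this commit):

#include <string>
#include <unordered_map>

// Build a {key name -> default value} table from Config.lst (sketch).
static std::unordered_map<std::string, std::string> collectDefaults()
{
  std::unordered_map<std::string, std::string> map;
#define CONFIG(Name, Type, Default) map.emplace(#Name, Default);
#include "util/Config.lst"
#undef CONFIG
  return map;
}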
diff --git a/runtimes/neurun/core/include/util/ConfigSource.h b/runtimes/neurun/core/include/util/ConfigSource.h
new file mode 100644
index 000000000..b1fa9a87d
--- /dev/null
+++ b/runtimes/neurun/core/include/util/ConfigSource.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_CONFIG_SOURCE_H__
+#define __NEURUN_UTIL_CONFIG_SOURCE_H__
+
+#include <memory>
+
+#include "IConfigSource.h"
+
+namespace neurun
+{
+namespace util
+{
+
+void config_source(std::unique_ptr<IConfigSource> &&source);
+
+bool getConfigBool(const std::string &key);
+int getConfigInt(const std::string &key);
+std::string getConfigString(const std::string &key);
+
+} // namespace util
+} // namespace neurun
+
+namespace neurun
+{
+namespace util
+{
+namespace config
+{
+
+#define CONFIG(Name, Type, Default) extern const char *Name;
+
+#include "Config.lst"
+
+#undef CONFIG
+
+} // namespace config
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_CONFIG_SOURCE_H__
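A minimal usage sketch for the accessors above, assuming a config source has already been installed; the keys come from Config.lst via the extern declarations in the config namespace:

#include "util/ConfigSource.h"

void dumpConfigExample() // illustrative helper, not part of this commit
{
  using namespace neurun::util;
  if (getConfigBool(config::NEURUN_LOG_ENABLE))
  {
    const int dot_level = getConfigInt(config::GRAPH_DOT_DUMP);
    const std::string backends = getConfigString(config::BACKENDS);
    (void)dot_level;
    (void)backends;
  }
}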
diff --git a/runtimes/neurun/core/include/util/Coordinates.h b/runtimes/neurun/core/include/util/Coordinates.h
new file mode 100644
index 000000000..67947138f
--- /dev/null
+++ b/runtimes/neurun/core/include/util/Coordinates.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_COORDINATES_H__
+#define __NEURUN_UTIL_COORDINATES_H__
+
+#include <cassert>
+#include <stdint.h>
+#include <vector>
+
+namespace neurun
+{
+namespace util
+{
+
+/**
+ * @brief Class to represent a position (offset) in a tensor.\n
+ *        Dimensions are assumed to be ordered from outermost to innermost,
+ *        e.g. N: 0, C: 1, H: 2, W: 3 for NCHW layout
+ */
+class Coordinates final
+{
+public:
+ static constexpr size_t num_max_dimensions = 4;
+
+public:
+ /**
+ * @brief Construct a new Coordinates object
+   * @param[in] init The initializer_list with coordinates
+ */
+ Coordinates(std::initializer_list<int32_t> init) : _coordinates{init}
+ {
+ assert(init.size() <= num_max_dimensions);
+ }
+
+public:
+ /**
+   * @brief Set the coordinate for one dimension.
+   *
+   * @param[in] dimension  Dimension for which the coordinate is set.
+   * @param[in] coordinate Coordinate to be set for the dimension.
+ */
+ void set(size_t dimension, int32_t coordinate)
+ {
+ assert(dimension < num_max_dimensions);
+ if (dimension >= _coordinates.size())
+ {
+ _coordinates.resize(dimension + 1, 0);
+ }
+ _coordinates[dimension] = coordinate;
+ }
+
+public:
+ /**
+ * @brief Return size of coordinates
+ *
+ * @return size of coordinates
+ */
+ size_t size() const { return _coordinates.size(); }
+
+public:
+ int32_t operator[](size_t dimension) const
+ {
+ assert(dimension < _coordinates.size());
+ return _coordinates[dimension];
+ }
+
+public:
+ /**
+ * @brief begin() of const_iterator for this class
+ *
+ * @return The first iterator of the coordinates
+ */
+ std::vector<int32_t>::const_iterator begin() const { return _coordinates.begin(); }
+ /**
+ * @brief end() of const_iterator for this class
+ *
+ * @return The last iterator of the coordinates
+ */
+ std::vector<int32_t>::const_iterator end() const { return _coordinates.end(); }
+
+private:
+ std::vector<int32_t> _coordinates;
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_COORDINATES_H__
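A short sketch of how Coordinates is typically built and read; dimension ordering follows the class comment (outermost first):

#include "util/Coordinates.h"

void coordinatesExample() // illustrative only
{
  neurun::util::Coordinates coord{0, 1, 2, 3}; // N=0, C=1, H=2, W=3 in NCHW
  coord.set(3, 5);                             // move the W offset to 5
  const int32_t w = coord[3];                  // reads back 5
  (void)w;
}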
diff --git a/runtimes/neurun/core/include/util/GeneralConfigSource.h b/runtimes/neurun/core/include/util/GeneralConfigSource.h
new file mode 100644
index 000000000..04e3332b3
--- /dev/null
+++ b/runtimes/neurun/core/include/util/GeneralConfigSource.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_GLOBAL_CONFIG_SOURCE_H__
+#define __NEURUN_UTIL_GLOBAL_CONFIG_SOURCE_H__
+
+#include <unordered_map>
+
+#include "util/IConfigSource.h"
+
+namespace neurun
+{
+namespace util
+{
+
+class GeneralConfigSource : public IConfigSource
+{
+public:
+ GeneralConfigSource() = default;
+
+ std::string get(const std::string &key) const override;
+ void set(const std::string &key, const std::string &val);
+
+private:
+ std::unordered_map<std::string, std::string> _map;
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_GLOBAL_CONFIG_SOURCE_H__
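A sketch of wiring a GeneralConfigSource into config_source() from ConfigSource.h; the cpp14 make_unique header path is an assumption based on how other neurun sources obtain it:

#include <utility>

#include "util/ConfigSource.h"
#include "util/GeneralConfigSource.h"
#include "cpp14/memory.h" // assumed location of nnfw::cpp14::make_unique

void installConfigExample() // illustrative only
{
  auto source = nnfw::cpp14::make_unique<neurun::util::GeneralConfigSource>();
  source->set("EXECUTOR", "Dataflow"); // overrides the "Linear" default
  neurun::util::config_source(std::move(source));
}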
diff --git a/runtimes/neurun/core/include/util/IConfigSource.h b/runtimes/neurun/core/include/util/IConfigSource.h
new file mode 100644
index 000000000..a52d87097
--- /dev/null
+++ b/runtimes/neurun/core/include/util/IConfigSource.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_I_CONFIG_SOURCE_H__
+#define __NEURUN_UTIL_I_CONFIG_SOURCE_H__
+
+#include <string>
+
+namespace neurun
+{
+namespace util
+{
+
+struct IConfigSource
+{
+ /**
+ * @brief Destroy the IConfigSource object
+ */
+ virtual ~IConfigSource() = default;
+
+ /**
+ * @brief get the value for the matching key
+ *
+ * @param key string key to search
+ * @return string value associated with the key
+ */
+ virtual std::string get(const std::string &key) const = 0;
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_I_CONFIG_SOURCE_H__
diff --git a/runtimes/neurun/core/include/util/ITimer.h b/runtimes/neurun/core/include/util/ITimer.h
new file mode 100644
index 000000000..79ecdd0ca
--- /dev/null
+++ b/runtimes/neurun/core/include/util/ITimer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_ITIMER_H__
+#define __NEURUN_UTIL_ITIMER_H__
+
+#include <chrono>
+
+namespace neurun
+{
+namespace util
+{
+
+class ITimer
+{
+public:
+ virtual void handleBegin() = 0;
+ virtual void handleEnd() = 0;
+  int getTime() { return _timer_res; }
+
+ virtual ~ITimer() = default;
+
+protected:
+ int _timer_res{0};
+};
+
+class CPUTimer : public ITimer
+{
+public:
+  void handleBegin() override { _start_time = std::chrono::steady_clock::now(); }
+
+ void handleEnd() override
+ {
+ const auto end_time = std::chrono::steady_clock::now();
+ _timer_res =
+ std::chrono::duration_cast<std::chrono::microseconds>(end_time - _start_time).count();
+  }
+
+private:
+  std::chrono::steady_clock::time_point _start_time; // start of the measured interval
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_ITIMER_H__
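A minimal sketch of timing a callable with CPUTimer (the wrapper function is illustrative):

#include <functional>

#include "util/ITimer.h"

int measureMicroseconds(const std::function<void()> &fn) // illustrative only
{
  neurun::util::CPUTimer timer;
  timer.handleBegin();
  fn();
  timer.handleEnd();
  return timer.getTime(); // elapsed time in microseconds
}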
diff --git a/runtimes/neurun/core/include/util/Index.h b/runtimes/neurun/core/include/util/Index.h
new file mode 100644
index 000000000..d1fdc237c
--- /dev/null
+++ b/runtimes/neurun/core/include/util/Index.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_INDEX_H__
+#define __NEURUN_UTIL_INDEX_H__
+
+#include <functional>
+#include <limits>
+#include <stdint.h>
+
+namespace neurun
+{
+namespace util
+{
+
+/**
+ * @brief A wrapper class for unsigned integral Index
+ * NOTE : Max value of the underlying type is used as the invalid value
+ *
+ * @tparam T Underlying type. Must be unsigned integral type otherwise its behavior is undefined.
+ * @tparam DummyTag Dummy type to distinguish types with a same underlying type. Using an opaque
+ * type is recommended.
+ */
+template <typename T, typename DummyTag> class Index
+{
+private:
+ static const T UNDEFINED = std::numeric_limits<T>::max();
+
+public:
+ /**
+ * @brief Construct a new Index object
+ */
+ explicit Index(void) : _index{UNDEFINED} {}
+ /**
+ * @brief Construct a new Index object with a value in the underlying type
+ *
+ * @param o Value in the underlying type
+ */
+ explicit Index(T o) : _index{o} {}
+ /**
+ * @brief Copy Constructor
+ *
+ * @param o Object to be copied
+ */
+ Index(const Index &o) : _index{o._index} {}
+
+ /**
+   * @brief Assign a value in the underlying type
+ *
+ * @param o Value in the underlying type
+ * @return Index& Reference of this pointer
+ */
+ Index &operator=(T o)
+ {
+ _index = o;
+ return *this;
+ }
+
+ /**
+ * @brief Copy assignment operator
+ *
+ * @param o Object to be copied
+ * @return Index& Reference of this pointer
+ */
+  Index &operator=(const Index &o)
+ {
+ _index = o._index;
+ return *this;
+ }
+
+ /**
+ * @brief Equality operator
+ *
+ * @param o The other value in the underlying type to compare
+ * @return true if underlying value is the same, false otherwise
+ */
+ bool operator==(T o) const { return _index == o; }
+ /**
+ * @brief Equality operator
+ *
+ * @param o The other object to compare
+ * @return true if underlying value is the same, false otherwise
+ */
+ bool operator==(const Index &o) const { return _index == o._index; }
+ /**
+   * @brief Inequality operator
+   *
+   * @param o The other value in the underlying type to compare
+   * @return true if underlying value is different, false otherwise
+ */
+ bool operator!=(T o) const { return !(*this == o); }
+ /**
+   * @brief Inequality operator
+   *
+   * @param o The other object to compare
+   * @return true if underlying value is different, false otherwise
+ */
+ bool operator!=(const Index &o) const { return !(*this == o); }
+
+ /**
+ * @brief Post increment operator
+ *
+ * @return Index Index before increment
+ */
+ Index operator++(int)
+ {
+ Index temp = *this;
+ _index++;
+ return temp;
+ }
+
+ /**
+ * @brief Check whether the value is valid or not
+ *
+ * @return true if valid, false otherwise
+ */
+ bool valid() const { return _index != UNDEFINED; }
+ /**
+ * @brief Return underlying value
+ *
+ * @return T Underlying value
+ */
+ T value() const { return _index; }
+
+private:
+ T _index;
+};
+
+} // namespace util
+} // namespace neurun
+
+namespace std
+{
+
+template <typename T, typename Tag> struct hash<::neurun::util::Index<T, Tag>>
+{
+ size_t operator()(const ::neurun::util::Index<T, Tag> &index) const noexcept
+ {
+ return hash<T>()(index.value());
+ }
+};
+
+} // namespace std
+
+#endif // __NEURUN_UTIL_INDEX_H__
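A sketch of how the DummyTag parameter keeps otherwise-identical index types from mixing. The tag and alias names here are hypothetical; the real index types live in model/Index.h:

#include <cassert>
#include <cstdint>

#include "util/Index.h"

struct FooTag; // opaque tag types, never defined
struct BarTag;
using FooIndex = neurun::util::Index<uint32_t, FooTag>;
using BarIndex = neurun::util::Index<uint32_t, BarTag>;

void indexExample() // illustrative only
{
  FooIndex foo{3u};
  assert(foo.valid() && foo.value() == 3u);
  // BarIndex bar = foo; // would not compile: distinct tag types
  assert(!FooIndex{}.valid()); // default-constructed index is UNDEFINED
}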
diff --git a/runtimes/neurun/core/include/util/ObjectManager.h b/runtimes/neurun/core/include/util/ObjectManager.h
new file mode 100644
index 000000000..fd2c3f295
--- /dev/null
+++ b/runtimes/neurun/core/include/util/ObjectManager.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_OBJECT_MANAGER_H__
+#define __NEURUN_UTIL_OBJECT_MANAGER_H__
+
+#include <functional>
+#include <list>
+#include <memory>
+#include <unordered_map>
+
+#include "cpp14/memory.h" // assumed header for nnfw::cpp14::make_unique
+
+namespace neurun
+{
+namespace util
+{
+
+/**
+ * @brief Class that owns objects and uses indices as handles to them
+ *
+ */
+template <typename Index, typename Object> class ObjectManager
+{
+public:
+ ObjectManager() : _index_count{0u} {}
+
+public:
+ /**
+   * @brief Create an object in place and register it under a newly generated Index
+   *
+   * @param[in] args Arguments forwarded to the Object constructor
+   * @return Created index that is associated to the object
+ */
+ template <class... Args> Index emplace(Args &&... args)
+ {
+ auto index = generateIndex();
+ _objects.emplace(index, nnfw::cpp14::make_unique<Object>(std::forward<Args>(args)...));
+ return index;
+ }
+
+ /**
+   * @brief Put the given object into the container under a newly generated Index
+ *
+ * @param[in] object Object to be pushed
+ * @return Created index that is associated to the object
+ */
+ Index push(std::unique_ptr<Object> &&object)
+ {
+ auto index = generateIndex();
+ _objects.emplace(index, std::move(object));
+ return index;
+ }
+
+ /**
+ * @brief Remove the object that is associated with the given index
+ *
+ * @param[in] index Index of the object to be removed
+ * @return N/A
+ */
+  void remove(const Index &index) { _objects.erase(index); }
+
+ /**
+ * @brief Get the object that is associated with the given index
+ *
+ * @param[in] index Index of the object to be returned
+ * @return Object
+ */
+ const Object &at(const Index &index) const { return *(_objects.at(index)); }
+ /**
+ * @brief Get the object that is associated with the given index
+ *
+ * @param[in] index Index of the object to be returned
+ * @return Object
+ */
+ Object &at(const Index &index) { return *(_objects.at(index)); }
+ /**
+   * @brief Check whether an object is associated with the given index
+   *
+   * @param[in] index Index of the object to look up
+   * @return true if such an entry exists, otherwise false
+ */
+ bool exist(const Index &index) const
+ {
+ auto it = _objects.find(index);
+ return it != _objects.end();
+ }
+ /**
+ * @brief Iterate over the container with given function
+ *
+ * @param[in] fn Function to be run for every container entry
+ * @return N/A
+ */
+ void iterate(const std::function<void(const Index &, const Object &)> &fn) const
+ {
+ for (const auto &e : _objects)
+ {
+ fn(e.first, *e.second);
+ }
+ }
+ /**
+ * @brief Iterate over the container with given function
+ *
+ * @param[in] fn Function to be run for every container entry
+ * @return N/A
+ */
+ void iterate(const std::function<void(const Index &, Object &)> &fn)
+ {
+ // TODO Remove this workaround
+ // This implementation is a workaround in case of adding operands while iteration
+ std::list<Index> l;
+
+ for (auto &e : _objects)
+ {
+ l.push_back(e.first);
+ }
+
+ for (auto index : l)
+ {
+ fn(index, *_objects[index]);
+ }
+ }
+
+private:
+ Index generateIndex() { return Index{_index_count++}; }
+
+private:
+ std::unordered_map<Index, std::unique_ptr<Object>> _objects;
+ uint32_t _index_count;
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_OBJECT_MANAGER_H__
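A sketch of ObjectManager in use, pairing it with the Index wrapper above; Widget and its tag are hypothetical:

#include <cstdint>

#include "util/Index.h"
#include "util/ObjectManager.h"

struct WidgetTag;
using WidgetIndex = neurun::util::Index<uint32_t, WidgetTag>;

struct Widget
{
  int value;
  explicit Widget(int v) : value{v} {}
};

void objectManagerExample() // illustrative only
{
  neurun::util::ObjectManager<WidgetIndex, Widget> mgr;
  const auto idx = mgr.emplace(42); // constructs Widget{42} in place
  mgr.iterate([](const WidgetIndex &, const Widget &w) { (void)w.value; });
  mgr.remove(idx);
}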
diff --git a/runtimes/neurun/core/include/util/Padding.h b/runtimes/neurun/core/include/util/Padding.h
new file mode 100644
index 000000000..230013238
--- /dev/null
+++ b/runtimes/neurun/core/include/util/Padding.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_PADDING_H__
+#define __NEURUN_UTIL_PADDING_H__
+
+#include <stdint.h>
+
+#include "model/Shape.h"
+#include "model/InternalType.h"
+
+namespace neurun
+{
+namespace util
+{
+
+model::ExplicitPadding validPadding(void);
+model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape,
+ const model::Stride &stride, uint32_t kw, uint32_t kh);
+model::ExplicitPadding calculatePadding(const model::Padding &padding,
+ const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape,
+ const model::Stride &stride, uint32_t kw, uint32_t kh);
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_PADDING_H__
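The header only declares the entry points; for reference, the conventional SAME-padding arithmetic these helpers compute is sketched below. This is the standard TensorFlow-style formula, stated as an assumption about the .cc implementation rather than a quote of it, and Pad2D is an illustrative stand-in for model::ExplicitPadding:

#include <algorithm>
#include <cstdint>

struct Pad2D // illustrative stand-in for model::ExplicitPadding
{
  uint32_t top, bottom, left, right;
};

Pad2D samePadding2D(uint32_t ifm_h, uint32_t ifm_w, uint32_t ofm_h, uint32_t ofm_w,
                    uint32_t stride_v, uint32_t stride_h, uint32_t kh, uint32_t kw)
{
  // Total padding needed so that the kernel covers the whole input.
  const int32_t need_v = std::max(
      0, static_cast<int32_t>((ofm_h - 1) * stride_v + kh) - static_cast<int32_t>(ifm_h));
  const int32_t need_h = std::max(
      0, static_cast<int32_t>((ofm_w - 1) * stride_h + kw) - static_cast<int32_t>(ifm_w));
  // Split as evenly as possible; the extra pixel goes to bottom/right.
  return {static_cast<uint32_t>(need_v / 2), static_cast<uint32_t>(need_v - need_v / 2),
          static_cast<uint32_t>(need_h / 2), static_cast<uint32_t>(need_h - need_h / 2)};
}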
diff --git a/runtimes/neurun/core/include/util/Set.h b/runtimes/neurun/core/include/util/Set.h
new file mode 100644
index 000000000..13213511d
--- /dev/null
+++ b/runtimes/neurun/core/include/util/Set.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Set.h
+ * @brief This file contains neurun::util::Set class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NEURUN_UTIL_SET_H__
+#define __NEURUN_UTIL_SET_H__
+
+#include <cassert>
+#include <cstdint>
+#include <unordered_set>
+
+namespace neurun
+{
+namespace util
+{
+
+/**
+ * @brief Class for a set of custom elements
+ * @tparam Element Key type of the Set
+ */
+template <typename Element> class Set
+{
+public:
+ /**
+ * @brief Construct default Set object.
+ */
+ Set() = default;
+ /**
+ * @brief Construct Set object by copy semantics.
+ */
+ Set(const Set<Element> &) = default;
+ /**
+   * @brief Construct Set object by move semantics.
+ */
+ Set(Set<Element> &&) = default;
+
+public:
+ /**
+ * @brief Add a given element to the set
+ *
+ * @param e Element added
+ */
+ void add(const Element &e) { _set.insert(e); }
+ /**
+   * @brief Remove a given element from the set
+ *
+ * @param e Element removed
+ */
+ void remove(const Element &e) { _set.erase(e); }
+ /**
+ * @brief Get size of the set
+ *
+ * @return The size of the set
+ */
+ uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
+ /**
+ * @brief Get whether the set is empty
+ *
+ * @return Whether the set is empty
+ */
+ bool empty() const { return _set.empty(); }
+ /**
+ * @brief Get whether a given element exists in the set
+ *
+ * @param e A given element
+ *
+ * @return Whether a given element exists in the set
+ */
+ bool contains(const Element &e) const { return _set.find(e) != _set.end(); }
+ /**
+   * @brief Get the only element of the set (the set must hold exactly one)
+   *
+   * @return The only element of the set
+ */
+ const Element &getOnlyElement() const
+ {
+ assert(_set.size() == 1u);
+ return *_set.begin();
+ }
+
+public:
+ /**
+ * @brief operator overloading function for `|`
+ *
+ * @return A set with two sets combined
+ */
+ Set<Element> operator|(const Set<Element> &other) const // Union
+ {
+ auto ret = *this;
+ for (auto e : other)
+ {
+ ret.add(e);
+ }
+ return ret;
+ }
+ /**
+ * @brief operator overloading function for `&`
+ *
+ * @return A set of elements that overlap in two sets
+ */
+ Set<Element> operator&(const Set<Element> &other) const // Intersect
+ {
+ Set<Element> ret;
+ for (auto e : other)
+ {
+ if (contains(e))
+ {
+ ret.add(e);
+ }
+ }
+ return ret;
+ }
+ /**
+ * @brief operator overloading function for `-`
+ *
+   * @return The set difference: elements of this set not in the other
+ */
+ Set<Element> operator-(const Set<Element> &other) const // Minus
+ {
+ auto ret = *this;
+ for (auto e : other)
+ {
+ ret.remove(e);
+ }
+ return ret;
+ }
+
+public:
+ /**
+ * @brief begin() of const_iterator for this class
+ *
+ * @return The first iterator of the set
+ */
+ typename std::unordered_set<Element>::const_iterator begin() const { return _set.begin(); }
+ /**
+ * @brief end() of const_iterator for this class
+ *
+ * @return The last iterator of the set
+ */
+ typename std::unordered_set<Element>::const_iterator end() const { return _set.end(); }
+
+private:
+ std::unordered_set<Element> _set;
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_SET_H__
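A short sketch exercising the set algebra defined above:

#include <cassert>

#include "util/Set.h"

void setExample() // illustrative only
{
  neurun::util::Set<int> a, b;
  a.add(1);
  a.add(2);
  b.add(2);
  b.add(3);
  const auto uni = a | b;   // {1, 2, 3}
  const auto inter = a & b; // {2}
  const auto diff = a - b;  // {1}
  assert(uni.size() == 3);
  assert(inter.getOnlyElement() == 2);
  assert(diff.contains(1) && !diff.contains(2));
}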
diff --git a/runtimes/neurun/core/include/util/ShapeInference.h b/runtimes/neurun/core/include/util/ShapeInference.h
new file mode 100644
index 000000000..54076199b
--- /dev/null
+++ b/runtimes/neurun/core/include/util/ShapeInference.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_SHAPE_INFERENCE_H__
+#define __NEURUN_GRAPH_SHAPE_INFERENCE_H__
+
+#include "model/operation/AvgPool2DNode.h"
+#include "model/operation/ConcatNode.h"
+#include "model/operation/MaxPool2DNode.h"
+#include "model/operation/Conv2DNode.h"
+#include "model/operation/DepthwiseConv2DNode.h"
+#include "model/Operands.h"
+#include "model/Index.h"
+#include "model/Layout.h"
+
+namespace neurun
+{
+namespace shape_inference
+{
+
+using Shapes = std::vector<model::Shape>;
+
+Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape);
+
+Shapes inferAvgPoolShape(const model::Shape &in_shape,
+ const model::operation::AvgPool2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::ConcatNode::Param &param);
+
+Shapes inferMaxPoolShape(const model::Shape &in_shape,
+ const model::operation::MaxPool2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape,
+ const model::operation::Conv2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape,
+ const model::operation::DepthwiseConv2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape);
+
+} // namespace shape_inference
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_SHAPE_INFERENCE_H__
diff --git a/runtimes/neurun/core/include/util/Utils.h b/runtimes/neurun/core/include/util/Utils.h
new file mode 100644
index 000000000..c472dd7c8
--- /dev/null
+++ b/runtimes/neurun/core/include/util/Utils.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Utils.h
+ * @brief This file contains utility functions
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NEURUN_UTIL_UTILS_H__
+#define __NEURUN_UTIL_UTILS_H__
+
+#include "model/InternalType.h"
+#include "model/Layout.h"
+#include "model/Operand.h"
+#include "util/Coordinates.h"
+#include "backend/operand/IObject.h"
+
+#define UNUSED_RELEASE(a) (void)(a)
+
+namespace neurun
+{
+namespace util
+{
+
+/**
+ * @brief Converts an internal padding type to const char*
+ * @param[in] type Padding type to be converted
+ * @return A string holding the converted value
+ */
+const char *to_string(const model::PaddingType &type);
+
+Coordinates convertCoordinates(const Coordinates &from_coordinates, model::Layout from_layout,
+ model::Layout to_layout);
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_UTILS_H__
diff --git a/runtimes/neurun/core/include/util/feature/Coordinate4D.h b/runtimes/neurun/core/include/util/feature/Coordinate4D.h
new file mode 100644
index 000000000..b020ed239
--- /dev/null
+++ b/runtimes/neurun/core/include/util/feature/Coordinate4D.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_FEATURE_COORDINATE_4D_H__
+#define __NEURUN_UTIL_FEATURE_COORDINATE_4D_H__
+
+#include <stdint.h>
+
+namespace neurun
+{
+namespace util
+{
+namespace feature
+{
+
+/**
+ * @brief Class to represent a position (offset) of a subtensor.\n
+ * Assume that parent and child are already lowered (can get Shape4D).
+ */
+class Coordinate4D
+{
+public:
+ /**
+ * @brief Construct a new Coordinate4D object
+ */
+ Coordinate4D(void) : _n{0}, _h{0}, _w{0}, _c{0}
+ {
+ // DO NOTHING
+ }
+ /**
+ * @brief Construct a new Coordinate4D object
+ * @param[in] n Batch offset
+ * @param[in] h Height offset
+ * @param[in] w Width offset
+ * @param[in] c Channel offset
+ */
+ Coordinate4D(int32_t n, int32_t h, int32_t w, int32_t c) : _n{n}, _h{h}, _w{w}, _c{c}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Set batch offset
+ * @param[in] n Batch offset
+ */
+ void n(int32_t n) { _n = n; }
+ /**
+ * @brief Set height offset
+ * @param[in] h Height offset
+ */
+ void h(int32_t h) { _h = h; }
+ /**
+ * @brief Set width offset
+ * @param[in] w Width offset
+ */
+ void w(int32_t w) { _w = w; }
+ /**
+ * @brief Set channel offset
+ * @param[in] c Channel offset
+ */
+ void c(int32_t c) { _c = c; }
+
+public:
+ /**
+ * @brief Return batch offset
+ * @return Batch offset
+ */
+ int32_t n(void) const { return _n; }
+ /**
+ * @brief Return height offset
+ * @return Height offset
+ */
+ int32_t h(void) const { return _h; }
+ /**
+ * @brief Return width offset
+ * @return Width offset
+ */
+ int32_t w(void) const { return _w; }
+ /**
+ * @brief Return channel offset
+ * @return Channel offset
+ */
+ int32_t c(void) const { return _c; }
+
+private:
+ int32_t _n;
+ int32_t _h;
+ int32_t _w;
+ int32_t _c;
+};
+
+} // namespace feature
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_FEATURE_COORDINATE_4D_H__
diff --git a/runtimes/neurun/core/include/util/feature/nchw/View.h b/runtimes/neurun/core/include/util/feature/nchw/View.h
new file mode 100644
index 000000000..37ee8e398
--- /dev/null
+++ b/runtimes/neurun/core/include/util/feature/nchw/View.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_FEATURE_NCHW_VIEW_H__
+#define __NEURUN_UTIL_FEATURE_NCHW_VIEW_H__
+
+#include "misc/feature/Reader.h"
+#include "misc/feature/Shape.h"
+
+#include "backend/operand/ITensor.h"
+#include "util/Coordinates.h"
+
+#include <cassert>
+
+namespace neurun
+{
+namespace util
+{
+namespace feature
+{
+namespace nchw
+{
+
+template <typename T> class View final : public nnfw::misc::feature::Reader<T>
+{
+public:
+ View(::neurun::backend::operand::ITensor *tensor) : _tensor{tensor}
+ {
+ assert(tensor->num_dimensions() == 4 && tensor->layout() == model::Layout::NCHW);
+ _shape.N = tensor->dimension(0);
+ _shape.C = tensor->dimension(1);
+ _shape.H = tensor->dimension(2);
+ _shape.W = tensor->dimension(3);
+ }
+
+public:
+ const ::nnfw::misc::feature::Shape &shape(void) const { return _shape; }
+
+public:
+ T at(uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ const auto offset = feature_index_to_byte_offset(0, ch, row, col);
+
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
+public:
+ T &at(uint32_t ch, uint32_t row, uint32_t col)
+ {
+ const auto offset = feature_index_to_byte_offset(0, ch, row, col);
+
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+ T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
+private:
+ size_t feature_index_to_byte_offset(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
+ {
+ return _tensor->calcOffset(
+ neurun::util::Coordinates{static_cast<int32_t>(batch), static_cast<int32_t>(ch),
+ static_cast<int32_t>(row), static_cast<int32_t>(col)});
+ }
+
+private:
+ ::nnfw::misc::feature::Shape _shape;
+ ::neurun::backend::operand::ITensor *_tensor;
+};
+
+} // namespace nchw
+} // namespace feature
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_FEATURE_NCHW_VIEW_H__
diff --git a/runtimes/neurun/core/include/util/feature/nhwc/Reader.h b/runtimes/neurun/core/include/util/feature/nhwc/Reader.h
new file mode 100644
index 000000000..471f62a4b
--- /dev/null
+++ b/runtimes/neurun/core/include/util/feature/nhwc/Reader.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_FEATURE_NHWC_READER_H__
+#define __NEURUN_UTIL_FEATURE_NHWC_READER_H__
+
+#include "util/Utils.h"
+#include "Utils.h"
+
+#include "misc/feature/Reader.h"
+
+namespace neurun
+{
+namespace util
+{
+namespace feature
+{
+namespace nhwc
+{
+
+template <typename T> class Reader final : public nnfw::misc::feature::Reader<T>
+{
+public:
+ Reader(const ::nnfw::misc::feature::Shape &shape, const T *ptr, size_t len)
+ : _shape{shape}, _ptr{ptr}
+ {
+ UNUSED_RELEASE(len); // Workaround for unused variable in release mode
+ assert(shape.N * shape.C * shape.H * shape.W * sizeof(T) == len);
+ }
+
+public:
+ const nnfw::misc::feature::Shape &shape(void) const { return _shape; }
+
+public:
+ T at(uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, ch, row, col);
+
+ return _ptr[index];
+ }
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ return _ptr[index];
+ }
+
+private:
+ nnfw::misc::feature::Shape _shape;
+
+private:
+ const T *_ptr;
+};
+
+} // namespace nhwc
+} // namespace feature
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_FEATURE_NHWC_READER_H__
diff --git a/runtimes/neurun/core/include/util/feature/nhwc/Utils.h b/runtimes/neurun/core/include/util/feature/nhwc/Utils.h
new file mode 100644
index 000000000..3dab4261c
--- /dev/null
+++ b/runtimes/neurun/core/include/util/feature/nhwc/Utils.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_FEATURE_NHWC_UTILS_H__
+#define __NEURUN_UTIL_FEATURE_NHWC_UTILS_H__
+
+#include "misc/feature/Shape.h"
+
+namespace neurun
+{
+namespace util
+{
+namespace feature
+{
+namespace nhwc
+{
+
+inline uint32_t index_of(const ::nnfw::misc::feature::Shape &shape, uint32_t ch, uint32_t row,
+ uint32_t col)
+{
+ uint32_t res = 0;
+
+ // NNAPI uses NHWC ordering
+ res += row * shape.W * shape.C;
+ res += col * shape.C;
+ res += ch;
+
+ return res;
+}
+
+inline uint32_t index_of(const ::nnfw::misc::feature::Shape &shape, uint32_t batch, uint32_t ch,
+ uint32_t row, uint32_t col)
+{
+ uint32_t res = 0;
+
+ // NNAPI uses NHWC ordering
+ res += batch * shape.H * shape.W * shape.C;
+ res += row * shape.W * shape.C;
+ res += col * shape.C;
+ res += ch;
+
+ return res;
+}
+
+} // namespace nhwc
+} // namespace feature
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_FEATURE_NHWC_UTILS_H__
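A quick sanity check of the flattened NHWC index arithmetic, assuming nnfw::misc::feature::Shape exposes an (N, C, H, W) constructor as elsewhere in the tree:

#include <cassert>

#include "misc/feature/Shape.h"
#include "util/feature/nhwc/Utils.h"

void nhwcIndexExample() // illustrative only
{
  const nnfw::misc::feature::Shape shape{1, 3, 2, 2}; // N=1, C=3, H=2, W=2
  // index = row * W * C + col * C + ch = 1*2*3 + 0*3 + 1 = 7
  assert(neurun::util::feature::nhwc::index_of(shape, 1 /*ch*/, 1 /*row*/, 0 /*col*/) == 7);
}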
diff --git a/runtimes/neurun/core/include/util/feature/nhwc/View.h b/runtimes/neurun/core/include/util/feature/nhwc/View.h
new file mode 100644
index 000000000..cfaab8ea4
--- /dev/null
+++ b/runtimes/neurun/core/include/util/feature/nhwc/View.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_FEATURE_NHWC_VIEW_H__
+#define __NEURUN_UTIL_FEATURE_NHWC_VIEW_H__
+
+#include <cassert>
+#include <cstddef>
+
+#include "Utils.h"
+#include "util/Utils.h"
+
+#include "misc/feature/Reader.h"
+
+namespace neurun
+{
+namespace util
+{
+namespace feature
+{
+namespace nhwc
+{
+
+// This class is for CPU buffers only and does not support padding.
+template <typename T> class View final : public nnfw::misc::feature::Reader<T>
+{
+public:
+ View(const ::nnfw::misc::feature::Shape &shape, T *ptr, size_t len) : _shape{shape}, _ptr{ptr}
+ {
+ UNUSED_RELEASE(len); // Workaround for unused variable in release mode
+ assert(shape.N * shape.C * shape.H * shape.W * sizeof(T) == len);
+ }
+
+public:
+ const nnfw::misc::feature::Shape &shape(void) const { return _shape; }
+
+public:
+ T at(uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, ch, row, col);
+
+ return _ptr[index];
+ }
+
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ return _ptr[index];
+ }
+
+ T &at(uint32_t ch, uint32_t row, uint32_t col)
+ {
+ uint32_t index = index_of(_shape, ch, row, col);
+
+ return _ptr[index];
+ }
+
+ T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ return _ptr[index];
+ }
+
+private:
+ nnfw::misc::feature::Shape _shape;
+
+private:
+ T *_ptr;
+};
+
+} // namespace nhwc
+} // namespace feature
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_FEATURE_NHWC_VIEW_H__
diff --git a/runtimes/neurun/core/include/util/logging.h b/runtimes/neurun/core/include/util/logging.h
new file mode 100644
index 000000000..a2fdbdd59
--- /dev/null
+++ b/runtimes/neurun/core/include/util/logging.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_LOGGING_H__
+#define __NEURUN_UTIL_LOGGING_H__
+
+#include <iostream>
+
+#include "util/ConfigSource.h"
+
+namespace neurun
+{
+namespace util
+{
+namespace logging
+{
+
+class Context
+{
+public:
+ Context() : _enabled{false}
+ {
+ const auto env = util::getConfigBool(util::config::NEURUN_LOG_ENABLE);
+
+ if (env)
+ {
+ _enabled = true;
+ }
+ }
+
+public:
+ bool enabled(void) const { return _enabled; }
+
+private:
+ bool _enabled;
+};
+
+static Context ctx;
+
+} // namespace logging
+} // namespace util
+} // namespace neurun
+
+#define VERBOSE(name) \
+ if (::neurun::util::logging::ctx.enabled()) \
+ std::cout << "[" << #name << "] "
+
+#endif // __NEURUN_UTIL_LOGGING_H__
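
For reference, the macro is meant to be used as a single statement per message; the stream operands are evaluated only when NEURUN_LOG_ENABLE is set. A minimal sketch (MyPass is a placeholder tag):

    #include "util/logging.h"

    void log_example(int index)
    {
      // Prints "[MyPass] visiting node <index>" only when NEURUN_LOG_ENABLE is set
      VERBOSE(MyPass) << "visiting node " << index << std::endl;
    }

Since VERBOSE expands to an unbraced if, placing it as the body of another if/else would bind the else to the macro's hidden if; single-statement usage as above avoids that.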
diff --git a/runtimes/neurun/core/src/backend/Backend.cc b/runtimes/neurun/core/src/backend/Backend.cc
new file mode 100644
index 000000000..c2f745f8f
--- /dev/null
+++ b/runtimes/neurun/core/src/backend/Backend.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/Backend.h"
+
+#include "backend/IConfig.h"
+#include "backend/ITensorBuilder.h"
+#include "backend/IKernelGenerator.h"
+#include "backend/IShapeFixer.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/backend/BackendManager.cc b/runtimes/neurun/core/src/backend/BackendManager.cc
new file mode 100644
index 000000000..155f7f51a
--- /dev/null
+++ b/runtimes/neurun/core/src/backend/BackendManager.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <dlfcn.h>
+#include "BackendManager.h"
+
+#include "backend/Backend.h"
+#include "backend/IConfig.h"
+#include "util/logging.h"
+#include "util/ConfigSource.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+BackendManager &BackendManager::instance()
+{
+ static BackendManager object;
+ return object;
+}
+
+template <typename T, class... Types>
+void BackendManager::loadObjectFromPlugin(std::shared_ptr<T> &object_of_plugin_class,
+ const std::string obj_creator_func_name, void *handle,
+ Types &&... args)
+{
+ T *(*allocate_obj)(Types && ... Args);
+ // load object creator function
+ allocate_obj = (T * (*)(Types && ... Args))dlsym(handle, obj_creator_func_name.c_str());
+ if (allocate_obj == nullptr)
+ {
+ fprintf(stderr, "BackendManager: unable to open function %s: %s\n",
+ obj_creator_func_name.c_str(), dlerror());
+ abort();
+ }
+
+ object_of_plugin_class.reset(allocate_obj(args...));
+}
+
+void BackendManager::loadBackend(const std::string &backend)
+{
+ const std::string backend_plugin = "libbackend_" + backend + ".so";
+ void *handle = dlopen(backend_plugin.c_str(), RTLD_LAZY | RTLD_LOCAL);
+ if (handle == nullptr)
+ {
+ fprintf(stderr, "BackendManager::loadBackend failed to load plugin of %s backend: %s\n",
+ backend.c_str(), dlerror());
+ abort();
+ }
+ VERBOSE(BackendManager::loadBackend) << "loaded " << backend_plugin << " as a plugin of "
+ << backend << " backend\n";
+
+ {
+ // load object creator function
+ auto backend_create = (backend_create_t)dlsym(handle, "neurun_backend_create");
+ if (backend_create == nullptr)
+ {
+ fprintf(stderr, "BackendManager: unable to open function neurun_backend_create : %s\n",
+ dlerror());
+ abort();
+ }
+
+ // load object destroyer function
+ auto backend_destroy = (backend_destroy_t)dlsym(handle, "neurun_backend_destroy");
+ if (backend_destroy == nullptr)
+ {
+ fprintf(stderr, "BackendManager: unable to open function neurun_backend_destroy : %s\n",
+ dlerror());
+ abort();
+ }
+
+ auto backend_object =
+ std::unique_ptr<backend::Backend, backend_destroy_t>(backend_create(), backend_destroy);
+ auto backend_object_raw = backend_object.get();
+ backend_object->config()->initialize(); // Call initialize here?
+ _gen_map.emplace(backend_object->config()->id(), std::move(backend_object));
+ _available_backends.push_back(backend_object_raw);
+ }
+
+ // Keep the plugin handle (it is never dlclose()'d; store it so it is not lost)
+ _handle_map.emplace(backend, handle);
+}
+
+BackendManager::BackendManager()
+{
+ const auto backends = util::getConfigString(util::config::BACKENDS);
+ size_t prev_pos = 0;
+ auto pos = backends.find(";");
+ while (pos != std::string::npos)
+ {
+ loadBackend(backends.substr(prev_pos, pos - prev_pos));
+ prev_pos = pos + 1;
+ pos = backends.find(";", prev_pos);
+ }
+ // Handle the last entry in case `backends` does not end with ";"
+ if (prev_pos < backends.size())
+ {
+ loadBackend(backends.substr(prev_pos));
+ }
+}
+
+Backend *BackendManager::get(const std::string &key) { return _gen_map.at(key).get(); }
+
+const Backend *BackendManager::get(const std::string &key) const { return _gen_map.at(key).get(); }
+
+const Backend *BackendManager::getDefault() const { return get("cpu"); }
+
+} // namespace backend
+} // namespace neurun
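
For the dlsym() lookups above to resolve, each plugin libbackend_<name>.so must export the two entry points with C linkage. A sketch under that assumption (MyBackend is a hypothetical subclass; its Backend interface overrides are elided, so this is not compilable as-is):

    #include "backend/Backend.h"

    class MyBackend : public neurun::backend::Backend
    {
      // ... overrides of the Backend interface go here (elided) ...
    };

    extern "C" {
    neurun::backend::Backend *neurun_backend_create() { return new MyBackend; }
    void neurun_backend_destroy(neurun::backend::Backend *backend) { delete backend; }
    }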
diff --git a/runtimes/neurun/core/src/backend/BackendManager.h b/runtimes/neurun/core/src/backend/BackendManager.h
new file mode 100644
index 000000000..ef102123c
--- /dev/null
+++ b/runtimes/neurun/core/src/backend/BackendManager.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_BACKEND_MANAGER_H__
+#define __NEURUN_BACKEND_BACKEND_MANAGER_H__
+
+#include <memory>
+#include <map>
+
+#include "model/Operands.h"
+#include "backend/Backend.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+class BackendManager
+{
+public:
+ using backend_create_t = Backend *(*)();
+ using backend_destroy_t = void (*)(Backend *);
+
+ static BackendManager &instance();
+
+public:
+ Backend *get(const std::string &key);
+ const Backend *get(const std::string &key) const;
+ const Backend *getDefault() const;
+ const std::vector<const Backend *> &getAll() const { return _available_backends; }
+
+private:
+ BackendManager();
+
+private:
+ std::vector<const Backend *> _available_backends;
+ std::map<std::string, std::unique_ptr<Backend, backend_destroy_t>> _gen_map;
+ std::map<std::string, void *> _handle_map;
+ /**
+ * @brief Allocate an object of a plugin class by loading the plugin function that performs
+ * the allocation and calling it
+ *
+ * @param object_of_plugin_class target object
+ * @param obj_creator_func_name name of the plugin function that allocates the object
+ * @param handle handle of the plugin
+ * @param args arguments to pass to the constructor of the plugin class
+ */
+ template <typename T, class... Types>
+ void loadObjectFromPlugin(std::shared_ptr<T> &object_of_plugin_class,
+ const std::string obj_creator_func_name, void *handle,
+ Types &&... args);
+
+ /**
+ * @brief Load a backend plugin
+ *
+ * @param backend name of the backend to be loaded
+ */
+ void loadBackend(const std::string &backend);
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_BACKEND_MANAGER_H__
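
A usage sketch of the manager (the backend names are examples; BACKENDS is the semicolon-separated config value parsed by the constructor above):

    #include "backend/BackendManager.h"

    void backends_example()
    {
      // With BACKENDS="acl_cl;acl_neon;cpu", the constructor loads
      // libbackend_acl_cl.so, libbackend_acl_neon.so and libbackend_cpu.so
      auto &mgr = neurun::backend::BackendManager::instance();
      const auto *cpu = mgr.getDefault(); // equivalent to mgr.get("cpu")
      for (const auto *backend : mgr.getAll())
      {
        // e.g. inspect backend->config()->id()
        (void)backend;
      }
      (void)cpu;
    }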
diff --git a/runtimes/neurun/core/src/backend/CustomKernel.cc b/runtimes/neurun/core/src/backend/CustomKernel.cc
new file mode 100644
index 000000000..198e223cf
--- /dev/null
+++ b/runtimes/neurun/core/src/backend/CustomKernel.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/CustomKernel.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace custom
+{
+
+// TODO move this elsewhere
+class APIConverter
+{
+public:
+ static nnfw_operand convertOperand(void *alloc, const TypeInfo &type)
+ {
+ nnfw_operand api_operand;
+ api_operand.allocation = alloc;
+ api_operand.type = convertType(type);
+ return api_operand;
+ }
+
+ static nnfw_tensorinfo convertType(const TypeInfo &type)
+ {
+ nnfw_tensorinfo api_type;
+ api_type.rank = type.shape.rank();
+ assert(type.shape.rank() <= 6);
+ std::copy(type.shape.dims().begin(), type.shape.dims().end(), std::begin(api_type.dims));
+
+ switch (type.dtype)
+ {
+ case model::DataType::FLOAT32:
+ api_type.dtype = NNFW_TYPE_TENSOR_FLOAT32;
+ break;
+ case model::DataType::INT32:
+ api_type.dtype = NNFW_TYPE_TENSOR_INT32;
+ break;
+ case model::DataType::QUANT8_ASYMM:
+ api_type.dtype = NNFW_TYPE_TENSOR_QUANT8_ASYMM;
+ break;
+ case model::DataType::BOOL8:
+ api_type.dtype = NNFW_TYPE_TENSOR_BOOL;
+ break;
+ default:
+ throw std::runtime_error("Unsupported tensor datatype");
+ }
+ return api_type;
+ }
+};
+
+Kernel::Kernel(const nnfw_custom_eval evalFunction)
+ : _params(), _userdata(nullptr), _userdata_size(0), _evalFunction(evalFunction)
+{
+}
+
+void Kernel::configure(Kernel::CustomKernelConfigParams &&inParams)
+{
+ _userdata = inParams.userdata;
+ _userdata_size = inParams.userdata_size;
+
+ _params.ninputs = inParams.input_allocations.size();
+ _params.inputs = new nnfw_operand[_params.ninputs];
+ for (size_t i = 0; i < _params.ninputs; ++i)
+ {
+ _params.inputs[i] =
+ APIConverter::convertOperand(inParams.input_allocations[i], inParams.input_types[i]);
+ }
+
+ _params.noutputs = inParams.output_allocations.size();
+ _params.outputs = new nnfw_operand[_params.noutputs];
+ for (size_t i = 0; i < _params.noutputs; ++i)
+ {
+ _params.outputs[i] =
+ APIConverter::convertOperand(inParams.output_allocations[i], inParams.output_types[i]);
+ }
+}
+
+void Kernel::run() { _evalFunction(&_params, _userdata, _userdata_size); }
+
+} // namespace custom
+} // namespace backend
+} // namespace neurun
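
Kernel::run() simply forwards to the user-supplied nnfw_custom_eval callback. A sketch of such a callback, assuming the nnfw_custom_kernel_params layout implied by configure() above (ninputs/inputs/noutputs/outputs, with each nnfw_operand carrying an allocation pointer and a type):

    #include <cstring>
    #include "backend/CustomKernel.h"

    // Hypothetical eval: copies the first input buffer to the first output,
    // assuming both are FLOAT32 tensors with identical, fully-known shapes
    void my_custom_eval(nnfw_custom_kernel_params *params, char *userdata, size_t userdata_size)
    {
      (void)userdata;
      (void)userdata_size;
      size_t num_elems = 1;
      for (int i = 0; i < params->inputs[0].type.rank; ++i)
        num_elems *= params->inputs[0].type.dims[i];
      std::memcpy(params->outputs[0].allocation, params->inputs[0].allocation,
                  num_elems * sizeof(float));
    }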
diff --git a/runtimes/neurun/core/src/backend/CustomKernelRegistry.cc b/runtimes/neurun/core/src/backend/CustomKernelRegistry.cc
new file mode 100644
index 000000000..4acab70a9
--- /dev/null
+++ b/runtimes/neurun/core/src/backend/CustomKernelRegistry.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/CustomKernelRegistry.h"
+
+#include "cpp14/memory.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace custom
+{
+
+void KernelRegistry::registerKernel(const std::string &id, nnfw_custom_eval evalFunction)
+{
+ _storage.emplace(id, evalFunction);
+}
+
+std::unique_ptr<Kernel> KernelRegistry::buildKernelForOp(const std::string &id)
+{
+ auto it = _storage.find(id);
+ if (it == _storage.end())
+ {
+ throw std::runtime_error("Unable to find associated kernel for op");
+ }
+
+ return nnfw::cpp14::make_unique<custom::Kernel>(it->second);
+}
+
+} // namespace custom
+} // namespace backend
+} // namespace neurun
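
Usage is register-once, build-per-occurrence; buildKernelForOp throws for ids that were never registered. A sketch (my_custom_eval as in the previous note):

    #include "backend/CustomKernelRegistry.h"

    void registry_example()
    {
      neurun::backend::custom::KernelRegistry registry;
      registry.registerKernel("MyCustomOp", my_custom_eval);

      // Throws std::runtime_error for an unknown id
      auto kernel = registry.buildKernelForOp("MyCustomOp");
      (void)kernel;
    }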
diff --git a/runtimes/neurun/core/src/backend/ExecTime.cc b/runtimes/neurun/core/src/backend/ExecTime.cc
new file mode 100644
index 000000000..d5aa679d7
--- /dev/null
+++ b/runtimes/neurun/core/src/backend/ExecTime.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/ExecTime.h"
+
+#include <fstream>
+#include <cassert>
+#include <limits>
+#include <algorithm>
+
+namespace neurun
+{
+namespace backend
+{
+
+int64_t ExecTime::getOperationExecTime(const Backend *backend, const std::string &operation,
+ bool quant, uint32_t op_size) const
+{
+ auto found_backend = _measurements.find(backend);
+ if (found_backend == _measurements.end())
+ return NOT_FOUND; // no execution time for this backend
+
+ auto found_operation_with_type = found_backend->second.find(operation);
+ if (found_operation_with_type == found_backend->second.end())
+ // no execution time for this operation
+ return NOT_FOUND;
+
+ auto found_operation = found_operation_with_type->second.find(quant);
+ if (found_operation == found_operation_with_type->second.end())
+ // no execution time for this operation
+ return NOT_FOUND;
+
+ auto found_size = found_operation->second.find(op_size);
+ if (found_size != found_operation->second.end())
+ return found_size->second; // found execution time
+
+ // Try to interpolate
+ if (found_operation->second.size() < 2)
+ // not possible to do linear interpolation
+ return found_operation->second.begin()->second;
+
+ // If we reach here, there is no record whose size is exactly equal to op_size
+ auto upper_bound = found_operation->second.upper_bound(op_size); // > op_size
+ auto lower_bound = upper_bound;
+
+ if (upper_bound == found_operation->second.end()) // all values <= op_size
+ {
+ upper_bound--;
+ lower_bound = upper_bound;
+ lower_bound--;
+ }
+ else if (upper_bound == found_operation->second.begin()) // all values > op_size
+ {
+ upper_bound++;
+ }
+ else // op_size between
+ {
+ lower_bound--;
+ }
+
+ // Linear interpolation
+ const auto x0 = static_cast<int64_t>(lower_bound->first); // size
+ const auto x1 = static_cast<int64_t>(upper_bound->first); // size
+ const int64_t y0 = lower_bound->second; // time
+ const int64_t y1 = upper_bound->second; // time
+ const auto x = static_cast<int64_t>(op_size);
+
+ int64_t interpolated_value = y0 + (x - x0) * (y1 - y0) / (x1 - x0);
+
+ // In some cases an op with smaller inputs executes slower than one with larger inputs,
+ // most likely because of a difference in backend load
+ if (interpolated_value < 0 && x > x1)
+ {
+ return y0;
+ }
+ // The interpolated value may be non-positive ONLY if op_size is below both measured sizes
+ assert(interpolated_value > 0 || x < x0);
+
+ // execution time must be non-negative
+ return std::max<int64_t>(interpolated_value, 1);
+}
+
+void ExecTime::updateOperationExecTime(const Backend *backend, const std::string &operation,
+ bool quant, uint32_t op_size, int64_t time)
+{
+ // If the op is not implemented for some input, it should not be scheduled
+ const auto &recs = _measurements[backend][operation][quant];
+ if (time == getMax() ||
+ std::any_of(recs.begin(), recs.end(),
+ [](std::pair<const uint32_t, const int64_t> p) { return p.second == getMax(); }))
+ {
+ _measurements[backend][operation][quant].clear();
+ _measurements[backend][operation][quant].emplace(op_size, getMax());
+ }
+ else
+ {
+ auto it = _measurements[backend][operation][quant].emplace(op_size, time);
+ if (!it.second)
+ {
+ // The last measurement carries more weight than the previous ones:
+ // this prefers newer metrics over older ones, so it adapts to backend changes
+ it.first->second = (it.first->second + time) / 2;
+ }
+ }
+}
+
+void ExecTime::updatePermuteTime(const Backend *from_backend, const Backend *to_backend, bool quant,
+ uint32_t op_size, int64_t time)
+{
+ updateOperationExecTime(from_backend, to_backend->config()->id(), quant, op_size, time);
+}
+
+int64_t ExecTime::getPermuteTime(const Backend *from_backend, const Backend *to_backend, bool quant,
+ uint32_t op_size) const
+{
+ return getOperationExecTime(from_backend, to_backend->config()->id(), quant, op_size);
+}
+
+} // namespace backend
+} // namespace neurun
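
A worked example of the interpolation in getOperationExecTime: with records (op_size=1000 -> 100) and (op_size=3000 -> 300), a query for op_size=2000 lands between them and yields 100 + (2000 - 1000) * (300 - 100) / (3000 - 1000) = 200. Queries outside the measured range extrapolate along the same line, with the result clamped to at least 1 (and to y0 for the negative-slope, x > x1 case handled above).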
diff --git a/runtimes/neurun/core/src/backend/JSONExecTime.cc b/runtimes/neurun/core/src/backend/JSONExecTime.cc
new file mode 100644
index 000000000..e2404b2c8
--- /dev/null
+++ b/runtimes/neurun/core/src/backend/JSONExecTime.cc
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/JSONExecTime.h"
+#include "backend/IConfig.h"
+#include <fstream>
+
+namespace neurun
+{
+namespace backend
+{
+/**
+ * @brief Helper function for reading string from stream
+ *
+ * @param str Output string
+ * @param stream File stream
+ */
+void readString(std::string &str, std::ifstream &stream)
+{
+ str.clear();
+ char buf;
+ while (stream.good())
+ {
+ stream.get(buf);
+ if (buf == '"')
+ break;
+ str.push_back(buf);
+ }
+}
+
+/**
+ * @brief Helper function for reading bool from stream
+ *
+ * @param quant Output bool
+ * @param stream File stream
+ */
+void readBool(bool &quant, std::ifstream &stream)
+{
+ char buf;
+ stream.get(buf);
+ quant = (buf == '1');
+ stream.get(buf);
+}
+
+void printString(const std::string &str, std::ofstream &stream) { stream << "\"" << str << "\""; }
+
+void printBool(bool quant, std::ofstream &stream) { stream << "\"" << quant << "\""; }
+
+void JSON::readOperation(const std::string &backend, const std::string &operation, bool quant,
+ std::ifstream &stream)
+{
+ uint32_t size = 0;
+ int64_t time = 0;
+
+ std::string int_buf;
+ char buf;
+ int number_of_closed_braces = 0;
+ int number_of_commas = 0;
+
+ while (stream.good())
+ {
+ stream.get(buf);
+
+ switch (buf)
+ {
+ case ']':
+ {
+ number_of_closed_braces++;
+ break;
+ }
+ case '[':
+ {
+ number_of_closed_braces--;
+ break;
+ }
+ default:
+ {
+ if (std::isdigit(buf))
+ {
+ int_buf.push_back(buf);
+ }
+ break;
+ }
+ }
+
+ if (number_of_closed_braces == 1)
+ break;
+
+ if ((buf == ']' && number_of_closed_braces == 0) ||
+ (buf == ',' && number_of_closed_braces == -1))
+ {
+ switch (number_of_commas % 2)
+ {
+ case 0:
+ {
+ size = static_cast<uint32_t>(std::atoi(int_buf.c_str()));
+ break;
+ }
+ case 1:
+ {
+ time = static_cast<int64_t>(std::atol(int_buf.c_str()));
+ auto bf = _backends.find(backend);
+ if (bf != _backends.end())
+ {
+ _measurements[bf->second][operation][quant][size] = time;
+ } // we ignore the records for unsupported backends
+ break;
+ }
+ }
+ number_of_commas++;
+ int_buf.clear();
+ }
+ }
+}
+void JSON::printOperation(const std::map<uint32_t, int64_t> &operation_info,
+ std::ofstream &stream) const
+{
+ for (const auto &items : operation_info)
+ {
+ stream << "[" << items.first << ", " << items.second << "], ";
+ }
+ stream.seekp(-2, std::ofstream::end);
+}
+
+void JSON::uploadOperationsExecTime() const
+{
+ std::ofstream stream(_measurement_file);
+ if (!stream.is_open())
+ {
+ throw std::runtime_error("Failed to save backend config file");
+ }
+ else
+ {
+ stream << "{";
+ for (const auto &backend : _measurements)
+ {
+ printString(backend.first->config()->id(), stream);
+ stream << ": {";
+ for (const auto &operation : backend.second)
+ {
+ printString(operation.first, stream);
+ stream << ": {";
+ for (const auto &type : operation.second)
+ {
+ printBool(type.first, stream);
+ stream << ": [";
+ printOperation(type.second, stream);
+ stream << "], ";
+ }
+ stream.seekp(-2, std::ofstream::end);
+ stream << "}, ";
+ }
+ stream.seekp(-2, std::ofstream::end);
+ stream << "}, ";
+ }
+ stream.seekp(-2, std::ofstream::end);
+ stream << "}";
+ stream.close();
+ }
+}
+
+void JSON::loadOperationsExecTime()
+{
+ std::ifstream stream(_measurement_file);
+ if (stream.is_open())
+ {
+ std::string backend;
+ std::string operation;
+ bool quant = false;
+ char buf;
+ int number_of_open_braces = 0;
+
+ while (stream.good())
+ {
+ stream.get(buf);
+ switch (buf)
+ {
+ case '{':
+ number_of_open_braces++;
+ break;
+ case '}':
+ number_of_open_braces--;
+ break;
+ case '"':
+ {
+ if (number_of_open_braces == 1)
+ {
+ // read backend string
+ readString(backend, stream);
+ }
+ if (number_of_open_braces == 2)
+ {
+ // read operation string
+ readString(operation, stream);
+ }
+ if (number_of_open_braces == 3)
+ {
+ // read quantization flag
+ readBool(quant, stream);
+ }
+ break;
+ }
+ case '[':
+ {
+ // reading and creating all info for operation
+ readOperation(backend, operation, quant, stream);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ stream.close();
+ }
+}
+
+} // namespace backend
+} // namespace neurun
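
For reference, the file written by uploadOperationsExecTime (and read back by loadOperationsExecTime) nests backend id, operation name, and the quantization flag printed as "0"/"1", with [size, time] pairs at the leaves. A made-up sample:

    {"cpu": {"Conv2D": {"0": [[1000, 100], [3000, 300]], "1": [[1000, 80]]}}}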
diff --git a/runtimes/neurun/core/src/compiler/BackendResolver.cc b/runtimes/neurun/core/src/compiler/BackendResolver.cc
new file mode 100644
index 000000000..0c544190c
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/BackendResolver.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BackendResolver.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+BackendResolver::BackendResolver(const BackendResolver &obj)
+ : _context_manager{}, _gen_map{obj._gen_map}
+{
+ for (const auto &e : obj._context_manager)
+ {
+ _context_manager.emplace(e.first, nnfw::cpp14::make_unique<backend::BackendContext>(*e.second));
+ }
+}
+
+BackendResolver &BackendResolver::operator=(const BackendResolver &obj)
+{
+ _gen_map = obj._gen_map;
+
+ _context_manager.clear();
+ for (const auto &e : obj._context_manager)
+ {
+ _context_manager.emplace(e.first, nnfw::cpp14::make_unique<backend::BackendContext>(*e.second));
+ }
+
+ return *this;
+}
+
+} // namespace compiler
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/BackendResolver.h b/runtimes/neurun/core/src/compiler/BackendResolver.h
new file mode 100644
index 000000000..248ef2f2e
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/BackendResolver.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_BACKEND_RESOLVER_H__
+#define __NEURUN_COMPILER_BACKEND_RESOLVER_H__
+
+#include <unordered_map>
+#include <typeindex>
+
+#include "util/logging.h"
+#include "backend/Backend.h"
+#include "backend/BackendManager.h"
+#include "backend/ITensorBuilder.h"
+#include "model/OperationIndexMap.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+class BackendResolver
+{
+public:
+ BackendResolver(const model::Operands &operands,
+ const std::vector<const backend::Backend *> &backends,
+ const std::shared_ptr<backend::custom::KernelRegistry> &registry)
+ {
+ for (const auto backend : backends)
+ {
+ _context_manager.emplace(backend, backend->newContext(operands, registry));
+ }
+ }
+
+ ~BackendResolver() = default;
+ BackendResolver(const BackendResolver &obj);
+ BackendResolver(BackendResolver &&obj) = default;
+ BackendResolver &operator=(const BackendResolver &obj);
+ BackendResolver &operator=(BackendResolver &&obj) = default;
+
+public:
+ const backend::BackendContext *getBackendContext(const model::OperationIndex &index) const
+ {
+ return _context_manager.at(_gen_map.at(index)).get();
+ }
+
+ const backend::BackendContext *getBackendContext(const backend::Backend *backend) const
+ {
+ return _context_manager.at(backend).get();
+ }
+
+ backend::TensorBuilderSet tensor_builders() const
+ {
+ backend::TensorBuilderSet ret;
+ for (const auto &e : _context_manager)
+ {
+ ret.insert(e.second->tensor_builder);
+ }
+ return ret;
+ }
+
+ const backend::Backend *getBackend(const model::OperationIndex &index) const
+ {
+ return getBackendContext(index)->backend;
+ }
+
+ void setBackend(const model::OperationIndex &index, const backend::Backend *backend)
+ {
+ _gen_map[index] = backend;
+ }
+
+ void iterate(const std::function<void(const model::OperationIndex &,
+ const backend::BackendContext &)> &fn) const
+ {
+ for (const auto &e : _gen_map)
+ {
+ fn(e.first, *_context_manager.at(e.second));
+ }
+ }
+
+private:
+ std::unordered_map<const backend::Backend *, std::unique_ptr<backend::BackendContext>>
+ _context_manager;
+ model::OperationIndexMap<const backend::Backend *> _gen_map;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_BACKEND_RESOLVER_H__
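
A usage sketch: a scheduler records its decision per operation index, and later phases look up the chosen backend's context through the same index (the index and backend arguments are placeholders):

    void assign_example(neurun::compiler::BackendResolver &resolver,
                        const neurun::model::OperationIndex &index,
                        const neurun::backend::Backend *backend)
    {
      resolver.setBackend(index, backend); // record the scheduling decision
      const auto *ctx = resolver.getBackendContext(index);
      (void)ctx;
    }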
diff --git a/runtimes/neurun/core/src/compiler/Compiler.cc b/runtimes/neurun/core/src/compiler/Compiler.cc
new file mode 100644
index 000000000..6a378faa9
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/Compiler.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler/Compiler.h"
+
+#include "BackendResolver.h"
+#include "ParamChecker.h"
+#include "ExecutorFactory.h"
+
+#include "compiler/IScheduler.h"
+#include "compiler/ManualScheduler.h"
+#include "compiler/HEScheduler.h"
+#include "backend/ExecTime.h"
+#include "graph/operation/LowerInfo.h"
+#include "dumper/dot/DotDumper.h"
+#include "compiler/Linear.h"
+#include "exec/interp/ExecManager.h"
+#include "backend/ExecTime.h"
+#include "util/ConfigSource.h"
+
+namespace neurun
+{
+
+namespace compiler
+{
+
+void Compiler::compile(void)
+{
+ _state = State::STARTED;
+
+ if (!checkCompilable())
+ {
+ _executor = std::make_shared<exec::interp::ExecManager>(_graph->shareModel());
+ return;
+ }
+
+ /***************************************************
+ * Backend independent analysis & optimization phase
+ ***************************************************/
+ // Schedule
+ std::unique_ptr<BackendResolver> br;
+ std::shared_ptr<model::OperationIndexMap<int64_t>> indexed_ranks;
+ if (util::getConfigBool(util::config::USE_SCHEDULER))
+ {
+ auto scheduler =
+ compiler::HEScheduler(_graph->operands(), backend::BackendManager::instance().getAll(),
+ _graph->getKernelRegistry());
+ br = scheduler.schedule(*_graph);
+ indexed_ranks = scheduler.getIndexedRanks();
+ }
+ else
+ {
+ auto scheduler = compiler::ManualScheduler();
+ br = scheduler.schedule(*_graph);
+ }
+ _graph->setBackendResolver(std::move(br));
+ /*************************************************************
+ * Backend independent analysis & optimization phase finished
+ *************************************************************/
+
+ // dump graph to .dot
+ auto dump_level =
+ static_cast<dumper::dot::DotDumper::Level>(util::getConfigInt(util::config::GRAPH_DOT_DUMP));
+ neurun::dumper::dot::DotDumper dot_dumper(*_graph, dump_level);
+ dot_dumper.dump("before_lower");
+
+ // Lower: decide backend
+ _graph->lower();
+ _state = State::LOWERED;
+
+ dot_dumper.dump("after_lower");
+
+ const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
+
+ _executor =
+ std::shared_ptr<exec::IExecutor>{ExecutorFactory::instance().create(executor_str, *_graph)};
+ _executor->setIndexedRanks(indexed_ranks);
+ /********************************
+ * Code generation phase finished
+ ********************************/
+ _state = State::COMPILED;
+}
+
+bool Compiler::checkCompilable()
+{
+ // The compile phase can be disabled via config.
+ // When the interpreter backend is ready to use, remove this config and use the backend setting.
+ const auto env_disable_compile = util::getConfigBool(util::config::DISABLE_COMPILE);
+ if (env_disable_compile)
+ {
+ return false;
+ }
+
+ // TODO check unspecified operand shape
+
+ // Check compilable parameter
+ ParamChecker paramChecker{_graph};
+ paramChecker();
+ if (paramChecker.haveNoneConstParam())
+ {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace compiler
+
+} // namespace neurun
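
The pipeline above is driven entirely by config values: DISABLE_COMPILE falls back to the interpreter, USE_SCHEDULER selects HEScheduler over ManualScheduler, GRAPH_DOT_DUMP controls the .dot output level, and EXECUTOR selects the executor kind. A minimal driver sketch (the Compiler constructor signature is an assumption):

    #include "compiler/Compiler.h"

    void compile_example(const std::shared_ptr<neurun::graph::Graph> &graph)
    {
      neurun::compiler::Compiler compiler{graph}; // assumed to take a shared Graph
      compiler.compile();                         // state: STARTED -> LOWERED -> COMPILED
    }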
diff --git a/runtimes/neurun/core/src/compiler/ExecutorFactory.cc b/runtimes/neurun/core/src/compiler/ExecutorFactory.cc
new file mode 100644
index 000000000..2ff32a57e
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/ExecutorFactory.cc
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExecutorFactory.h"
+
+#include <functional>
+#include "exec/ExecutionObservers.h"
+#include "exec/LinearExecutor.h"
+#include "exec/DataflowExecutor.h"
+#include "exec/ParallelExecutor.h"
+#include "compiler/BackendResolver.h"
+#include "backend/ExecTime.h"
+#include "compiler/Linear.h"
+#include "graph/dumper/Dumper.h"
+#include "OperationValidator.h"
+#include "SubTensorAnalyzer.h"
+#include "backend/IConstantInitializer.h"
+#include "backend/IKernelGenerator.h"
+#include "backend/IShapeFixer.h"
+#include "cpp14/memory.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+ExecutorFactory &ExecutorFactory::instance()
+{
+ static ExecutorFactory singleton;
+ return singleton;
+}
+
+ExecutorFactory::ExecutorFactory()
+{
+ _map["Linear"] = createLinearExecutor;
+ _map["Dataflow"] = std::bind(createDataflowExecutor, std::placeholders::_1, false);
+ _map["Parallel"] = std::bind(createDataflowExecutor, std::placeholders::_1, true);
+}
+
+exec::IExecutor *ExecutorFactory::create(const std::string &id, graph::Graph &graph)
+{
+ return _map.at(id)(graph);
+}
+
+exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
+{
+ auto operand_context = std::make_shared<OperandContext>();
+ const auto &operands = graph.operands();
+
+ // Compilation result will be filled in operand_context and operation_sequence
+ auto function_sequence = std::make_shared<exec::FunctionSequence>();
+
+ // linearize
+ auto linear = graph.linearize();
+
+ // Dump ops
+ linear->accept(neurun::graph::dumper::Dumper{});
+
+ linear->accept(OperationValidator{operands});
+
+ /*************************************************
+ * Backend dependent analysis & optimization phase
+ *************************************************/
+
+ // SubTensorInfo should be generated after lowering and before shape correction and
+ // finalization, because SubTensorAnalyzer assumes that permutation insertion has already
+ // finished. The phases are:
+ // lower: decide backends and insert permutations
+ // fix shapes: prepare for code generation and optimization
+ // generate tensor objects: generate tensors using subtensor info
+ // generate kernels
+ // allocate tensor memory
+ // constant initialization: fill the constants with their values
+ // The generated SubTensorInfo is stored in the operand (Object) so that it can easily be
+ // passed to the plan builder and the tensor builder
+ linear->accept(SubTensorAnalyzer{graph.operands()});
+
+ /**********************************************************
+ * Backend dependent analysis & optimization phase finished
+ **********************************************************/
+
+ /***********************
+ * Code generation phase
+ ***********************/
+
+ // Fix shapes
+ linear->iterate([&](const compiler::Linear::Element &element) {
+ auto backend = element.lower_info->backend();
+ auto shape_fixer = linear->getBackendContext(backend)->shape_fixer;
+ shape_fixer->fix(*element.subgraph);
+ });
+
+ linear->planTensors();
+
+ auto tensor_builders = linear->backend_resolver()->tensor_builders();
+
+ // Prepare tensors
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->prepare();
+ }
+
+ // Generate initializers
+ linear->generateConstantInitializers();
+
+ class ExecutionBuilder final : public IExecutionBuilder
+ {
+ public:
+ ExecutionBuilder(exec::FunctionSequence &functions) : _functions{functions}
+ {
+ // DO NOTHING
+ }
+
+ public:
+ void append(std::unique_ptr<::neurun::exec::IFunction> &&f) override
+ {
+ _functions.append(std::move(f));
+ }
+
+ private:
+ exec::FunctionSequence &_functions;
+ };
+
+ auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>(*function_sequence);
+
+ // Generate kernels
+ linear->iterate([&](const compiler::Linear::Element &element) {
+ auto backend = element.lower_info->backend();
+ auto kernel_gen = linear->getBackendContext(backend)->kernel_gen;
+ kernel_gen->generate(*element.subgraph, execution_builder.get());
+ });
+
+ // Allocate Tensor Memory
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->allocate();
+ }
+
+ // TODO Add optimization passes
+
+ // Initialize constant tensors
+ for (const auto backend : backend::BackendManager::instance().getAll())
+ {
+ linear->getBackendContext(backend)->constant_initializer->run();
+ }
+
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->finalize();
+ }
+
+ // Wrap tensors as Object and store them in the plan
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->iterate([&](const model::OperandIndex &index) {
+ auto object = tensor_builder->wrapTensor(index);
+ operand_context->set(index, object);
+ });
+ }
+
+ // Prepare each TensorManager on each backend
+ auto tensor_mgrs = nnfw::cpp14::make_unique<backend::TensorManagerSet>();
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_mgrs->insert(tensor_builder->releaseTensorManager());
+ }
+
+ return new exec::LinearExecutor{graph.shareModel(), linear->releaseSubgraphs(),
+ operand_context, linear->releaseLowerInfo(),
+ std::move(tensor_mgrs), linear->releaseElements(),
+ function_sequence};
+}
+
+exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bool parallel)
+{
+ auto operand_context = std::make_shared<OperandContext>();
+
+ graph.subgraphs().iterate([&](const model::SubgraphIndex &, const model::Subgraph &subg) {
+ auto subtensor_analyzer = SubTensorAnalyzer{graph.operands()};
+ subg.accept(subtensor_analyzer);
+ });
+
+ // Fix shapes
+ graph.subgraphs().iterate(
+ [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ auto backend = graph.getLowerInfo(subg_index)->backend();
+ auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer;
+ shape_fixer->fix(subg);
+ });
+
+ graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ const auto lower_info = graph.getLowerInfo(ind);
+ for (auto factor : lower_info->def_factors())
+ {
+ bool isSubTensor = false;
+ auto backend = factor.backend();
+ auto tensor_builder = graph.backend_resolver()->getBackendContext(backend)->tensor_builder;
+
+ if (backend->config()->SupportSubTensorAlloc())
+ {
+ const auto parentInfo = obj.parent_info();
+ if (parentInfo != nullptr)
+ {
+ isSubTensor = true;
+ }
+ }
+
+ if (isSubTensor)
+ {
+ const compiler::SubTensorInfo info(obj);
+ tensor_builder->registerSubTensorInfo(ind, info);
+ }
+ else
+ {
+ const auto info = obj.info();
+ // NOTE This assumes an operand can have one layout, and only PermutateNode can have
+ // different layouts for input and output
+ const auto &def = *obj.getDef().list().cbegin();
+ auto frontend_layout =
+ graph.subgraphs().at(graph.subgraphs().getOperation(def)).getLayout();
+ if (frontend_layout == model::Layout::UNKNOWN)
+ {
+ const auto &use = *obj.getUses().list().cbegin();
+ frontend_layout = graph.subgraphs().at(graph.subgraphs().getOperation(use)).getLayout();
+ }
+ const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
+ tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout,
+ obj.isConstant());
+ // Workaround: mark first use here so the static memory planner never deallocates this tensor
+ tensor_builder->notifyFirstUse(ind);
+ }
+ }
+ });
+
+ auto tensor_builders = graph.backend_resolver()->tensor_builders();
+
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->prepare();
+ }
+
+ class ExecutionBuilder : public IExecutionBuilder
+ {
+ public:
+ void append(std::unique_ptr<exec::IFunction> &&fn) override
+ {
+ auto itr = _code_map.find(_next_index);
+ if (itr == _code_map.end())
+ {
+ _code_map[_next_index] = nnfw::cpp14::make_unique<exec::FunctionSequence>();
+ }
+ _code_map[_next_index]->append(std::move(fn));
+ };
+
+ // TODO Remove this method and make `append` take the index value as an argument
+ void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; }
+
+ exec::DataflowExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); }
+
+ private:
+ model::SubgraphIndex _next_index;
+ exec::DataflowExecutor::CodeMap _code_map;
+ };
+
+ auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>();
+
+ // Generate kernels
+ graph.subgraphs().iterate(
+ [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ auto backend = graph.getLowerInfo(subg_index)->backend();
+ auto constant_initializer =
+ graph.backend_resolver()->getBackendContext(backend)->constant_initializer;
+ constant_initializer->generate(subg, graph.operands());
+ // TODO This approach is temporary. See the declaration of `setNextIndex`.
+ execution_builder->setNextIndex(subg_index);
+ auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen;
+ kernel_gen->generate(subg, execution_builder.get());
+ });
+
+ for (const auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->allocate();
+ }
+
+ // Initialize constant tensors
+ for (const auto backend : backend::BackendManager::instance().getAll())
+ {
+ graph.backend_resolver()->getBackendContext(backend)->constant_initializer->run();
+ }
+
+ auto lower_info = graph.releaseLowerInfo();
+
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->finalize();
+ }
+
+ // Wrap tensors as Object and store them in the plan
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->iterate([&](const model::OperandIndex &index) {
+ auto object = tensor_builder->wrapTensor(index);
+ operand_context->set(index, object);
+ });
+ }
+
+ // Prepare each TensorManager on each backend
+ auto tensor_mgrs = nnfw::cpp14::make_unique<backend::TensorManagerSet>();
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_mgrs->insert(tensor_builder->releaseTensorManager());
+ }
+
+ if (parallel)
+ {
+ return new exec::ParallelExecutor{
+ graph.shareModel(), graph.releaseSubgraphs(),
+ operand_context, std::move(lower_info),
+ std::move(tensor_mgrs), std::move(execution_builder->releaseCodeMap())};
+ }
+ else
+ {
+ auto exec = new exec::DataflowExecutor{
+ graph.shareModel(), graph.releaseSubgraphs(),
+ operand_context, std::move(lower_info),
+ std::move(tensor_mgrs), std::move(execution_builder->releaseCodeMap())};
+ if (util::getConfigBool(util::config::PROFILING_MODE))
+ {
+ auto et = std::make_shared<backend::ExecTime>(backend::BackendManager::instance().getAll());
+ std::unique_ptr<exec::IExecutionObserver> obs =
+ nnfw::cpp14::make_unique<exec::ProfileObserver>(et);
+ exec->addObserver(std::move(obs));
+ }
+ return exec;
+ }
+}
+
+} // namespace compiler
+} // namespace neurun
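
"Linear", "Dataflow" and "Parallel" are the only keys registered in the factory's constructor, so the EXECUTOR config value must be one of them; create() throws (via _map.at) for anything else. A usage sketch:

    void executor_example(neurun::graph::Graph &graph)
    {
      neurun::exec::IExecutor *exec =
          neurun::compiler::ExecutorFactory::instance().create("Parallel", graph);
      (void)exec;
    }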
diff --git a/runtimes/neurun/core/src/compiler/ExecutorFactory.h b/runtimes/neurun/core/src/compiler/ExecutorFactory.h
new file mode 100644
index 000000000..894fec1b5
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/ExecutorFactory.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_EXECUTOR_FACTORY_H__
+#define __NEURUN_COMPILER_EXECUTOR_FACTORY_H__
+
+#include <unordered_map>
+
+#include "exec/IExecutor.h"
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+class ExecutorFactory
+{
+public:
+ static ExecutorFactory &instance();
+
+public:
+ exec::IExecutor *create(const std::string &id, graph::Graph &graph);
+
+private:
+ ExecutorFactory();
+
+private:
+ static exec::IExecutor *createLinearExecutor(graph::Graph &graph);
+ static exec::IExecutor *createDataflowExecutor(graph::Graph &graph, bool parallel);
+
+private:
+ std::unordered_map<std::string, std::function<exec::IExecutor *(graph::Graph &)>> _map;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_EXECUTOR_FACTORY_H__
diff --git a/runtimes/neurun/core/src/compiler/HEScheduler.cc b/runtimes/neurun/core/src/compiler/HEScheduler.cc
new file mode 100644
index 000000000..a3d1a5990
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/HEScheduler.cc
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/Operand.h"
+#include "compiler/HEScheduler.h"
+#include "graph/Graph.h"
+#include "util/ConfigSource.h"
+#include "compiler/IExecutionBuilder.h"
+#include "compiler/BackendResolver.h"
+#include "backend/IShapeFixer.h"
+#include "util/logging.h"
+#include "util/Utils.h"
+#include "exec/FunctionSequence.h"
+#include <cassert>
+#include <cmath>
+#include <chrono>
+
+namespace neurun
+{
+
+namespace compiler
+{
+static uint32_t getOperationsFlattenedIOSize(const graph::Graph &graph,
+ const model::Operation &node)
+{
+ uint32_t size = 0;
+ for (const auto &input : node.getInputs())
+ {
+ size += graph.operands().at(input).info().total_size();
+ }
+ for (const auto &output : node.getOutputs())
+ {
+ size += graph.operands().at(output).info().total_size();
+ }
+ return size;
+}
+
+static bool isQuant(const graph::Graph &graph, const model::Operation &node)
+{
+ for (const auto &input : node.getInputs())
+ {
+ const auto &obj = graph.operands().at(input);
+ if (obj.typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool isWorkaroundSkip(const graph::Graph &graph, const backend::Backend *backend,
+ const model::Operation &node, bool quant)
+{
+ /* TODO: This is a workaround; come up with a better solution if possible.
+ Adding an exception in the stage doesn't help: if there is a record for Add without
+ broadcast, scheduling will select it, since the scheduler doesn't distinguish broadcast
+ from non-broadcast the way it distinguishes quantized from non-quantized. */
+ if (backend->config()->id() == "cpu" && node.getName() == "Add")
+ {
+ const auto lhs_index{node.getInputs().at(model::operation::AddNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::AddNode::Input::RHS)};
+ /* Broadcasting isn't supported on CPU: there is no way to distinguish the existing
+ * exec_time record of Add with broadcasting from the one without it. */
+ /* Quant is also unsupported: it throws an exception in run(). When scheduling without
+ warm-up, it isn't caught by tryBackend(). */
+ if (quant ||
+ !(graph.operands().at(lhs_index).shape() == graph.operands().at(rhs_index).shape()))
+ {
+ return true;
+ }
+ }
+ /* TODO: This is a workaround; come up with a better solution if possible.
+ Adding an exception in the stage doesn't help: if there is a record for Mul without
+ broadcast, scheduling will select it, since the scheduler doesn't distinguish broadcast
+ from non-broadcast the way it distinguishes quantized from non-quantized. */
+ else if (backend->config()->id() == "acl_neon" && node.getName() == "Mul")
+ {
+ const auto lhs_index{node.getInputs().at(model::operation::MulNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::MulNode::Input::RHS)};
+
+ // Nontrivial broadcasting isn't supported yet
+ if (quant ||
+ !(graph.operands().at(lhs_index).shape() == graph.operands().at(rhs_index).shape()))
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Returns whether a node can be merged into a subgraph
+static bool isMergable(const graph::Graph &graph, const model::Operation &node)
+{
+ size_t prev_op_cnt = 0;
+ for (const auto &input : node.getInputs())
+ {
+ // only valid_inputs
+ const auto &operand = graph.operands().at(input);
+ if (operand.isConstant())
+ continue;
+
+ // This operand is an input of an operation, not a weight or a bias
+ if (operand.getDef().list().size() > 0)
+ ++prev_op_cnt;
+
+ // The current node has multiple inputs (like concat) or is at the beginning of a separate branch
+ if (prev_op_cnt > 1 || operand.getUses().list().size() > 1)
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+void HEScheduler::scheduleShufflingBackends()
+{
+ VERBOSE(HEScheduler::scheduleNode)
+ << "Started task scheduling: uses all backends to get more metrics for data transfer"
+ << std::endl;
+ size_t backend_ind = 0;
+ for (const auto &rank : _rank_to_op)
+ {
+ VERBOSE(HEScheduler::scheduleNode) << "scheduling (" << rank.second.value() << ")" << std::endl;
+ const auto &node = _graph->operations().at(rank.second);
+ const bool quant = isQuant(*_graph, node);
+ const auto size = getOperationsFlattenedIOSize(*_graph, node);
+ for (size_t i = 0;; ++i)
+ {
+ if (i == _all_backends.size())
+ {
+ // wasn't able to find backend
+ assert(false);
+ break;
+ }
+ if (backend_ind == _all_backends.size())
+ {
+ backend_ind = 0;
+ }
+ if (isWorkaroundSkip(*_graph, _all_backends[backend_ind], node, quant))
+ {
+ ++backend_ind;
+ continue;
+ }
+ const auto exec_time =
+ _exec_time->getOperationExecTime(_all_backends[backend_ind], node.getName(), quant, size);
+ // Scheduling to measure data transfer must be done after measuring all backends separately
+ assert(exec_time != _exec_time->NOT_FOUND);
+ if (exec_time == _exec_time->getMax())
+ {
+ ++backend_ind;
+ continue;
+ }
+ _backend_resolver->setBackend(rank.second, _all_backends[backend_ind]);
+ VERBOSE(HEScheduler::schedule) << "backend for " << node.getName() << " is "
+ << _all_backends[backend_ind]->config()->id() << std::endl;
+ ++backend_ind;
+ break;
+ }
+ }
+}
+
+bool HEScheduler::isNodeProfiled(const model::Operation &node)
+{
+ const bool quant = isQuant(*_graph, node);
+ const auto size = getOperationsFlattenedIOSize(*_graph, node);
+ for (const auto *backend : _all_backends)
+ {
+ const auto exec_time = _exec_time->getOperationExecTime(backend, node.getName(), quant, size);
+ if (exec_time == _exec_time->NOT_FOUND)
+ return false;
+ }
+ return true;
+}
+
+std::unique_ptr<compiler::BackendResolver> HEScheduler::schedule(const graph::Graph &graph)
+{
+ _graph = &graph;
+ VERBOSE(HEScheduler::schedule) << "task scheduling started" << std::endl;
+ // Make ranks and save in descending order
+ makeRank();
+
+ for (const auto *backend : _all_backends)
+ {
+ _backends_avail_time.emplace(backend, std::map<int64_t, int64_t>{{0, 0}});
+ }
+
+ const bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE);
+ if (is_profiling)
+ {
+ // Check if profiling info about all backend/node pairs already exists
+ bool all_nodes_are_profiled = true;
+ _graph->operations().iterate([&](const model::OperationIndex &, const model::Operation &op) {
+ if (all_nodes_are_profiled)
+ all_nodes_are_profiled = isNodeProfiled(op);
+ });
+
+ // If all nodes are already profiled, schedule the backends in an order that lets more
+ // profiling information about data transfer between backends be collected
+ if (all_nodes_are_profiled)
+ {
+ scheduleShufflingBackends();
+ VERBOSE(HEScheduler::schedule) << "task scheduling finished" << std::endl;
+ return std::move(_backend_resolver);
+ }
+ }
+
+ // For each task, select the backend with the smallest earliest finish time (EFT)
+ for (const auto &rank : _rank_to_op)
+ {
+ scheduleNode(rank.second);
+ }
+ VERBOSE(HEScheduler::schedule) << "task scheduling finished" << std::endl;
+ return std::move(_backend_resolver);
+}
+
+int64_t HEScheduler::getOpTime(const backend::Backend *backend, const std::string &operation,
+ bool quant, uint32_t size)
+{
+ const auto time = _exec_time->getOperationExecTime(backend, operation, quant, size);
+ if (time != _exec_time->NOT_FOUND)
+ return time;
+
+ return _is_supported.at(backend).at(operation) ? 1 : _exec_time->getMax();
+}
+
+int64_t HEScheduler::getPermuteTime(const backend::Backend *src_backend,
+ const backend::Backend *dst_backend, bool quant, uint32_t size)
+{
+ const auto time = _exec_time->getPermuteTime(src_backend, dst_backend, quant, size);
+ if (time != _exec_time->NOT_FOUND)
+ return time;
+
+ // Makes the scheduler prefer keeping computations on one backend
+ return size / 200;
+}
+
+int64_t HEScheduler::tryBackend(const model::Operation &node, const backend::Backend *backend)
+{
+ auto iter = _is_supported.find(backend);
+ if (iter != _is_supported.end())
+ {
+ auto it2 = iter->second.find(node.getName());
+ if (it2 != iter->second.end())
+ {
+ return _is_supported[backend][node.getName()] ? 1 : _exec_time->getMax();
+ }
+ }
+ try
+ {
+ _backend_resolver->getBackendContext(backend)->shape_fixer->fix(node);
+
+ if (!util::getConfigBool(util::config::PROFILING_MODE))
+ throw std::runtime_error("You are trying to run heterogeneous scheduler with disabled "
+ "profiling mode, while there is no profiling information about some "
+ "nodes. Run scheduler with enabled profiling mode first.");
+
+ _is_supported[backend][node.getName()] = true;
+ }
+ catch (std::runtime_error &e)
+ {
+ _is_supported[backend][node.getName()] = false;
+ }
+ return _is_supported[backend][node.getName()] ? 1 : _exec_time->getMax();
+}
+
+void HEScheduler::makeRank()
+{
+ VERBOSE(HEScheduler::makeRank) << "task prioritizing" << std::endl;
+
+ _graph->operations().iterate(
+ [&](const model::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); });
+
+ // Check that ranks are calculated for all operations (nodes)
+ _graph->operations().iterate([&](const model::OperationIndex &index, const model::Operation &) {
+ UNUSED_RELEASE(index);
+ assert(_op_to_rank->find(index) != _op_to_rank->end());
+ });
+ VERBOSE(HEScheduler::makeRank) << "task prioritizing finished" << std::endl;
+}
+
+int64_t HEScheduler::DFSMaxRank(const model::OperationIndex &index)
+{
+ auto op_to_rank_it = _op_to_rank->find(index);
+ if (op_to_rank_it != _op_to_rank->end())
+ return op_to_rank_it->second;
+
+ const auto &node = _graph->operations().at(index);
+ int64_t rank = 0;
+ const bool quant = isQuant(*_graph, node);
+ const auto size = getOperationsFlattenedIOSize(*_graph, node);
+ auto supported_backends_quantity = static_cast<int64_t>(_all_backends.size());
+
+ const auto max_child_rank = DFSChildrenMaxRank(index);
+
+ // get average exec time of this op
+ for (const auto &backend : _all_backends)
+ {
+ auto exec_time = _exec_time->getOperationExecTime(backend, node.getName(), quant, size);
+ if (exec_time == _exec_time->NOT_FOUND)
+ {
+ exec_time = tryBackend(node, backend);
+ }
+ if (exec_time < _exec_time->getMax())
+ {
+ rank += exec_time;
+ }
+ else
+ {
+ // this operation isn't supported in this backend
+ --supported_backends_quantity;
+ }
+ }
+ assert((supported_backends_quantity > 0) && "Encountered unsupported op");
+ rank /= supported_backends_quantity;
+
+ // get standard deviation
+ int64_t std = 0;
+ for (const auto backend : _all_backends)
+ {
+ const auto exec_time = getOpTime(backend, node.getName(), quant, size);
+ if (exec_time < _exec_time->getMax())
+ {
+ std += (exec_time - rank) * (exec_time - rank);
+ }
+ }
+ std /= supported_backends_quantity;
+ if (std > 0)
+ {
+ std = static_cast<int>(std::sqrt(std));
+ rank *= std;
+ }
+ rank += max_child_rank;
+
+ assert(rank >= 0);
+ _rank_to_op.emplace(rank, index);
+ _op_to_rank->emplace(index, rank);
+ VERBOSE(HEScheduler::DFSMaxRank) << "rank of operation (" << index.value() << ")"
+ << node.getName() << " is " << rank << std::endl;
+
+ return rank;
+}
+
+int64_t HEScheduler::DFSChildrenMaxRank(const model::OperationIndex &index)
+{
+ const auto &node = _graph->operations().at(index);
+ int64_t max_child_rank = 0;
+ for (const auto &output : node.getOutputs())
+ {
+ const auto &operand = _graph->operands().at(output);
+ const bool quant = operand.typeInfo().type() == model::DataType::QUANT8_ASYMM;
+ // average data transfer cost of this operand's data
+ int64_t avg_transfer_cost = 1;
+ for (const auto *backend : _all_backends)
+ {
+ for (const auto *other_backend : _all_backends)
+ {
+ if (backend == other_backend)
+ {
+ continue;
+ }
+ auto transfer_cost =
+ _exec_time->getPermuteTime(backend, other_backend, quant, operand.info().total_size());
+ if (transfer_cost == _exec_time->NOT_FOUND)
+ {
+ // Makes the scheduler prefer keeping computations on one backend
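+          // (e.g. a 10000-byte operand gets a fallback cost of 100; assumed to be on the
+          // same microsecond scale as measured permute times)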
+ transfer_cost = operand.info().total_size() / 100;
+ }
+ avg_transfer_cost += transfer_cost;
+ }
+ }
+ avg_transfer_cost /= _all_backends.size();
+ for (const auto &use : operand.getUses().list())
+ {
+ const auto cur_child_rank = DFSMaxRank(use);
+ max_child_rank = std::max(max_child_rank, cur_child_rank + avg_transfer_cost);
+ }
+ }
+ return max_child_rank;
+}
+
+int64_t HEScheduler::backendAvailableTime(const backend::Backend *backend,
+ const int64_t &starting_time, const int64_t &time_amount)
+{
+ const auto backend_times = _backends_avail_time.at(backend);
+  // finishing and starting times of the op that will come after the current op
+  auto next_op_fst = backend_times.upper_bound(starting_time);
+  // finishing time of the op that will come before the current op
+  auto prev_op_ft = starting_time;
+  // Iterate until a hole/gap large enough to run this op is found
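+  // Worked example (illustrative numbers): with backend_times = {10: 0, 30: 20},
+  // i.e. finish -> start, starting_time = 5 and time_amount = 7: the op finishing
+  // at 10 leaves no room before it, so prev_op_ft becomes 10 + 1 = 11; the op
+  // finishing at 30 starts at 20, leaving a gap of 9 > 7 after 11, so 11 is returned.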
+ while (next_op_fst != backend_times.end() && next_op_fst->second - prev_op_ft <= time_amount)
+ {
+ prev_op_ft = next_op_fst->first + 1;
+ ++next_op_fst;
+ }
+ return prev_op_ft;
+}
+
+void HEScheduler::scheduleNode(const model::OperationIndex &index)
+{
+ VERBOSE(HEScheduler::scheduleNode) << "scheduling (" << index.value() << ")" << std::endl;
+ int64_t eft = std::numeric_limits<int64_t>::max(), selected_exec_time = 0;
+ const auto &node = _graph->operations().at(index);
+
+ std::multimap<int64_t, int64_t> selected_transfer_st_exec_time;
+  // Select the backend with the smallest EFT for this task
+ const backend::Backend *chosen_backend = nullptr;
+ for (const auto *backend : _all_backends)
+ {
+ std::multimap<int64_t, int64_t> transfer_st_exec_time;
+ const auto est_and_et = ESTAndExecTime(backend, index, transfer_st_exec_time);
+
+ if (eft > est_and_et.first + est_and_et.second)
+ {
+ eft = est_and_et.first + est_and_et.second;
+ selected_exec_time = est_and_et.second;
+ chosen_backend = backend;
+ selected_transfer_st_exec_time = transfer_st_exec_time;
+ }
+ }
+
+ if (chosen_backend == nullptr)
+ {
+ throw std::runtime_error{"Fail to choose backend on scheduler"};
+ }
+
+ for (const auto &it : selected_transfer_st_exec_time)
+ {
+ auto prev_op_ft = backendAvailableTime(_cpu_backend, it.first, it.second);
+ _backends_avail_time[_cpu_backend].insert({prev_op_ft + it.second, prev_op_ft});
+ }
+
+ _ops_eft[index] = eft;
+ _backends_avail_time[chosen_backend].emplace(eft, eft - selected_exec_time);
+ _backend_resolver->setBackend(index, chosen_backend);
+
+ VERBOSE(HEScheduler::scheduleNode) << "backend for " << node.getName() << " is "
+ << chosen_backend->config()->id() << ". Its eft: " << eft
+ << std::endl;
+}
+
+std::pair<int64_t, int64_t>
+HEScheduler::ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index,
+ std::multimap<int64_t, int64_t> &transfer_st_exec_time)
+{
+ const bool is_linear_exec = "Linear" == util::getConfigString(util::config::EXECUTOR);
+ const bool is_parallel_exec = "Parallel" == util::getConfigString(util::config::EXECUTOR);
+  // A permutation causes the creation of a separate subgraph that contains just the permutation
+  // node. This isn't needed for the Linear executor since it doesn't use subgraphs
+  // The 1 ms penalty was picked experimentally
+  int64_t permute_fine = 1000;
+  // Multiply CPU operations' exec time by 2 because in the parallel executor the CPU might be
+  // busy with permutations on other branches or with tasks not specific to nnfw, and this op
+  // would have to wait for them.
+  // The factor 2 was picked experimentally
+  const int64_t CPU_DELAY = 2;
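+  // Illustrative example (assumed numbers): with the parallel executor and a non-mergable
+  // node, a 300 us data transfer is billed as 300 * CPU_DELAY + permute_fine
+  // = 600 + 1000 = 1600 us in the loop below.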
+ const auto &node = _graph->operations().at(index);
+ const bool quant = isQuant(*_graph, node);
+ const auto size = getOperationsFlattenedIOSize(*_graph, node);
+  // If this node can be part of a subgraph, assigning it a different backend will create
+  // another subgraph
+ if (isMergable(*_graph, node))
+ {
+ permute_fine *= 2;
+ }
+ if (isWorkaroundSkip(*_graph, backend, node, quant))
+ {
+ return {_exec_time->getMax(), _exec_time->getMax()};
+ }
+ // get average exec time of the op on this backend
+ auto exec_time = getOpTime(backend, node.getName(), quant, size);
+ if (backend->config()->id() == "cpu" && is_parallel_exec)
+ {
+ exec_time *= CPU_DELAY;
+ }
+
+ // get max eft of direct (one level above) predecessors
+ auto max_pred_eft = predMaxEFT(backend, node, transfer_st_exec_time);
+
+ int64_t total_transfer_cost = 0;
+ std::vector<std::multimap<int64_t, int64_t>::iterator> inserted_permutations;
+  // Find free time for the data transfers and insert them into the backend's taskset. This is
+  // needed so that:
+  // 1. The time for multiple permutations of this node's inputs is found correctly
+  // 2. If backend == cpu, then the free time for this node comes after the permutations
+ for (auto &it : transfer_st_exec_time)
+ {
+ if (is_parallel_exec)
+ {
+ it.second *= CPU_DELAY;
+ }
+ if (!is_linear_exec)
+ {
+ it.second += permute_fine;
+ }
+ total_transfer_cost += it.second;
+
+ const auto prev_op_ft = backendAvailableTime(_cpu_backend, it.first, it.second);
+
+ max_pred_eft = std::max(max_pred_eft, prev_op_ft + it.second);
+
+ const auto tmp = _backends_avail_time[_cpu_backend].emplace(prev_op_ft + it.second, prev_op_ft);
+ inserted_permutations.push_back(tmp.first);
+ }
+  // Find the hole/gap where this op can be put, or the finishing time of the last assigned op
+ auto prev_op_ft = backendAvailableTime(backend, max_pred_eft, exec_time);
+
+  // Remove the inserted permutations from the cpu's taskset
+ for (const auto &it : inserted_permutations)
+ {
+ _backends_avail_time[_cpu_backend].erase(it);
+ }
+
+  /* For a non-parallel executor, measure just the exec time and data transfer time,
+   * because the EFT (prev_op_ft) is the same for all backends. Since two operations
+   * can't run simultaneously, the currently running operation must finish first.
+   * When an operation starts, all backends are free, so they only need time for
+   * data transfer.*/
+ if (!is_parallel_exec)
+ {
+ VERBOSE(HEScheduler::ESTAndExecTime)
+ << "exec_time of (" << index.value() << ") " << node.getName() << " quant==" << quant
+ << " on " << backend->config()->id() << " is " << exec_time
+ << " microseconds. Data transfer cost: " << total_transfer_cost << std::endl;
+
+ return {total_transfer_cost, exec_time};
+ }
+ VERBOSE(HEScheduler::ESTAndExecTime)
+ << "exec_time of (" << index.value() << ") " << node.getName() << " quant==" << quant
+ << " on " << backend->config()->id() << ": " << exec_time
+ << " microseconds. Backend available time: " << prev_op_ft
+ << " Parent's max eft: " << max_pred_eft - total_transfer_cost
+ << " data transfer cost: " << total_transfer_cost << std::endl;
+
+ return {prev_op_ft, exec_time};
+}
+
+int64_t HEScheduler::predMaxEFT(const backend::Backend *backend, const model::Operation &node,
+ std::multimap<int64_t, int64_t> &transfer_st_exec_time)
+{
+ int64_t max_pred_eft = 0;
+ for (const auto &input_operand_idx : node.getInputs())
+ {
+ const auto &input_operand = _graph->operands().at(input_operand_idx);
+ const bool quant = input_operand.typeInfo().type() == model::DataType::QUANT8_ASYMM;
+
+ for (const auto &input_node_idx : input_operand.getDef().list())
+ {
+      // Data transfer cost from the parent node's backend to the current node's backend:
+ auto parent_backend = _backend_resolver->getBackend(input_node_idx);
+
+ max_pred_eft = std::max(max_pred_eft, _ops_eft.at(input_node_idx));
+ if (parent_backend != backend)
+ {
+        // Multiply the operand size by 2 because the size must cover both input and output
+ int64_t transfer_cost =
+ getPermuteTime(parent_backend, backend, quant, input_operand.info().total_size() * 2);
+ transfer_st_exec_time.emplace(_ops_eft.at(input_node_idx), transfer_cost);
+ }
+ }
+ }
+ return max_pred_eft;
+}
+
+} // namespace compiler
+
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/HEScheduler.h b/runtimes/neurun/core/src/compiler/HEScheduler.h
new file mode 100644
index 000000000..2b818f248
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/HEScheduler.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file HEScheduler.h
+ * @brief This file contains the HEScheduler class, which defines and runs the Heterogeneous
+ *        Execution Scheduler
+ */
+
+#ifndef __NEURUN_COMPILER_H_E_SCHEDULER_H_
+#define __NEURUN_COMPILER_H_E_SCHEDULER_H_
+
+#include "compiler/IScheduler.h"
+#include "graph/Graph.h"
+#include "backend/ExecTime.h"
+#include "backend/Backend.h"
+#include "cpp14/memory.h"
+#include "model/OperationIndexMap.h"
+#include <map>
+#include <memory>
+
+namespace neurun
+{
+
+namespace compiler
+{
+/**
+ * @brief Class to schedule tasks
+ */
+class HEScheduler : IScheduler
+{
+public:
+ /**
+ * @brief Construct a new Heterogeneous Execution Scheduler object
+   * @param[in] operands Operands of the graph model
+   * @param[in] backends Backends available for scheduling
+   * @param[in] registry Custom kernel registry
+ */
+ HEScheduler(const neurun::model::Operands &operands,
+ std::vector<const backend::Backend *> backends,
+ const std::shared_ptr<backend::custom::KernelRegistry> &registry)
+ : _is_supported{}, _backends_avail_time{}, _ops_eft{},
+ _op_to_rank{std::make_shared<model::OperationIndexMap<int64_t>>()},
+ _all_backends(std::move(backends))
+ {
+ _backend_resolver =
+ nnfw::cpp14::make_unique<compiler::BackendResolver>(operands, _all_backends, registry);
+ _exec_time = nnfw::cpp14::make_unique<backend::ExecTime>(_all_backends);
+
+ // Find cpu backend
+ auto cpu_backend_it = std::find_if(
+ _all_backends.begin(), _all_backends.end(),
+ [](const backend::Backend *backend) { return backend->config()->id() == "cpu"; });
+ if (cpu_backend_it == _all_backends.end())
+ throw std::runtime_error("HEScheduler could be used only if 'cpu' backend is available");
+ _cpu_backend = *cpu_backend_it;
+ }
+
+public:
+ /**
+ * @brief Task scheduling
+ *
+   * @note The main idea is taken from the HSIP algorithm:
+ * https://www.hindawi.com/journals/sp/2016/3676149/
+ */
+ std::unique_ptr<compiler::BackendResolver> schedule(const graph::Graph &graph) final;
+ std::shared_ptr<model::OperationIndexMap<int64_t>> getIndexedRanks() { return _op_to_rank; }
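+
+  // Typical use (a sketch; actual call sites may differ):
+  //   HEScheduler scheduler{graph.operands(), backend::BackendManager::instance().getAll(),
+  //                         graph.getKernelRegistry()};
+  //   auto backend_resolver = scheduler.schedule(graph);
+  //   auto ranks = scheduler.getIndexedRanks();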
+
+private:
+ bool isNodeProfiled(const model::Operation &);
+
+ void scheduleNode(const model::OperationIndex &);
+ /**
+ * @brief Get earliest starting time and execution time of an operation on a backend.
+ *
+   * @note Returns the time when the operation's inputs are ready and the backend is available.
+   *       It also returns the exec time; for the "cpu" backend, this is exec_time*CPU_DELAY
+ *
+ * @param[in] backend: backend, for which to return the time
+ * @param[in] index: index of an operation
+ * @param[out] transfer_st_exec_time: est and exec time of data transfer operation
+ *
+ * @return earliest starting time and execution time
+ */
+ std::pair<int64_t, int64_t>
+ ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index,
+ std::multimap<int64_t, int64_t> &transfer_st_exec_time);
+ /**
+ * @brief Returns the latest finishing time of parents of a node.
+ *
+ * @param[in] backend: backend, for which to return the time
+ * @param[in] node: node to get eft of parents
+ * @param[out] transfer_st_exec_time: est and exec time of data transfer operation
+ *
+   * @return latest finishing time among parent nodes
+ */
+ int64_t predMaxEFT(const backend::Backend *backend, const model::Operation &node,
+ std::multimap<int64_t, int64_t> &transfer_st_exec_time);
+
+ void makeRank();
+
+ int64_t DFSMaxRank(const model::OperationIndex &index);
+
+ int64_t DFSChildrenMaxRank(const model::OperationIndex &index);
+ /**
+   * @brief Returns the time when the backend is available for at least the given amount of time.
+   *
+   * @note Returns either a hole/gap between two already scheduled operations,
+   *       or the finishing time of the last scheduled operation
+   *
+   * @param[in] backend backend, for which to return the time
+   * @param[in] starting_time time from which to start looking for a gap
+   * @param[in] time_amount amount of time the gap must provide
+ *
+ * @return time, when backend has at least time_amount free time
+ */
+ int64_t backendAvailableTime(const backend::Backend *backend, const int64_t &starting_time,
+ const int64_t &time_amount);
+
+ int64_t getOpTime(const backend::Backend *backend, const std::string &operation, bool quant,
+ uint32_t size);
+
+ int64_t getPermuteTime(const backend::Backend *src_backend, const backend::Backend *dst_backend,
+ bool quant, uint32_t size);
+
+ void scheduleShufflingBackends();
+
+ int64_t tryBackend(const model::Operation &node, const backend::Backend *backend);
+
+private:
+  // This variable stores backend/node pairs with unknown execution time, and hints the scheduler
+  // whether it should assign these backends to these nodes:
+  // * It stores false for unsupported nodes
+  // * During rank calculation with profiling mode enabled, it stores true for supported nodes
+  std::unordered_map<const backend::Backend *, std::unordered_map<std::string, bool>> _is_supported;
+  // Per backend: map of scheduled operations' finishing time -> starting time
+ std::unordered_map<const backend::Backend *, std::map<int64_t, int64_t>> _backends_avail_time;
+ model::OperationIndexMap<int64_t> _ops_eft;
+ std::multimap<int64_t, model::OperationIndex, std::greater<int64_t>> _rank_to_op;
+ std::shared_ptr<model::OperationIndexMap<int64_t>> _op_to_rank;
+ std::unique_ptr<compiler::BackendResolver> _backend_resolver;
+ std::unique_ptr<backend::ExecTime> _exec_time;
+ const graph::Graph *_graph{nullptr};
+ const std::vector<const backend::Backend *> _all_backends;
+ const backend::Backend *_cpu_backend{nullptr};
+};
+
+} // namespace compiler
+
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_H_E_SCHEDULER_H_
diff --git a/runtimes/neurun/core/src/compiler/IScheduler.h b/runtimes/neurun/core/src/compiler/IScheduler.h
new file mode 100644
index 000000000..5b425bf45
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/IScheduler.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CORE_COMPILER_I_SCHEDULER_H__
+#define __NEURUN_CORE_COMPILER_I_SCHEDULER_H__
+
+#include "BackendResolver.h"
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+struct IScheduler
+{
+ virtual ~IScheduler() = default;
+
+ virtual std::unique_ptr<BackendResolver> schedule(const graph::Graph &graph) = 0;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_CORE_COMPILER_I_SCHEDULER_H__
diff --git a/runtimes/neurun/core/src/compiler/Linear.cc b/runtimes/neurun/core/src/compiler/Linear.cc
new file mode 100644
index 000000000..72d0fdb8f
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/Linear.cc
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+
+#include "Linear.h"
+
+#include "graph/operation/LowerInfo.h"
+#include "graph/operand/LowerInfo.h"
+#include "backend/IShapeFixer.h"
+#include "backend/IConfig.h"
+#include "backend/IConstantInitializer.h"
+#include "backend/Backend.h"
+#include "compiler/SubTensorInfo.h"
+#include "model/OperandInfo.h"
+#include "model/OperandIndexMap.h"
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+Linear::Linear(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ std::unique_ptr<graph::LowerInfoMap> lower_info_map,
+ std::unique_ptr<compiler::BackendResolver> backend_resolver)
+ : _model(model), _subgraphs{std::move(subgraphs)}, _lower_info_map{std::move(lower_info_map)},
+ _backend_resolver{std::move(backend_resolver)}
+{
+ assert(_model && _subgraphs && _lower_info_map);
+
+ // Get SubgraphSequence by topological sorting
+ {
+    // _subgraphs can't look up a subgraph by operand, so input_to_subgs provides that mapping
+ std::unordered_map<model::OperandIndex, std::list<model::SubgraphIndex>> input_to_subgs;
+
+ // Get the relations between input/subgraph to be used for dfs-post-iter
+ //
+    // [0]      # input -> input_to_subgs[0] = {SUBG0}
+    // |
+    // [SUBG0]
+    // |
+    // [1]-----.   # input -> input_to_subgs[1] = {SUBG1, SUBG2}
+    // |      |
+    // [SUBG1] [SUBG2]
+    // |      |
+    // [2]    [3]  # input -> input_to_subgs[2] = {SUBG3}
+    // \      /   # input -> input_to_subgs[3] = {SUBG3}
+ // [SUBG3]
+ // |
+ // [4]
+ _subgraphs->iterate([&](const model::SubgraphIndex &subg_idx, model::Subgraph &subg) {
+ for (auto input : subg.getInputs())
+ {
+ // only valid_inputs
+ const auto &operand = _model->operands.at(input);
+ if (operand.isConstant())
+ continue;
+
+ auto it = input_to_subgs.find(input);
+ if (it == input_to_subgs.end())
+ {
+ std::list<model::SubgraphIndex> list{subg_idx};
+ input_to_subgs[input] = list;
+ }
+ else
+ {
+ it->second.push_back(subg_idx);
+ }
+ }
+ });
+
+ std::unordered_map<model::SubgraphIndex, bool> visited;
+ _subgraphs->iterate([&](const model::SubgraphIndex &index, const model::Subgraph &) {
+ visited[index] = false;
+ });
+
+ std::function<void(const model::SubgraphIndex &, model::Subgraph &)> dfs_recursive =
+ [&](const model::SubgraphIndex &index, model::Subgraph &subg) -> void {
+ if (visited[index])
+ return;
+ visited[index] = true;
+
+      // The outputs should not be constants
+ for (auto output : subg.getOutputs())
+ {
+ const auto it = input_to_subgs.find(output);
+ if (it != input_to_subgs.end())
+ {
+ const auto &subg_index_list = it->second;
+ for (const auto &index : subg_index_list)
+ {
+ auto &subg = _subgraphs->at(index);
+ dfs_recursive(index, subg);
+ }
+ }
+ }
+
+ _elements.emplace_back(&_subgraphs->at(index), getLowerInfo(index));
+ };
+
+ _subgraphs->iterate(dfs_recursive);
+
+ // All of the nodes must have been visited.
+ assert(
+ std::all_of(visited.begin(), visited.end(),
+ [](const std::pair<const model::SubgraphIndex, bool> &v) { return v.second; }));
+
+    // NOTE These subgraphs are now in reverse order
+ std::reverse(_elements.begin(), _elements.end());
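+    // For the diagram above, one possible post-order is SUBG3, SUBG1, SUBG2, SUBG0 (each
+    // subgraph is emitted only after everything downstream of it), so the reversed
+    // sequence SUBG0, SUBG2, SUBG1, SUBG3 is a valid topological order.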
+ }
+
+ {
+ const auto &backendToString = [](const neurun::backend::Backend *backend) {
+ assert(backend);
+ std::string str;
+ str += backend->config()->id();
+ str += " ";
+ return "{ " + str + "}";
+ };
+
+ VERBOSE(Linear) << "Final SubgraphSequence" << std::endl;
+ for (const auto &element : _elements)
+ {
+ const auto subg = element.subgraph;
+ const auto lower_info = element.lower_info;
+ VERBOSE(Linear) << "* SUBG"
+ << " " << backendToString(lower_info->backend()) << " " << subg->getStr()
+ << std::endl;
+ }
+ }
+}
+
+void Linear::accept(model::OperationVisitor &&visitor) const
+{
+ for (const auto &e : _elements)
+ {
+ e.subgraph->accept(visitor);
+ }
+}
+
+void Linear::planTensors()
+{
+ model::OperandIndexMap<std::shared_ptr<backend::ITensorBuilder>> tensor_builder_map;
+
+  // NOTE
+  // While the current ITensorBuilder exposes registerSubTensorInfo() for subtensors,
+  // and this stage uses registerSubTensorInfo() and notify{First|Last}Use(),
+  // subtensor handling should be processed by each backend. See #5726.
+ model::OperandIndexMap<uint32_t> uses_map;
+ model::OperandIndexMap<uint32_t> def_map;
+ model::OperandIndexSequence constants;
+
+ // Prepare scanning
+ _model->operands.iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ const auto lower_info = getLowerInfo(ind);
+    // TODO Remove this once neurun no longer supports cases such as
+    // GeneratedTests.reshape_quant8_weights_as_inputs
+ if (lower_info->def_factors().size() == 0 && lower_info->use_factors().size() == 0 &&
+ _model->inputs.contains(ind) == false)
+ {
+ VERBOSE(LINEAR) << "Operand #" << ind.value() << " will be not used. no more process."
+ << std::endl;
+ return;
+ }
+
+ uses_map[ind] = obj.getUses().size();
+ def_map[ind] = obj.getDef().size(); // should be 1 or 0
+
+ bool is_const = obj.isConstant();
+ if (is_const)
+ {
+ constants.append(ind);
+ }
+
+ for (auto factor : lower_info->def_factors())
+ {
+ bool isSubTensor = false;
+ auto backend = factor.backend();
+ auto tensor_builder = _backend_resolver->getBackendContext(backend)->tensor_builder;
+
+ if (backend->config()->SupportSubTensorAlloc())
+ {
+ const auto parentInfo = obj.parent_info();
+ if (parentInfo != nullptr)
+ {
+ isSubTensor = true;
+ }
+ }
+
+ if (isSubTensor)
+ {
+ const compiler::SubTensorInfo info(obj);
+ tensor_builder->registerSubTensorInfo(ind, info);
+ }
+ else
+ {
+ const auto info = obj.info();
+
+        // NOTE This assumes an operand can have only one layout, and that only PermuteNode can
+        // have different layouts for input and output
+ const auto &def = *obj.getDef().list().cbegin();
+ auto frontend_layout = _subgraphs->at(_subgraphs->getOperation(def)).getLayout();
+ if (frontend_layout == model::Layout::UNKNOWN)
+ {
+ const auto &use = *obj.getUses().list().cbegin();
+ frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
+ }
+ const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
+ tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, is_const);
+ }
+
+ tensor_builder_map[ind] = tensor_builder;
+ }
+ });
+
+  // If a tensor is a model output, increase its use count.
+  // This keeps the tensor from being deallocated before the results are read.
+ for (const auto &ind : _model->outputs)
+ {
+ uses_map[ind]++;
+ }
+
+ // Start scanning to do notify{First|Last}Use for each tensor
+
+  // If a tensor is a constant, increase its use count.
+  // This keeps the tensor from being deallocated, i.e. constants are deallocated last.
+  // Also, allocate constant operands first
+ VERBOSE(LINEAR) << "TENSORS as CONSTANT" << std::endl;
+ for (const auto &ind : constants)
+ {
+ uses_map[ind]++;
+ tensor_builder_map[ind]->notifyFirstUse(ind);
+ }
+
+ // Allocate Model's inputs
+ VERBOSE(LINEAR) << "TENSORS as MODEL INPUT" << std::endl;
+ for (const auto &ind : _model->inputs)
+ {
+ auto tensor_builder = tensor_builder_map[ind];
+ if (!tensor_builder) // for GeneratedTests.xxx_weights_as_inputs
+ continue;
+ tensor_builder->notifyFirstUse(ind);
+ }
+
+  // At each operation,
+  // 1. Scan the DEF of its outputs. If the DEF count is set, allocate the tensor
+  // 2. Scan the USE of its inputs. Decrease the USE count and deallocate when it reaches 0
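+  // Example with a hypothetical operand: if operand #5 is defined by op A and used by ops B
+  // and C, then def_map[5] == 1 and uses_map[5] == 2. Visiting A's outputs triggers
+  // notifyFirstUse(5); after B and C each decrement the count, uses_map[5] reaches 0 and
+  // notifyLastUse(5) lets the tensor builder release the tensor.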
+ VERBOSE(LINEAR) << "TENSORS" << std::endl;
+ for (const auto &e : _elements)
+ {
+ for (const auto &op : e.subgraph->operations())
+ {
+ for (const auto &ind : op.node->getOutputs())
+ {
+ assert(def_map.find(ind) != def_map.end());
+ if (def_map[ind])
+ {
+ def_map[ind] = 0;
+ tensor_builder_map[ind]->notifyFirstUse(ind);
+ }
+ }
+
+ for (const auto &ind : op.node->getInputs())
+ {
+ assert(uses_map.find(ind) != uses_map.end());
+ assert(uses_map[ind] > 0);
+ uses_map[ind]--;
+ if (uses_map[ind] == 0)
+ {
+ tensor_builder_map[ind]->notifyLastUse(ind);
+ }
+ }
+ }
+ }
+
+ // Dispose and validate
+ for (const auto &ind : _model->outputs)
+ {
+ --uses_map[ind];
+ assert(uses_map[ind] == 0);
+ tensor_builder_map[ind]->notifyLastUse(ind);
+ }
+
+ for (const auto &ind : constants)
+ {
+ --uses_map[ind];
+ assert(uses_map[ind] == 0);
+ tensor_builder_map[ind]->notifyLastUse(ind);
+ }
+
+ assert(std::all_of(
+ uses_map.begin(), uses_map.end(),
+ [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+
+ assert(std::all_of(
+ def_map.begin(), def_map.end(),
+ [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+}
+
+void Linear::iterate(const std::function<void(const Element &element)> &fn) const
+{
+ for (const auto &e : _elements)
+ {
+ fn(e);
+ }
+}
+
+void Linear::generateConstantInitializers(void) const
+{
+ iterate([&](const compiler::Linear::Element &element) {
+ auto backend = element.lower_info->backend();
+
+ auto constant_initializer = _backend_resolver->getBackendContext(backend)->constant_initializer;
+ constant_initializer->generate(*element.subgraph, _model->operands);
+ });
+}
+
+const graph::operation::LowerInfo *Linear::getLowerInfo(const model::SubgraphIndex &index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operation.find(index);
+ if (itr == _lower_info_map->operation.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+const graph::operand::LowerInfo *Linear::getLowerInfo(const model::OperandIndex &index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+} // namespace compiler
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/Linear.h b/runtimes/neurun/core/src/compiler/Linear.h
new file mode 100644
index 000000000..78c782a78
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/Linear.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_LINEAR_H__
+#define __NEURUN_COMPILER_LINEAR_H__
+
+#include <vector>
+#include <memory>
+
+#include "model/Model.h"
+#include "model/Subgraphs.h"
+#include "backend/ITensorBuilder.h"
+#include "graph/LowerInfoMap.h"
+#include "compiler/BackendResolver.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+struct OperationVisitor;
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace compiler
+{
+
+class Linear
+{
+public:
+ struct Element
+ {
+ const model::Subgraph *subgraph;
+ const graph::operation::LowerInfo *lower_info;
+
+ Element(const model::Subgraph *subgraph, const graph::operation::LowerInfo *lower_info)
+ : subgraph{subgraph}, lower_info{lower_info}
+ {
+ // DO NOTHING
+ }
+ };
+
+public:
+ Linear(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ std::unique_ptr<graph::LowerInfoMap> lower_info_map,
+ std::unique_ptr<compiler::BackendResolver> backend_resolver);
+
+public:
+ Linear(const Linear &linear) = delete;
+
+public:
+ void accept(model::OperationVisitor &&visitor) const;
+
+ void planTensors();
+
+ void iterate(const std::function<void(const Element &element)> &fn) const;
+
+ void generateConstantInitializers(void) const;
+
+ std::unique_ptr<graph::LowerInfoMap> releaseLowerInfo() { return std::move(_lower_info_map); }
+ graph::LowerInfoMap *getLowerInfo() { return _lower_info_map.get(); }
+
+ std::unique_ptr<model::Subgraphs> releaseSubgraphs() { return std::move(_subgraphs); }
+
+ std::vector<Element> &&releaseElements() { return std::move(_elements); }
+
+ const backend::BackendContext *getBackendContext(const backend::Backend *backend)
+ {
+ return _backend_resolver->getBackendContext(backend);
+ }
+
+ const compiler::BackendResolver *backend_resolver() const { return _backend_resolver.get(); }
+
+private:
+ // TODO Replace these getLowerInfo methods with ones of LowerInfoMap in the future
+ const graph::operation::LowerInfo *getLowerInfo(const model::SubgraphIndex &index) const;
+ const graph::operand::LowerInfo *getLowerInfo(const model::OperandIndex &index) const;
+
+private:
+ std::shared_ptr<const model::Model> _model;
+ std::unique_ptr<model::Subgraphs> _subgraphs;
+ std::unique_ptr<graph::LowerInfoMap> _lower_info_map;
+ std::vector<Element> _elements;
+ std::unique_ptr<compiler::BackendResolver> _backend_resolver;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_LINEAR_H__
diff --git a/runtimes/neurun/core/src/compiler/ManualScheduler.cc b/runtimes/neurun/core/src/compiler/ManualScheduler.cc
new file mode 100644
index 000000000..efd5ccc31
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/ManualScheduler.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ManualScheduler.h"
+#include "model/Operations.Include.h"
+#include "backend/Backend.h"
+#include "backend/BackendManager.h"
+#include "backend/IConfig.h"
+#include "util/ConfigSource.h"
+#include "misc/string_helpers.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+std::unique_ptr<BackendResolver> ManualScheduler::schedule(const graph::Graph &graph)
+{
+ auto backend_resolver = nnfw::cpp14::make_unique<compiler::BackendResolver>(
+ graph.operands(), backend::BackendManager::instance().getAll(), graph.getKernelRegistry());
+
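+  // Assignment precedence (later setBackend() calls overwrite earlier ones): the per-operation
+  // map (3) overrides per-operation-type settings (2), which override the default backend for
+  // all operations (1).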
+ // 1. Backend for All operations
+ const auto backend_all_str = util::getConfigString(util::config::OP_BACKEND_ALLOPS);
+ auto backend_all = backend::BackendManager::instance().get(backend_all_str);
+
+ VERBOSE(ManualScheduler) << "Default backend for all ops: " << backend_all_str << std::endl;
+
+ graph.operations().iterate([&](const model::OperationIndex &index, const model::Operation &) {
+ backend_resolver->setBackend(index, backend_all);
+ });
+
+ // 2. Backend per operation type
+ std::unordered_map<std::type_index, backend::Backend *> op_type_map;
+  // By default, CustomNode uses the cpu backend
+ op_type_map[typeid(model::operation::CustomNode)] =
+ backend::BackendManager::instance().get("cpu");
+#define OP(InternalName, IsNnApi) \
+ if (IsNnApi) \
+ { \
+ const auto &backend_str = util::getConfigString(util::config::OP_BACKEND_##InternalName); \
+ if (!backend_str.empty()) \
+ { \
+ auto backend = backend::BackendManager::instance().get(backend_str); \
+ VERBOSE(Lower) << "backend for " << #InternalName << ": " << backend_str << std::endl; \
+ op_type_map[typeid(model::operation::InternalName)] = backend; \
+ } \
+ }
+#include "model/Operations.lst"
+#undef OP
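+  // The OP() macro above is expanded once per operation listed in Operations.lst (an
+  // X-macro), so every NN API operation type gets its own OP_BACKEND_<InternalName>
+  // config lookup.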
+ graph.operations().iterate(
+ [&](const model::OperationIndex &index, const model::Operation &operation) {
+ auto itr = op_type_map.find(typeid(operation));
+ if (itr != op_type_map.end())
+ {
+ backend_resolver->setBackend(index, itr->second);
+ }
+ });
+
+ // 3. Backend per operation
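+  // The map format, as parsed below, is "index=backend" pairs separated by ';',
+  // e.g. OP_BACKEND_MAP="0=cpu;2=acl_cl" (backend ids here are illustrative).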
+ try
+ {
+ auto map_str = util::getConfigString(util::config::OP_BACKEND_MAP);
+ auto key_val_list = nnfw::misc::split(map_str, ';');
+ for (const auto &key_val_str : key_val_list)
+ {
+ if (key_val_str.empty())
+ {
+ continue;
+ }
+
+ auto key_val = nnfw::misc::split(key_val_str, '=');
+ const auto &key_str = key_val.at(0);
+ const auto &val = key_val.at(1);
+ auto key = static_cast<uint32_t>(std::stoi(key_str));
+
+      graph.operations().at(model::OperationIndex{key}); // Check that it exists; throws if not
+ backend_resolver->setBackend(model::OperationIndex{key},
+ backend::BackendManager::instance().get(val));
+ }
+ }
+ catch (...)
+ {
+ VERBOSE(ManualScheduler) << "Invalid value from " << util::config::OP_BACKEND_MAP
+ << ". Some of the given values are ignored" << std::endl;
+ }
+
+ // Dump final assignment
+ backend_resolver->iterate(
+ [&](const model::OperationIndex &index, const backend::BackendContext &backend_ctx) {
+ VERBOSE(ManualScheduler) << "backend for operation #" << index.value() << ": "
+ << backend_ctx.backend->config()->id() << std::endl;
+ });
+
+ return backend_resolver;
+}
+
+} // namespace compiler
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/ManualScheduler.h b/runtimes/neurun/core/src/compiler/ManualScheduler.h
new file mode 100644
index 000000000..c40318a70
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/ManualScheduler.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CORE_COMPILER_MANUAL_SCHEDULER_H__
+#define __NEURUN_CORE_COMPILER_MANUAL_SCHEDULER_H__
+
+#include "IScheduler.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+class ManualScheduler : public IScheduler
+{
+public:
+ std::unique_ptr<BackendResolver> schedule(const graph::Graph &graph) override;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_CORE_COMPILER_MANUAL_SCHEDULER_H__
diff --git a/runtimes/neurun/core/src/compiler/OperandContext.cc b/runtimes/neurun/core/src/compiler/OperandContext.cc
new file mode 100644
index 000000000..77adc556c
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/OperandContext.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperandContext.h"
+
+#include <cassert>
+
+namespace neurun
+{
+namespace compiler
+{
+
+OperandContext &OperandContext::set(const model::OperandIndex &id,
+ const std::shared_ptr<backend::operand::IObject> &object)
+{
+ // Only one object for an id
+ assert(_objects.find(id) == _objects.end());
+ _objects[id] = object;
+ return (*this);
+}
+
+void OperandContext::iterate(
+ const std::function<void(const model::OperandIndex &, backend::operand::IObject &)> &fn)
+{
+ for (auto &e : _objects)
+ {
+ fn(e.first, *e.second);
+ }
+}
+
+} // namespace compiler
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/OperandContext.h b/runtimes/neurun/core/src/compiler/OperandContext.h
new file mode 100644
index 000000000..169122500
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/OperandContext.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_OPERAND_CONTEXT_H__
+#define __NEURUN_COMPILER_OPERAND_CONTEXT_H__
+
+#include "backend/operand/IObject.h"
+#include "model/OperandIndexMap.h"
+#include <unordered_map>
+#include <memory>
+
+namespace neurun
+{
+namespace compiler
+{
+
+class OperandContext
+{
+public:
+ OperandContext &set(const model::OperandIndex &ind,
+ const std::shared_ptr<backend::operand::IObject> &object);
+
+public:
+ bool exist(const ::neurun::model::OperandIndex &ind) const
+ {
+ return _objects.find(ind) != _objects.end();
+ }
+
+public:
+ std::shared_ptr<backend::operand::IObject> at(const model::OperandIndex &ind) const
+ {
+ return _objects.at(ind);
+ }
+
+ std::shared_ptr<backend::operand::IObject> &at(const model::OperandIndex &ind)
+ {
+ return _objects.at(ind);
+ }
+
+ void
+ iterate(const std::function<void(const model::OperandIndex &, backend::operand::IObject &)> &fn);
+
+private:
+ model::OperandIndexMap<std::shared_ptr<backend::operand::IObject>> _objects;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_OPERAND_CONTEXT_H__
diff --git a/runtimes/neurun/core/src/compiler/OperationValidator.cc b/runtimes/neurun/core/src/compiler/OperationValidator.cc
new file mode 100644
index 000000000..0be680941
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/OperationValidator.cc
@@ -0,0 +1,879 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationValidator.h"
+
+#include <typeinfo>
+
+#include "model/Operands.h"
+#include "graph/operation/LowerInfo.h"
+
+#include "util/logging.h"
+#include "util/Utils.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+void OperationValidator::visit(const model::Subgraph &subgraph)
+{
+ _current_subg_layout = subgraph.getLayout();
+ for (const auto &e : subgraph.operations())
+ {
+ const auto &node = *(e.node);
+ node.accept(*this);
+ }
+}
+
+void OperationValidator::visit(const model::operation::CastNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(0)};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(input_index);
+
+ assert(_ctx.at(output_index).shape() == _ctx.at(input_index).shape());
+}
+
+void OperationValidator::visit(const model::operation::ComparisonNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto lhs_index{node.getInputs().at(model::operation::ComparisonNode::Input::INPUT0)};
+ const auto rhs_index{node.getInputs().at(model::operation::ComparisonNode::Input::INPUT1)};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(lhs_index);
+ UNUSED_RELEASE(rhs_index);
+
+ assert(_ctx.at(lhs_index).typeInfo().type() == _ctx.at(rhs_index).typeInfo().type());
+ assert(_ctx.at(output_index).typeInfo().type() == model::DataType::BOOL8);
+}
+
+void OperationValidator::visit(const model::operation::SoftmaxNode &node)
+{
+ VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl;
+
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(0)};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(input_index);
+
+ assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
+}
+
+void OperationValidator::visit(const model::operation::PermuteNode &node)
+{
+ VERBOSE(Permute) << "Configure Permute operation" << std::endl;
+
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(0)};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(input_index);
+
+ assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
+}
+
+void OperationValidator::visit(const model::operation::ReduceSumNode &node)
+{
+ VERBOSE(Permute) << "Configure ReduceSum operation" << std::endl;
+
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::ReduceSumNode::Input::INPUT)};
+ const auto axis_index{node.param().axis_index};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(input_index);
+ UNUSED_RELEASE(axis_index);
+
+ const auto input_shape = _ctx.at(input_index).shape();
+ const auto output_shape = _ctx.at(output_index).shape();
+ const auto axis_shape = _ctx.at(axis_index).shape();
+
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(input_shape);
+ UNUSED_RELEASE(axis_shape);
+
+ assert(input_shape.rank() <= 4);
+ assert(output_shape.rank() <= input_shape.rank());
+ assert(_ctx.at(axis_index).isConstant());
+ assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
+
+  // NOTE For 4 dimensions, if the ranks of input and output differ, this runtime only
+  // supports cases that reduce height and width or reduce depth.
+ // TODO We have to support all cases of dimensions up to 4.
+ // For correct permuting, we have to set output's shape to be equal in dimension position of the
+ // input. But the positions of the same dimensions in the input and output may be set differently.
+ // For example {2,3,4,5}(input's shape) can be reduced to {3,5}(output's shape). The original
+ // output shape should be {1,3,1,5}, but real output shape may be {3,5}. If you simply try to
+ // extend it in 4 dimensions, it should be {1,1,3,5}.
+ // Even if output shape is changed to {1,3,1,5}, there is another problem. It is that shape of
+ // output tensor used at next operation is changed to {1,3,1,5} after this operation even if the
+ // next operation is not desired.
+ if (input_shape.rank() == 4 && input_shape.rank() != output_shape.rank())
+ {
+ if (output_shape.rank() == 2)
+ {
+ // Reducing HW
+ assert(input_shape.dim(0) == output_shape.dim(0) &&
+ input_shape.dim(3) == output_shape.dim(1));
+ }
+ else if (output_shape.rank() == 3)
+ {
+ // Reducing C or
+ // (Reducing H and C(input and output) == 1) or (Reducing W and C(input and output) == 1)
+ assert((input_shape.dim(0) == output_shape.dim(0) &&
+ input_shape.dim(1) == output_shape.dim(1) &&
+ input_shape.dim(2) == output_shape.dim(2)) ||
+ (input_shape.dim(0) == output_shape.dim(0) &&
+ (input_shape.dim(1) == output_shape.dim(1) ||
+ input_shape.dim(2) == output_shape.dim(1)) &&
+ input_shape.dim(3) == 1 && output_shape.dim(2) == 1));
+ }
+ }
+}
+
+void OperationValidator::visit(const model::operation::TransposeNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::TransposeNode::Input::INPUT)};
+ const auto perm_idx{node.param().perm};
+
+ const auto &output_shape = _ctx.at(output_index).shape();
+ const auto &input_shape = _ctx.at(input_index).shape();
+ const auto &perm_shape = _ctx.at(perm_idx).shape();
+
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(input_shape);
+ UNUSED_RELEASE(perm_shape);
+
+ assert(perm_shape.rank() == 1);
+ assert(input_shape.rank() == perm_shape.dim(0));
+ assert(input_shape.rank() == output_shape.rank());
+}
+
+void OperationValidator::visit(const model::operation::ReduceMaxNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::ReduceMaxNode::Input::INPUT)};
+ const auto axis_index{node.param().axis_index};
+
+ auto output_shape = _ctx.at(output_index).shape();
+ auto input_shape = _ctx.at(input_index).shape();
+ auto axis_shape = _ctx.at(axis_index).shape();
+
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(input_shape);
+ UNUSED_RELEASE(axis_shape);
+
+ assert(input_shape.rank() <= 4);
+ assert(output_shape.rank() <= input_shape.rank());
+ assert(_ctx.at(axis_index).isConstant());
+ assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
+
+  // NOTE For 4 dimensions, if the ranks of input and output differ, this runtime only
+  // supports cases that reduce height and width or reduce depth.
+ // TODO We have to support all cases of dimensions up to 4.
+ // For correct permuting, we have to set output's shape to be equal in dimension position of the
+ // input. But the positions of the same dimensions in the input and output may be set differently.
+ // For example {2,3,4,5}(input's shape) can be reduced to {3,5}(output's shape). The original
+ // output shape should be {1,3,1,5}, but real output shape may be {3,5}. If you simply try to
+ // extend it in 4 dimensions, it should be {1,1,3,5}.
+ // Even if output shape is changed to {1,3,1,5}, there is another problem. It is that shape of
+ // output tensor used at next operation is changed to {1,3,1,5} after this operation even if the
+ // next operation is not desired.
+ if (input_shape.rank() == 4 && input_shape.rank() != output_shape.rank())
+ {
+ if (output_shape.rank() == 2)
+ {
+ // Reducing HW
+ assert(input_shape.dim(0) == output_shape.dim(0) &&
+ input_shape.dim(3) == output_shape.dim(1));
+ }
+ else if (output_shape.rank() == 3)
+ {
+ // Reducing C or
+ // (Reducing H and C(ifm and ofm) == 1) or (Reducing W and C(ifm and ofm) == 1)
+ assert((input_shape.dim(0) == output_shape.dim(0) &&
+ input_shape.dim(1) == output_shape.dim(1) &&
+ input_shape.dim(2) == output_shape.dim(2)) ||
+ (input_shape.dim(0) == output_shape.dim(0) &&
+ (input_shape.dim(1) == output_shape.dim(1) ||
+ input_shape.dim(2) == output_shape.dim(1)) &&
+ input_shape.dim(3) == 1 && output_shape.dim(2) == 1));
+ }
+ }
+}
+
+void OperationValidator::visit(const model::operation::RNNNode &node)
+{
+ // NOTE This validation is for static rnn(non-dynamic shape), but not for dynamic rnn
+ // TODO Support dynamic rnn
+ const auto output_index{node.getOutputs().at(model::operation::RNNNode::Output::OUTPUT)};
+ const auto hidden_state_out_index{
+ node.getOutputs().at(model::operation::RNNNode::Output::HIDDEN_STATE_OUT)};
+
+ const auto input_index{node.getInputs().at(model::operation::RNNNode::Input::INPUT)};
+ const auto weights_index{node.getInputs().at(model::operation::RNNNode::Input::WEIGHTS)};
+ const auto recurrent_weights_index{
+ node.getInputs().at(model::operation::RNNNode::Input::RECURRENT_WEIGHTS)};
+ const auto bias_index{node.getInputs().at(model::operation::RNNNode::Input::BIAS)};
+ const auto hidden_state_in_index{
+ node.getInputs().at(model::operation::RNNNode::Input::HIDDEN_STATE_IN)};
+
+ const auto batch_size = _ctx.at(output_index).shape().dim(0);
+ const auto num_units = _ctx.at(output_index).shape().dim(1);
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(hidden_state_out_index);
+ UNUSED_RELEASE(input_index);
+ UNUSED_RELEASE(weights_index);
+ UNUSED_RELEASE(recurrent_weights_index);
+ UNUSED_RELEASE(bias_index);
+ UNUSED_RELEASE(hidden_state_in_index);
+ UNUSED_RELEASE(batch_size);
+ UNUSED_RELEASE(num_units);
+
+ assert(_ctx.at(output_index).shape().rank() == 2 &&
+ _ctx.at(hidden_state_out_index).shape().rank() == 2 &&
+ _ctx.at(input_index).shape().rank() == 2 && _ctx.at(weights_index).shape().rank() == 2 &&
+ _ctx.at(recurrent_weights_index).shape().rank() == 2 &&
+ _ctx.at(hidden_state_in_index).shape().rank() == 2);
+ assert(_ctx.at(bias_index).shape().rank() == 1);
+
+ assert(batch_size == _ctx.at(input_index).shape().dim(0) &&
+ batch_size == _ctx.at(hidden_state_in_index).shape().dim(0) &&
+ batch_size == _ctx.at(hidden_state_out_index).shape().dim(0));
+ assert(_ctx.at(input_index).shape().dim(1) == _ctx.at(weights_index).shape().dim(1));
+
+ assert(num_units == _ctx.at(weights_index).shape().dim(0) &&
+ num_units == _ctx.at(recurrent_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(bias_index).shape().dim(0));
+ assert(num_units == _ctx.at(output_index).shape().dim(1) &&
+ num_units == _ctx.at(recurrent_weights_index).shape().dim(1) &&
+ num_units == _ctx.at(hidden_state_in_index).shape().dim(1) &&
+ num_units == _ctx.at(hidden_state_out_index).shape().dim(1));
+}
+
+void OperationValidator::visit(const model::operation::SpaceToDepthNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::SpaceToDepthNode::Input::INPUT)};
+ const auto block_size_index{node.param().block_size_index};
+
+ const auto frontend_layout = _current_subg_layout;
+ const auto input_shape = _ctx.at(ifm_index).shape().asFeature(frontend_layout);
+ const auto output_shape = _ctx.at(ofm_index).shape().asFeature(frontend_layout);
+ const auto block_size = _ctx.at(block_size_index).asScalar<int32_t>();
+
+ UNUSED_RELEASE(input_shape);
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(block_size);
+
+ // All assertions as per NNAPI specification.
+ assert(_ctx.at(ifm_index).shape().rank() == 4);
+ assert(_ctx.at(ofm_index).shape().rank() == 4);
+ assert((block_size >= 1) && (input_shape.H % block_size == 0) &&
+ (input_shape.W % block_size == 0));
+ assert(input_shape.N == output_shape.N);
+ assert(input_shape.C * block_size * block_size == output_shape.C);
+}
+
+void OperationValidator::visit(const model::operation::EmbeddingLookupNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto lookups_index{
+ node.getInputs().at(model::operation::EmbeddingLookupNode::Input::LOOKUPS)};
+ const auto values_index{
+ node.getInputs().at(model::operation::EmbeddingLookupNode::Input::VALUES)};
+
+ const auto &output_obj = _ctx.at(output_index);
+ const auto &lookups_obj = _ctx.at(lookups_index);
+ const auto &values_obj = _ctx.at(values_index);
+
+ UNUSED_RELEASE(output_obj);
+ UNUSED_RELEASE(lookups_obj);
+ UNUSED_RELEASE(values_obj);
+
+ // Verify operand here, not at SimpleEmbeddingLookup::configure() to avoid acl's modifying
+ // TensorShape sometimes(Issue: https://github.sec.samsung.net/STAR/nnfw/issues/729)
+ {
+ assert(lookups_obj.typeInfo().type() == neurun::model::DataType::INT32);
+
+ const auto &output_shape = output_obj.shape();
+ const auto &lookups_shape = lookups_obj.shape();
+ const auto &values_shape = values_obj.shape();
+
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(lookups_shape);
+ UNUSED_RELEASE(values_shape);
+
+ assert(lookups_shape.rank() == 1);
+ assert(values_shape.rank() >= 2);
+
+ // output should be a n-D tensor with the same rank and shape as the values tensor, except for
+ // the first dimension which has the same size as lookups' only dimension.
+ assert(output_shape.rank() == values_shape.rank());
+ assert(output_shape.dim(0) == lookups_shape.dim(0));
+ for (int n = 1; n < output_shape.rank(); ++n)
+ {
+ assert(output_shape.dim(n) == values_shape.dim(n));
+ }
+ }
+}
+
+void OperationValidator::visit(const model::operation::ExpNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::ExpNode::Input::INPUT)};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(input_index);
+
+ assert(_ctx.at(output_index).shape() == _ctx.at(input_index).shape());
+ assert(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type());
+}
+
+void OperationValidator::visit(const model::operation::FloorNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::FloorNode::Input::INPUT)};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(input_index);
+
+ assert(_ctx.at(output_index).shape() == _ctx.at(input_index).shape());
+ assert(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type());
+}
+
+void OperationValidator::visit(const model::operation::HashtableLookupNode &node)
+{
+ const auto output_index{
+ node.getOutputs().at(model::operation::HashtableLookupNode::Output::OUTPUT)};
+ const auto hits_index{node.getOutputs().at(model::operation::HashtableLookupNode::Output::HITS)};
+
+ const auto lookups_index{
+ node.getInputs().at(model::operation::HashtableLookupNode::Input::LOOKUPS)};
+ const auto keys_index{node.getInputs().at(model::operation::HashtableLookupNode::Input::KEYS)};
+ const auto values_index{
+ node.getInputs().at(model::operation::HashtableLookupNode::Input::VALUES)};
+
+ const auto &output_obj = _ctx.at(output_index);
+ const auto &hits_obj = _ctx.at(hits_index);
+
+ const auto &lookups_obj = _ctx.at(lookups_index);
+ const auto &keys_obj = _ctx.at(keys_index);
+ const auto &values_obj = _ctx.at(values_index);
+
+ assert(lookups_obj.typeInfo().type() == neurun::model::DataType::INT32);
+ assert(keys_obj.typeInfo().type() == neurun::model::DataType::INT32);
+ assert(hits_obj.typeInfo().type() == neurun::model::DataType::QUANT8_ASYMM);
+
+ const auto &output_shape = output_obj.shape();
+ const auto &hits_shape = hits_obj.shape();
+
+ const auto &lookups_shape = lookups_obj.shape();
+ const auto &keys_shape = keys_obj.shape();
+ const auto &values_shape = values_obj.shape();
+
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(hits_shape);
+ UNUSED_RELEASE(lookups_shape);
+ UNUSED_RELEASE(keys_shape);
+ UNUSED_RELEASE(values_shape);
+
+ assert(values_shape.rank() == output_shape.rank());
+ assert(lookups_shape.rank() == 1);
+ assert(keys_shape.rank() == 1);
+ assert(values_shape.dim(0) == keys_shape.dim(0));
+ assert(lookups_shape.dim(0) == output_shape.dim(0));
+}
+
+void OperationValidator::visit(const model::operation::TransposeConvNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto out_shape_index{
+ node.getInputs().at(model::operation::TransposeConvNode::Input::OUTPUT_SHAPE)};
+ const auto ifm_index{node.getInputs().at(model::operation::TransposeConvNode::Input::INPUT)};
+ const auto ker_index{node.getInputs().at(model::operation::TransposeConvNode::Input::KERNEL)};
+
+ // Only 4D tensors are supported
+ assert(_ctx.at(ofm_index).shape().rank() == 4);
+ assert(_ctx.at(ofm_index).shape().rank() == _ctx.at(ifm_index).shape().rank());
+ assert(_ctx.at(ofm_index).shape().rank() == _ctx.at(ker_index).shape().rank());
+
+ const auto frontend_layout = _current_subg_layout;
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(frontend_layout);
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(frontend_layout);
+ // The kernel has only IHWO layout on frontend
+ // So ker_shape is treated here below
+ // I -> N
+ // H -> H
+ // W -> W
+ // O -> C
+ const auto ker_shape = _ctx.at(ker_index).shape().asFeature(model::Layout::NHWC);
+
+ UNUSED_RELEASE(ofm_shape);
+ UNUSED_RELEASE(ifm_shape);
+ UNUSED_RELEASE(ker_shape);
+
+ assert((node.param().padding.type == model::PaddingType::SAME) ||
+ (node.param().padding.type == model::PaddingType::VALID));
+ assert(ifm_shape.N == ofm_shape.N);
+ assert(ifm_shape.C == ker_shape.C);
+ assert(ker_shape.N == ofm_shape.C);
+}
+
+void OperationValidator::visit(const model::operation::GatherNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+
+ const auto ifm_index{node.getInputs().at(model::operation::GatherNode::Input::INPUT)};
+ const auto indices_index{node.getInputs().at(model::operation::GatherNode::Input::INDICES)};
+
+ const auto axis_index{node.param().axis_index};
+
+ const auto ifm_shape = _ctx.at(ifm_index).shape();
+ const auto indices_shape = _ctx.at(indices_index).shape();
+ const auto axis_shape = _ctx.at(axis_index).shape();
+ const auto ofm_shape = _ctx.at(ofm_index).shape();
+
+ UNUSED_RELEASE(ifm_shape);
+ UNUSED_RELEASE(indices_shape);
+ UNUSED_RELEASE(axis_shape);
+ UNUSED_RELEASE(ofm_shape);
+
+ assert(ifm_shape.rank() <= 4);
+ assert(indices_shape.rank() <= 3);
+ assert(ofm_shape.rank() <= 4);
+ assert(_ctx.at(axis_index).isConstant());
+ assert(axis_shape.rank() == 0);
+}
+
+void OperationValidator::visit(const model::operation::DequantizeNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::DequantizeNode::Input::INPUT)};
+
+ UNUSED_RELEASE(output_index);
+ UNUSED_RELEASE(input_index);
+
+ assert(_ctx.at(input_index).shape().rank() <= 4);
+ assert(_ctx.at(input_index).shape() == _ctx.at(output_index).shape());
+ assert(_ctx.at(input_index).typeInfo().type() == neurun::model::DataType::QUANT8_ASYMM);
+ assert(_ctx.at(output_index).typeInfo().type() == neurun::model::DataType::FLOAT32);
+}
+
+void OperationValidator::visit(const model::operation::MeanNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::MeanNode::Input::INPUT)};
+
+ const auto ifm_shape = _ctx.at(ifm_index).shape();
+ const auto ofm_shape = _ctx.at(ofm_index).shape();
+
+  // NOTE For 4-dimensional input, if the ranks of input and output differ, this runtime only
+  // supports reducing height and width, or reducing depth.
+  // TODO Support all reduction cases for ranks up to 4.
+  // For correct permutation, the output's shape would have to keep each dimension at the same
+  // position as in the input, but the positions of matching dimensions may differ between the
+  // two. For example, an input of shape {2,3,4,5} can be reduced to an output of shape {3,5}.
+  // The position-preserving output shape would be {1,3,1,5}, while the actual output shape may
+  // be {3,5}; naively extending {3,5} to 4 dimensions yields {1,1,3,5} instead.
+  // Even if the output shape were changed to {1,3,1,5}, another problem remains: the output
+  // tensor consumed by the next operation would then also have shape {1,3,1,5}, which the next
+  // operation may not expect.
+ if (ifm_shape.rank() == 4 && ifm_shape.rank() != ofm_shape.rank())
+ {
+ if (ofm_shape.rank() == 2)
+ {
+ // Reducing HW
+ assert(ifm_shape.dim(0) == ofm_shape.dim(0) && ifm_shape.dim(3) == ofm_shape.dim(1));
+ }
+ else if (ofm_shape.rank() == 3)
+ {
+      // Reducing C, or
+      // (reducing H with C(ifm) == C(ofm) == 1), or (reducing W with C(ifm) == C(ofm) == 1)
+ assert((ifm_shape.dim(0) == ofm_shape.dim(0) && ifm_shape.dim(1) == ofm_shape.dim(1) &&
+ ifm_shape.dim(2) == ofm_shape.dim(2)) ||
+ (ifm_shape.dim(0) == ofm_shape.dim(0) &&
+ (ifm_shape.dim(1) == ofm_shape.dim(1) || ifm_shape.dim(2) == ofm_shape.dim(1)) &&
+ ifm_shape.dim(3) == 1 && ofm_shape.dim(2) == 1));
+ }
+ }
+}
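+
+// Worked example of the rank-2 branch above (illustrative only): for an ifm
+// of {2,3,4,5} and an ofm of {2,5} (mean over H and W), the checks pass since
+// ifm.dim(0) == ofm.dim(0) and ifm.dim(3) == ofm.dim(1); an ofm of {3,5}
+// (mean over N and W) would be rejected by the same checks.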
+
+void OperationValidator::visit(const model::operation::DepthToSpaceNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::DepthToSpaceNode::Input::INPUT)};
+ const auto block_size_index{node.param().block_size_index};
+
+ const auto frontend_layout = _current_subg_layout;
+ const auto output_shape = _ctx.at(output_index).shape().asFeature(frontend_layout);
+ const auto input_shape = _ctx.at(input_index).shape().asFeature(frontend_layout);
+
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(input_shape);
+
+ assert(_ctx.at(input_index).shape().rank() == 4);
+ assert(_ctx.at(output_index).shape().rank() == 4);
+
+ int32_t block_size = _ctx.at(block_size_index).asScalar<int32_t>();
+
+ UNUSED_RELEASE(block_size);
+
+ assert(block_size > 0);
+
+ { // assertions block
+ assert(output_shape.N == input_shape.N);
+ assert(output_shape.H == input_shape.H * block_size);
+ assert(output_shape.W == input_shape.W * block_size);
+ assert(input_shape.C % (block_size * block_size) == 0);
+ assert(output_shape.C == input_shape.C / (block_size * block_size));
+ }
+}
+
+void OperationValidator::visit(const model::operation::ReduceMinNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::ReduceMinNode::Input::INPUT)};
+ const auto axis_index{node.param().axis_index};
+
+ auto ifm_shape = _ctx.at(ifm_index).shape();
+ auto ofm_shape = _ctx.at(ofm_index).shape();
+ auto axis_shape = _ctx.at(axis_index).shape();
+
+ UNUSED_RELEASE(ifm_shape);
+ UNUSED_RELEASE(ofm_shape);
+ UNUSED_RELEASE(axis_shape);
+
+ assert(ifm_shape.rank() <= 4);
+ assert(ofm_shape.rank() <= ifm_shape.rank());
+ assert(_ctx.at(axis_index).isConstant());
+ assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
+
+  // NOTE For 4-dimensional input, if the ranks of input and output differ, this runtime only
+  // supports reducing height and width, or reducing depth.
+  // TODO Support all reduction cases for ranks up to 4.
+  // For correct permutation, the output's shape would have to keep each dimension at the same
+  // position as in the input, but the positions of matching dimensions may differ between the
+  // two. For example, an input of shape {2,3,4,5} can be reduced to an output of shape {3,5}.
+  // The position-preserving output shape would be {1,3,1,5}, while the actual output shape may
+  // be {3,5}; naively extending {3,5} to 4 dimensions yields {1,1,3,5} instead.
+  // Even if the output shape were changed to {1,3,1,5}, another problem remains: the output
+  // tensor consumed by the next operation would then also have shape {1,3,1,5}, which the next
+  // operation may not expect.
+ if (ifm_shape.rank() == 4 && ifm_shape.rank() != ofm_shape.rank())
+ {
+ if (ofm_shape.rank() == 2)
+ {
+ // Reducing HW
+ assert(ifm_shape.dim(0) == ofm_shape.dim(0) && ifm_shape.dim(3) == ofm_shape.dim(1));
+ }
+ else if (ofm_shape.rank() == 3)
+ {
+      // Reducing C, or
+      // (reducing H with C(ifm) == C(ofm) == 1), or (reducing W with C(ifm) == C(ofm) == 1)
+ assert((ifm_shape.dim(0) == ofm_shape.dim(0) && ifm_shape.dim(1) == ofm_shape.dim(1) &&
+ ifm_shape.dim(2) == ofm_shape.dim(2)) ||
+ (ifm_shape.dim(0) == ofm_shape.dim(0) &&
+ (ifm_shape.dim(1) == ofm_shape.dim(1) || ifm_shape.dim(2) == ofm_shape.dim(1)) &&
+ ifm_shape.dim(3) == 1 && ofm_shape.dim(2) == 1));
+ }
+ }
+}
+
+void OperationValidator::visit(const model::operation::LSTMNode &node)
+{
+  // NOTE This validation covers the static case (fixed shapes) only, not dynamic shapes
+  // TODO Support dynamic shapes
+ const auto scratch_buffer_index{
+ node.getOutputs().at(model::operation::LSTMNode::Output::SCRATCH_BUFFER)};
+ const auto output_state_out_index{
+ node.getOutputs().at(model::operation::LSTMNode::Output::OUTPUT_STATE_OUT)};
+ const auto cell_state_out_index{
+ node.getOutputs().at(model::operation::LSTMNode::Output::CELL_STATE_OUT)};
+ const auto output_index{node.getOutputs().at(model::operation::LSTMNode::Output::OUTPUT)};
+
+ const auto input_index{node.getInputs().at(model::operation::LSTMNode::Input::INPUT)};
+ const auto input_to_input_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::INPUT_TO_INPUT_WEIGHTS)};
+ const auto input_to_forget_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::INPUT_TO_FORGET_WEIGHTS)};
+ const auto input_to_cell_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::INPUT_TO_CELL_WEIGHTS)};
+ const auto input_to_output_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::INPUT_TO_OUTPUT_WEIGHTS)};
+ const auto recurrent_to_input_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::RECURRENT_TO_INPUT_WEIGHTS)};
+ const auto recurrent_to_forget_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::RECURRENT_TO_FORGET_WEIGHTS)};
+ const auto recurrent_to_cell_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::RECURRENT_TO_CELL_WEIGHTS)};
+ const auto recurrent_to_output_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::RECURRENT_TO_OUTPUT_WEIGHTS)};
+ const auto cell_to_input_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::CELL_TO_INPUT_WEIGHTS)};
+ const auto cell_to_forget_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::CELL_TO_FORGET_WEIGHTS)};
+ const auto cell_to_output_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::CELL_TO_OUTPUT_WEIGHTS)};
+ const auto input_gate_bias_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::INPUT_GATE_BIAS)};
+ const auto forget_gate_bias_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::FORGET_GATE_BIAS)};
+ const auto cell_bias_index{node.getInputs().at(model::operation::LSTMNode::Input::CELL_BIAS)};
+ const auto output_gate_bias_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::OUTPUT_GATE_BIAS)};
+ const auto projection_weights_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::PROJECTION_WEIGHTS)};
+ const auto projection_bias_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::PROJECTION_BIAS)};
+ const auto output_state_in_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::OUTPUT_STATE_IN)};
+ const auto cell_state_in_index{
+ node.getInputs().at(model::operation::LSTMNode::Input::CELL_STATE_IN)};
+
+ UNUSED_RELEASE(scratch_buffer_index);
+ UNUSED_RELEASE(output_state_out_index);
+ UNUSED_RELEASE(cell_state_out_index);
+ UNUSED_RELEASE(output_index);
+
+ UNUSED_RELEASE(input_index);
+ UNUSED_RELEASE(input_to_input_weights_index);
+ UNUSED_RELEASE(input_to_forget_weights_index);
+ UNUSED_RELEASE(input_to_cell_weights_index);
+ UNUSED_RELEASE(input_to_output_weights_index);
+ UNUSED_RELEASE(recurrent_to_input_weights_index);
+ UNUSED_RELEASE(recurrent_to_forget_weights_index);
+ UNUSED_RELEASE(recurrent_to_cell_weights_index);
+ UNUSED_RELEASE(recurrent_to_output_weights_index);
+ UNUSED_RELEASE(cell_to_input_weights_index);
+ UNUSED_RELEASE(cell_to_forget_weights_index);
+ UNUSED_RELEASE(cell_to_output_weights_index);
+ UNUSED_RELEASE(input_gate_bias_index);
+ UNUSED_RELEASE(forget_gate_bias_index);
+ UNUSED_RELEASE(cell_bias_index);
+ UNUSED_RELEASE(output_gate_bias_index);
+ UNUSED_RELEASE(projection_weights_index);
+ UNUSED_RELEASE(projection_bias_index);
+ UNUSED_RELEASE(output_state_in_index);
+ UNUSED_RELEASE(cell_state_in_index);
+
+ assert(_ctx.at(scratch_buffer_index).shape().rank() == 2 &&
+ _ctx.at(output_state_out_index).shape().rank() == 2 &&
+ _ctx.at(cell_state_out_index).shape().rank() == 2 &&
+ _ctx.at(output_index).shape().rank() == 2 && _ctx.at(input_index).shape().rank() == 2 &&
+ _ctx.at(input_to_input_weights_index).shape().rank() == 2 &&
+ _ctx.at(input_to_forget_weights_index).shape().rank() == 2 &&
+ _ctx.at(input_to_cell_weights_index).shape().rank() == 2 &&
+ _ctx.at(input_to_output_weights_index).shape().rank() == 2 &&
+ _ctx.at(recurrent_to_input_weights_index).shape().rank() == 2 &&
+ _ctx.at(recurrent_to_forget_weights_index).shape().rank() == 2 &&
+ _ctx.at(recurrent_to_cell_weights_index).shape().rank() == 2 &&
+ _ctx.at(recurrent_to_output_weights_index).shape().rank() == 2 &&
+ _ctx.at(projection_weights_index).shape().rank() == 2 &&
+ _ctx.at(output_state_in_index).shape().rank() == 2 &&
+ _ctx.at(cell_state_in_index).shape().rank() == 2);
+
+ assert(_ctx.at(cell_to_input_weights_index).shape().rank() == 1 &&
+ _ctx.at(cell_to_forget_weights_index).shape().rank() == 1 &&
+ _ctx.at(cell_to_output_weights_index).shape().rank() == 1 &&
+ _ctx.at(input_gate_bias_index).shape().rank() == 1 &&
+ _ctx.at(forget_gate_bias_index).shape().rank() == 1 &&
+ _ctx.at(cell_bias_index).shape().rank() == 1 &&
+ _ctx.at(output_gate_bias_index).shape().rank() == 1 &&
+ _ctx.at(projection_bias_index).shape().rank() == 1);
+
+ // CIFG assertion
+ assert((_ctx.at(input_to_input_weights_index).shape().dim(0) == 0 &&
+ _ctx.at(input_to_input_weights_index).shape().dim(1) == 0 &&
+ _ctx.at(recurrent_to_input_weights_index).shape().dim(0) == 0 &&
+ _ctx.at(recurrent_to_input_weights_index).shape().dim(1) == 0 &&
+ _ctx.at(input_gate_bias_index).shape().dim(0) == 0 &&
+ _ctx.at(cell_to_input_weights_index).shape().dim(0) == 0) ||
+ (_ctx.at(input_to_input_weights_index).shape().dim(0) != 0 &&
+ _ctx.at(input_to_input_weights_index).shape().dim(1) != 0 &&
+ _ctx.at(recurrent_to_input_weights_index).shape().dim(0) != 0 &&
+ _ctx.at(recurrent_to_input_weights_index).shape().dim(1) != 0 &&
+ _ctx.at(input_gate_bias_index).shape().dim(0) != 0));
+
+ // Peephole assertion
+ assert((_ctx.at(cell_to_forget_weights_index).shape().dim(0) == 0 &&
+ _ctx.at(cell_to_output_weights_index).shape().dim(0) == 0) ||
+ (_ctx.at(cell_to_forget_weights_index).shape().dim(0) != 0 &&
+ _ctx.at(cell_to_output_weights_index).shape().dim(0) != 0));
+
+ bool has_input_to_input_weights = _ctx.at(input_to_input_weights_index).shape().dim(0) != 0 &&
+ _ctx.at(input_to_input_weights_index).shape().dim(1) != 0;
+ bool has_recurrent_to_input_weights =
+ _ctx.at(recurrent_to_input_weights_index).shape().dim(0) != 0 &&
+ _ctx.at(recurrent_to_input_weights_index).shape().dim(1) != 0;
+ bool has_input_gate_bias = _ctx.at(input_gate_bias_index).shape().dim(0) != 0;
+ bool has_cell_to_input_weights = _ctx.at(cell_to_input_weights_index).shape().dim(0) != 0;
+ bool has_cell_to_forget_weights = _ctx.at(cell_to_forget_weights_index).shape().dim(0) != 0;
+ bool has_cell_to_output_weights = _ctx.at(cell_to_output_weights_index).shape().dim(0) != 0;
+ bool has_projection_weights = _ctx.at(projection_weights_index).shape().dim(0) != 0 &&
+ _ctx.at(projection_weights_index).shape().dim(1) != 0;
+  bool has_projection_bias = _ctx.at(projection_bias_index).shape().dim(0) != 0;
+
+  // NOTE The cell_to_input_weights do not exist without peephole connections, even for a regular (non-CIFG) LSTM.
+ // true: no CIFG
+ // false: CIFG
+ bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
+
+  // NOTE The cell_to_input_weights do not exist for CIFG, even with peephole connections.
+ // true: peephole
+ // false: no peephole
+ bool has_peephole_param = has_cell_to_forget_weights && has_cell_to_output_weights;
+
+ // NOTE The projection weights may have data but the projection bias may not.
+ bool has_projection_param = has_projection_weights;
+
+ UNUSED_RELEASE(has_input_to_input_weights);
+ UNUSED_RELEASE(has_recurrent_to_input_weights);
+ UNUSED_RELEASE(has_input_gate_bias);
+ UNUSED_RELEASE(has_cell_to_input_weights);
+ UNUSED_RELEASE(has_cell_to_forget_weights);
+ UNUSED_RELEASE(has_cell_to_output_weights);
+ UNUSED_RELEASE(has_projection_weights);
+ UNUSED_RELEASE(has_projection_bias);
+ UNUSED_RELEASE(has_cifg_param);
+ UNUSED_RELEASE(has_peephole_param);
+ UNUSED_RELEASE(has_projection_param);
+
+ const auto batch_size = _ctx.at(input_index).shape().dim(0);
+ UNUSED_RELEASE(batch_size);
+ assert(batch_size == _ctx.at(output_state_in_index).shape().dim(0) &&
+ batch_size == _ctx.at(cell_state_in_index).shape().dim(0) &&
+ batch_size == _ctx.at(scratch_buffer_index).shape().dim(0) &&
+ batch_size == _ctx.at(output_state_out_index).shape().dim(0) &&
+ batch_size == _ctx.at(cell_state_out_index).shape().dim(0) &&
+ batch_size == _ctx.at(output_index).shape().dim(0));
+
+ const auto input_size = _ctx.at(input_index).shape().dim(1);
+ UNUSED_RELEASE(input_size);
+ assert(input_size == _ctx.at(input_to_forget_weights_index).shape().dim(1) &&
+ input_size == _ctx.at(input_to_cell_weights_index).shape().dim(1) &&
+ input_size == _ctx.at(input_to_output_weights_index).shape().dim(1));
+
+ const auto num_units = _ctx.at(cell_state_out_index).shape().dim(1);
+ UNUSED_RELEASE(num_units);
+ assert(num_units == _ctx.at(input_to_forget_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(input_to_cell_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(input_to_output_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(recurrent_to_forget_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(recurrent_to_cell_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(recurrent_to_output_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(forget_gate_bias_index).shape().dim(0) &&
+ num_units == _ctx.at(cell_bias_index).shape().dim(0) &&
+ num_units == _ctx.at(output_gate_bias_index).shape().dim(0) &&
+ num_units == _ctx.at(cell_state_in_index).shape().dim(1) &&
+ (((num_units * 3) == _ctx.at(scratch_buffer_index).shape().dim(1)) ||
+ ((num_units * 4) == _ctx.at(scratch_buffer_index).shape().dim(1))));
+
+ const auto output_size = _ctx.at(output_index).shape().dim(1);
+ UNUSED_RELEASE(output_size);
+ assert(output_size == _ctx.at(recurrent_to_forget_weights_index).shape().dim(1) &&
+ output_size == _ctx.at(recurrent_to_cell_weights_index).shape().dim(1) &&
+ output_size == _ctx.at(recurrent_to_output_weights_index).shape().dim(1) &&
+ output_size == _ctx.at(output_state_in_index).shape().dim(1) &&
+ output_size == _ctx.at(output_state_out_index).shape().dim(1));
+
+ if (has_cifg_param)
+ {
+ assert(input_size == _ctx.at(input_to_input_weights_index).shape().dim(1));
+ assert(num_units == _ctx.at(input_to_input_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(recurrent_to_input_weights_index).shape().dim(0) &&
+ (num_units == _ctx.at(cell_to_input_weights_index).shape().dim(0) ||
+ _ctx.at(cell_to_input_weights_index).shape().dim(0) == 0 /* non-peephole */) &&
+ num_units == _ctx.at(input_gate_bias_index).shape().dim(0));
+ assert(output_size == _ctx.at(recurrent_to_input_weights_index).shape().dim(1));
+ assert(has_input_to_input_weights && has_recurrent_to_input_weights && has_input_gate_bias);
+ if (has_cell_to_input_weights)
+ {
+ // NOTE The cell_to_input_weights exist only in case of non-CIFG and peephole.
+ assert(has_peephole_param);
+ }
+ assert(_ctx.at(scratch_buffer_index).shape().dim(1) == num_units * 4);
+ }
+ else
+ {
+ assert(_ctx.at(scratch_buffer_index).shape().dim(1) == num_units * 3);
+ }
+
+ if (has_peephole_param)
+ {
+ assert(num_units == _ctx.at(cell_to_forget_weights_index).shape().dim(0) &&
+ num_units == _ctx.at(cell_to_output_weights_index).shape().dim(0) &&
+ (num_units == _ctx.at(cell_to_input_weights_index).shape().dim(0) ||
+ _ctx.at(cell_to_input_weights_index).shape().dim(0) == 0 /* CIFG */));
+ }
+
+ if (has_projection_param)
+ {
+ assert(num_units == _ctx.at(projection_weights_index).shape().dim(1));
+ assert(output_size == _ctx.at(projection_weights_index).shape().dim(0));
+ if (has_projection_bias)
+ {
+ assert(output_size == _ctx.at(projection_bias_index).shape().dim(0));
+ }
+ }
+}
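+
+// Condensed view of the optional-parameter detection above (illustrative
+// only, not part of this patch):
+//   non-CIFG   <=> input_to_input_weights and recurrent_to_input_weights
+//                  both have non-zero dimensions
+//   peephole   <=> cell_to_forget_weights and cell_to_output_weights both
+//                  have non-zero dim(0)
+//   projection <=> projection_weights has non-zero dimensions; the
+//                  projection bias is optional on top of that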
+
+void OperationValidator::visit(const model::operation::UnpackNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::UnpackNode::Input::INPUT)};
+ const auto num{node.param().num};
+ const auto axis{node.param().axis};
+
+ const auto &input_shape = _ctx.at(input_index).shape();
+ const auto input_rank = static_cast<int32_t>(input_shape.rank());
+
+ UNUSED_RELEASE(num);
+ UNUSED_RELEASE(axis);
+ UNUSED_RELEASE(input_rank);
+
+ assert(num == static_cast<int32_t>(node.getOutputs().size()));
+ assert(axis >= -input_rank && axis < input_rank);
+}
+
+void OperationValidator::visit(const model::operation::PadNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::PadNode::Input::INPUT)};
+ const auto pad_index{node.getInputs().at(model::operation::PadNode::Input::PAD)};
+  const auto output_index{node.getOutputs().at(0)};
+
+ const auto &pad_shape = _ctx.at(pad_index).shape();
+ const auto input_rank = static_cast<int32_t>(_ctx.at(input_index).shape().rank());
+
+ UNUSED_RELEASE(pad_shape);
+ UNUSED_RELEASE(input_rank);
+ UNUSED_RELEASE(output_index);
+
+ assert(pad_shape.rank() == 2);
+ assert(pad_shape.dim(0) == input_rank);
+ assert(pad_shape.dim(1) == 2);
+ assert(_ctx.at(pad_index).typeInfo().type() == model::DataType::INT32);
+ assert(_ctx.at(input_index).shape().rank() == _ctx.at(output_index).shape().rank());
+}
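+
+// Illustrative example (values hypothetical): for a rank-3 input, PAD is an
+// INT32 tensor of shape {3, 2} such as {{1,1},{0,2},{0,0}}, giving the
+// (before, after) padding for each input dimension.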
+
+} // namespace compiler
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/OperationValidator.h b/runtimes/neurun/core/src/compiler/OperationValidator.h
new file mode 100644
index 000000000..76774daeb
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/OperationValidator.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_OPERATION_VALIDATOR_H__
+#define __NEURUN_COMPILER_OPERATION_VALIDATOR_H__
+
+#include "model/Layout.h"
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+class Operands;
+} // namespace model
+} // namespace neurun
+
+namespace neurun
+{
+namespace compiler
+{
+
+class OperationValidator : public model::OperationVisitor
+{
+public:
+ OperationValidator(const neurun::model::Operands &ctx)
+ : _ctx{ctx}, _current_subg_layout{model::Layout::UNKNOWN}
+ {
+ }
+
+public:
+ void visit(const model::Subgraph &node) override;
+ void visit(const model::operation::CastNode &node) override;
+ void visit(const model::operation::ComparisonNode &node) override;
+ void visit(const model::operation::SoftmaxNode &node) override;
+ void visit(const model::operation::PermuteNode &node) override;
+ void visit(const model::operation::ReduceSumNode &node) override;
+ void visit(const model::operation::TransposeNode &node) override;
+ void visit(const model::operation::ReduceMaxNode &node) override;
+ void visit(const model::operation::RNNNode &node) override;
+ void visit(const model::operation::SpaceToDepthNode &node) override;
+ void visit(const model::operation::EmbeddingLookupNode &node) override;
+ void visit(const model::operation::ExpNode &node) override;
+ void visit(const model::operation::FloorNode &node) override;
+ void visit(const model::operation::HashtableLookupNode &node) override;
+ void visit(const model::operation::TransposeConvNode &node) override;
+ void visit(const model::operation::GatherNode &node) override;
+ void visit(const model::operation::DequantizeNode &node) override;
+ void visit(const model::operation::MeanNode &node) override;
+ void visit(const model::operation::DepthToSpaceNode &node) override;
+ void visit(const model::operation::ReduceMinNode &node) override;
+ void visit(const model::operation::LSTMNode &node) override;
+ void visit(const model::operation::UnpackNode &node) override;
+ void visit(const model::operation::PadNode &node) override;
+
+private:
+ const neurun::model::Operands &_ctx;
+ model::Layout _current_subg_layout;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_OPERATION_VALIDATOR_H__
diff --git a/runtimes/neurun/core/src/compiler/ParamChecker.cc b/runtimes/neurun/core/src/compiler/ParamChecker.cc
new file mode 100644
index 000000000..10bfa1ea3
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/ParamChecker.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ParamChecker.h"
+
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+void ParamChecker::operator()()
+{
+ _model->operations().iterate(
+ [&](const model::OperationIndex &, const model::Operation &node) { node.accept(*this); });
+}
+
+} // namespace compiler
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/ParamChecker.h b/runtimes/neurun/core/src/compiler/ParamChecker.h
new file mode 100644
index 000000000..82f46692d
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/ParamChecker.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ParamChecker.h
+ * @brief This file contains ParamChecker, which checks whether operations'\n
+ *        parameters are compilable at the machine-independent phase\n
+ *        e.g. check whether a param is constant
+ */
+#ifndef __NEURUN_COMPILER_PARAM_CHECKER_H__
+#define __NEURUN_COMPILER_PARAM_CHECKER_H__
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace graph
+{
+class Graph;
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace compiler
+{
+
+class ParamChecker : public model::OperationVisitor
+{
+public:
+ /**
+ * @brief Construct a new Param Checker object (deleted)
+ */
+ ParamChecker(void) = delete;
+ /**
+ * @brief Construct a new Param Checker object
+ * @param[in] model Graph model to check
+ */
+ ParamChecker(std::shared_ptr<graph::Graph> model) : _model{model} {}
+
+public:
+ /**
+ * @brief Run parameter analysis
+ */
+ void operator()();
+ /**
+ * @brief Return analysis result if model have non-const parameter
+ * @return @c true if there is non-const parameter, otherwise @c false
+ */
+ bool haveNoneConstParam(void) { return _nonConstParam; }
+
+private:
+ const std::shared_ptr<graph::Graph> _model;
+ bool _nonConstParam{false};
+};
+
+} // namespace compiler
+} // namespace neurun
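+
+// Usage sketch (illustrative only; assumes a std::shared_ptr<graph::Graph>
+// named "model" is at hand):
+//
+//   compiler::ParamChecker checker{model};
+//   checker();
+//   if (checker.haveNoneConstParam())
+//   {
+//     // reject or fall back: some parameter is not a compile-time constant
+//   }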
+
+#endif // __NEURUN_COMPILER_PARAM_CHECKER_H__
diff --git a/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc b/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc
new file mode 100644
index 000000000..c2c6da290
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SubTensorAnalyzer.h"
+
+#include <typeinfo>
+
+#include "cpp14/memory.h"
+#include "model/OperandIndexSequence.h"
+#include "util/logging.h"
+#include "util/Coordinates.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+void SubTensorAnalyzer::visit(const model::operation::ConcatNode &node)
+{
+  // If the operator is Concat (or another operator involving subsumption), fill in subsumption info
+  // TODO Handle the case where one tensor is a subset of multiple parents, or of a model input
+  //      Solution 1. Handle the 1st parent only and ignore the others (needs inversion for other children)
+  //      Solution 2. Insert a copy operation for the other parents
+ int32_t axis_raw = node.param().axis;
+
+ auto &output_index = node.getOutputs().at(0);
+ auto &inputs = node.getInputs();
+
+ int32_t axis_point = 0;
+ const auto rank = _ctx.at(output_index).shape().rank();
+ int32_t axis = axis_raw < 0 ? (axis_raw + rank) : axis_raw;
+ assert(rank > axis);
+
+  // NOTE Multiple parent tensors are not supported yet
+ for (auto &input_index : inputs)
+ {
+ if (_ctx.at(input_index).parent_info() != nullptr)
+ {
+ return;
+ }
+ }
+
+ for (auto &input_index : inputs)
+ {
+ auto input_shape = _ctx.at(input_index).shape();
+ assert(rank == input_shape.rank());
+
+ neurun::util::Coordinates coordinate_info{};
+ for (int i = 0; i < rank; i++)
+ {
+ coordinate_info.set(i, 0);
+ }
+ coordinate_info.set(axis, axis_point);
+
+ std::unique_ptr<graph::operand::ParentInfo> parentInfo =
+ nnfw::cpp14::make_unique<graph::operand::ParentInfo>(output_index, coordinate_info);
+
+ _ctx.at(input_index).parent_info(std::move(parentInfo));
+
+ axis_point += input_shape.dim(axis);
+ }
+}
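+
+// Worked example (illustrative only): concatenating inputs of shapes
+// {1,2,4} and {1,3,4} along axis 1 into an output of shape {1,5,4} records
+// ParentInfo offsets {0,0,0} and {0,2,0}; axis_point advances by each
+// input's extent along the concat axis.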
+
+} // namespace compiler
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.h b/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.h
new file mode 100644
index 000000000..606d755b7
--- /dev/null
+++ b/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file SubTensorAnalyzer.h
+ * @brief This file contains SubTensorAnalyzer to analyze tensor subsumption
+ *        using the operation visitor pattern
+ */
+
+#ifndef __NEURUN_COMPILER_SUBTENSOR_ANALYZER_H__
+#define __NEURUN_COMPILER_SUBTENSOR_ANALYZER_H__
+
+#include "model/OperationVisitor.h"
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operand
+{
+class Set;
+} // namespace operand
+} // namespace model
+} // namespace neurun
+
+namespace neurun
+{
+namespace compiler
+{
+
+/**
+ * @brief Class to analyze tensor subsumption
+ */
+class SubTensorAnalyzer : public model::OperationVisitor
+{
+public:
+ /**
+ * @brief Construct a new SubTensorAnalyzer object
+ * @param[in] ctx Graph operand set
+ */
+ SubTensorAnalyzer(neurun::model::Operands &ctx) : _ctx{ctx}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void visit(const model::operation::ConcatNode &) override;
+
+private:
+ neurun::model::Operands &_ctx; // TODO Refactor : Do not update Operands
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_SUBTENSOR_ANALYZER_H__
diff --git a/runtimes/neurun/core/src/dumper/dot/DotBuilder.cc b/runtimes/neurun/core/src/dumper/dot/DotBuilder.cc
new file mode 100644
index 000000000..8563b4cf0
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/DotBuilder.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DotBuilder.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+// DotBuilder
+DotBuilder::DotBuilder() {}
+
+void DotBuilder::update(const Node &node_info)
+{
+ addNode(node_info);
+ for (auto edge : node_info.edges())
+ {
+ addEdge(node_info, *edge);
+ }
+}
+
+void DotBuilder::addSubgraph(const DotSubgraphInfo &subgraph_info)
+{
+ _dot << "subgraph cluster_" << subgraph_info.index().value() << " {\n";
+ _dot << " label=\"" << subgraph_info.label() << "\";\n";
+ _dot << " style=filled;\n";
+ _dot << " color=lightgrey;\n";
+ _dot << " ";
+ for (auto op : subgraph_info.operations())
+ {
+ _dot << "operation" << op.value() << "; ";
+ }
+ for (auto op : subgraph_info.operands())
+ {
+ _dot << "operand" << op.value() << "; ";
+ }
+ _dot << "\n";
+ _dot << "}\n";
+}
+
+void DotBuilder::writeDot(std::ostream &os)
+{
+ os << "digraph D {\n"
+ << _dot.str() << "\n"
+ << "}\n";
+}
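+
+// Sketch of the emitted text (illustrative only; node names and the Conv2D
+// label are hypothetical):
+//
+//   digraph D {
+//   operation0[label="0 : Conv2D" shape="rect" ...];
+//   operand1[label="1" shape="ellipse" ...];
+//   operation0 -> operand1;
+//   }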
+
+void DotBuilder::addNode(const Node &node)
+{
+ _dot << node.id();
+ _dot << "[";
+ for (auto attr : node.attributes())
+ {
+ _dot << attr.first << "=\"" << attr.second << "\" ";
+ }
+ _dot << "];\n";
+}
+
+void DotBuilder::addEdge(const Node &node1, const Node &node2)
+{
+ _dot << node1.id() << " -> " << node2.id() << ";\n";
+}
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/dumper/dot/DotBuilder.h b/runtimes/neurun/core/src/dumper/dot/DotBuilder.h
new file mode 100644
index 000000000..b78fd4469
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/DotBuilder.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_DUMPER_DOT_DOT_BUILDER_H__
+#define __NEURUN_DUMPER_DOT_DOT_BUILDER_H__
+
+#include <sstream>
+
+#include "model/Index.h"
+#include "model/Operation.h"
+#include "model/Operand.h"
+
+#include "OperationNode.h"
+#include "OperandNode.h"
+#include "DotSubgraphInfo.h"
+
+using Operation = neurun::model::Operation;
+using Object = neurun::model::Operand;
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+class DotBuilder
+{
+public:
+ DotBuilder();
+
+public:
+ void update(const Node &dotinfo);
+ void addSubgraph(const DotSubgraphInfo &subgraph_info);
+
+ void writeDot(std::ostream &os);
+
+private:
+ void addNode(const Node &dotinfo);
+ void addEdge(const Node &dotinfo1, const Node &dotinfo2);
+
+ std::stringstream _dot;
+};
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
+
+#endif // __NEURUN_DUMPER_DOT_DOT_BUILDER_H__
diff --git a/runtimes/neurun/core/src/dumper/dot/DotDumper.cc b/runtimes/neurun/core/src/dumper/dot/DotDumper.cc
new file mode 100644
index 000000000..d01b472c3
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/DotDumper.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <unordered_map>
+
+#include "DotDumper.h"
+#include "DotBuilder.h"
+#include "DotSubgraphInfo.h"
+#include "model/Subgraph.h"
+#include "model/OperationIndexMap.h"
+#include "backend/Backend.h"
+#include "backend/BackendManager.h"
+#include "backend/IConfig.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+using namespace neurun::graph;
+
+void DotDumper::dump(const std::string &tag)
+{
+ if (_level == Level::OFF)
+ {
+ return;
+ }
+
+ neurun::dumper::dot::DotBuilder dot_builder;
+
+ auto &operations = _graph.operations();
+ auto &operands = _graph.operands();
+
+ model::OperationIndexMap<std::unique_ptr<OperationNode>> operation_nodes;
+ std::unordered_map<model::OperandIndex, std::unique_ptr<OperandNode>> operand_nodes;
+
+ operations.iterate([&](const model::OperationIndex &index, const model::Operation &op) {
+ auto node = nnfw::cpp14::make_unique<OperationNode>(index, op);
+
+ for (auto output : op.getOutputs())
+ {
+ using neurun::dumper::dot::OperandNode;
+ auto child = std::make_shared<OperandNode>(output, OperandNode::Type::MODEL_OUTPUT);
+ node->addEdge(child);
+ }
+
+ operation_nodes.emplace(index, std::move(node));
+ });
+
+ auto backend_to_fillcolor = [](const backend::Backend *backend) {
+ static const auto map = []() {
+ std::unordered_map<const backend::Backend *, std::string> ret;
+ uint32_t index = 1; // Start from 1 to avoid 0(red) which is too dark :(
+ for (const auto backend : backend::BackendManager::instance().getAll())
+ {
+ ret.emplace(backend, Node::BG_COLORS[index]);
+ index = (index + 1) % (sizeof(Node::BG_COLORS) / sizeof(Node::BG_COLORS[0]));
+ }
+ return ret;
+ }();
+
+ auto itr = map.find(backend);
+ if (itr == map.end())
+ {
+ return Node::DEFAULT_FILLCOLOR;
+ }
+ else
+ {
+ return itr->second;
+ }
+ };
+
+ util::Set<model::OperandIndex> shown_operand_set;
+
+ operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) {
+ bool showing_cond = false;
+ if (_level == Level::ALL)
+ {
+ showing_cond = true;
+ }
+ else
+ {
+ showing_cond = !object.isConstant();
+ }
+ if (object.isConstant() || _graph.getInputs().contains(index))
+ {
+ showing_cond = showing_cond && (object.getUses().size() > 0);
+ }
+ if (showing_cond)
+ {
+ shown_operand_set.add(index);
+
+ auto type = [&]() {
+ using neurun::dumper::dot::OperandNode;
+ if (_graph.getInputs().contains(index))
+ return OperandNode::Type::MODEL_INPUT;
+ if (_graph.getOutputs().contains(index))
+ return OperandNode::Type::MODEL_OUTPUT;
+ return OperandNode::Type::INTERNAL;
+ }();
+
+ auto lower_info = _graph.getLowerInfo(index);
+ auto node = nnfw::cpp14::make_unique<OperandNode>(index, type);
+
+ {
+ // Display LowerInfo attributes
+ std::string label = std::to_string(index.value());
+ std::string fillcolor = "";
+ if (lower_info)
+ {
+ const auto &def_factors = lower_info->def_factors();
+ label += "\\n[";
+ label += def_factors.getOnlyElement().backend()->config()->id();
+ label += "]";
+
+ fillcolor = backend_to_fillcolor(lower_info->def_factors().getOnlyElement().backend());
+ }
+ node->setAttribute("label", label);
+ node->setAttribute("fillcolor", fillcolor);
+ }
+
+ for (auto operation_index : object.getUses().list())
+ {
+ auto &operation = operations.at(operation_index);
+ auto child = std::make_shared<OperationNode>(operation_index, operation);
+ node->addEdge(child);
+ }
+
+ operand_nodes.emplace(index, std::move(node));
+ }
+ });
+
+ const auto subgraphs = _graph.subgraphs();
+ if (subgraphs)
+ {
+ subgraphs->iterate([&](const model::SubgraphIndex &index, const model::Subgraph &subgraph) {
+ const auto lower_info = _graph.getLowerInfo(index);
+ auto fillcolor = backend_to_fillcolor(lower_info->backend());
+ std::string label =
+ std::to_string(index.value()) + " [" + lower_info->backend()->config()->id() + "]";
+ DotSubgraphInfo subgraph_info{index, subgraph, shown_operand_set};
+ subgraph_info.label(label);
+ subgraph_info.fillcolor(fillcolor);
+ dot_builder.addSubgraph(subgraph_info);
+
+ // Set fillcolor of all operations in the subgraph
+ for (const auto &op : subgraph.operations())
+ {
+ auto found = operation_nodes.find(op.index);
+ if (found != operation_nodes.end())
+ {
+          auto &op_node = found->second;
+          op_node->setAttribute("fillcolor", fillcolor);
+ }
+ }
+ });
+ }
+
+ for (const auto &e : operation_nodes)
+ dot_builder.update(*e.second);
+ for (const auto &e : operand_nodes)
+ dot_builder.update(*e.second);
+
+ // Dump to file
+ {
+ std::string file_name;
+ file_name += tag;
+ file_name += ".dot";
+ std::filebuf fb;
+
+ fb.open(file_name, std::ios::out);
+ std::ostream os(&fb);
+
+ dot_builder.writeDot(os);
+
+ fb.close();
+ }
+}
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/dumper/dot/DotDumper.h b/runtimes/neurun/core/src/dumper/dot/DotDumper.h
new file mode 100644
index 000000000..4ccaac882
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/DotDumper.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph/Graph.h"
+
+#ifndef __NEURUN_DUMPER_DOT_DOT_DUMPER_H__
+#define __NEURUN_DUMPER_DOT_DOT_DUMPER_H__
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+class DotDumper
+{
+public:
+ enum Level
+ {
+    OFF = 0,               //!< Do not dump
+    ALL_BUT_CONSTANTS = 1, //!< Emit all operations and operands except constants
+    ALL = 2                //!< Emit all operations and operands
+ };
+
+public:
+ DotDumper(const neurun::graph::Graph &graph, Level level) : _graph(graph), _level{level} {}
+
+public:
+ /**
+   * @brief Dump the graph to a dot file named after the tag if "GRAPH_DOT_DUMP" is set
+   *
+   * @param[in] tag The name of the dot file to be created
+ * @return N/A
+ */
+ void dump(const std::string &tag);
+
+private:
+ const neurun::graph::Graph &_graph;
+ Level _level;
+};
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
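+
+// Usage sketch (illustrative only; "after_lower" is a hypothetical tag):
+//
+//   neurun::dumper::dot::DotDumper dumper{graph, DotDumper::Level::ALL};
+//   dumper.dump("after_lower"); // writes "after_lower.dot"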
+
+#endif // __NEURUN_DUMPER_DOT_DOT_DUMPER_H__
diff --git a/runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.cc b/runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.cc
new file mode 100644
index 000000000..1ea681bdb
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DotSubgraphInfo.h"
+
+#include <sstream>
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+DotSubgraphInfo::DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph,
+ const util::Set<model::OperandIndex> &shown_operands)
+ : _index{index}
+{
+ for (const auto &element : subgraph.operations())
+ {
+ _operations.insert(element.index);
+ for (auto o : element.node->getInputs())
+ {
+      // Must be a shown operand and not one of the subgraph's inputs
+ if (shown_operands.contains(o) && !subgraph.getInputs().contains(o))
+ {
+ _operands.insert(o);
+ }
+ }
+ for (auto o : element.node->getOutputs())
+ {
+      // Must be a shown operand and not one of the subgraph's outputs
+ if (shown_operands.contains(o) && !subgraph.getOutputs().contains(o))
+ {
+ _operands.insert(o);
+ }
+ }
+ }
+}
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.h b/runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.h
new file mode 100644
index 000000000..771c5552e
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/DotSubgraphInfo.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CORE_DUMPER_DOT_DOT_SUBGRAPH_INFO_H__
+#define __NEURUN_CORE_DUMPER_DOT_DOT_SUBGRAPH_INFO_H__
+
+#include <unordered_set>
+
+#include "model/Index.h"
+#include "model/Subgraph.h"
+#include "util/Set.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+class DotSubgraphInfo
+{
+public:
+ DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph,
+ const util::Set<model::OperandIndex> &shown_operands);
+
+ model::SubgraphIndex index() const { return _index; }
+ std::string label() const { return _label; }
+ void label(const std::string &val) { _label = val; }
+ std::string fillcolor() const { return _fillcolor; }
+ void fillcolor(const std::string &val) { _fillcolor = val; }
+ const std::unordered_set<model::OperationIndex> &operations() const { return _operations; }
+ const std::unordered_set<model::OperandIndex> &operands() const { return _operands; }
+
+private:
+ model::SubgraphIndex _index;
+ std::string _label;
+ std::string _fillcolor;
+ std::unordered_set<model::OperationIndex> _operations;
+ std::unordered_set<model::OperandIndex> _operands;
+};
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
+
+#endif // __NEURUN_CORE_DUMPER_DOT_DOT_SUBGRAPH_INFO_H__
diff --git a/runtimes/neurun/core/src/dumper/dot/Node.cc b/runtimes/neurun/core/src/dumper/dot/Node.cc
new file mode 100644
index 000000000..166f0f40f
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/Node.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Node.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+const std::string Node::DEFAULT_COLORSCHEME = "x11";
+const std::string Node::DEFAULT_FILLCOLOR = "white";
+// RED, BLUE, GREEN, PURPLE, ORANGE, YELLOW, BROWN, PINK
+const std::string Node::BG_COLORS[8] = {"1", "2", "3", "4", "5", "6", "7", "8"};
+
+Node::Node(const std::string &id) : _id{id}
+{
+ // Set default values
+ _attributes["style"] = "filled";
+ _attributes["colorscheme"] = DEFAULT_COLORSCHEME;
+ _attributes["fillcolor"] = DEFAULT_FILLCOLOR;
+}
+
+void Node::setAttribute(const std::string &key, const std::string &val) { _attributes[key] = val; }
+
+std::string Node::getAttribute(const std::string &key)
+{
+ auto itr = _attributes.find(key);
+ if (itr == _attributes.end())
+ {
+ return "";
+ }
+ else
+ {
+ return itr->second;
+ }
+}
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/dumper/dot/Node.h b/runtimes/neurun/core/src/dumper/dot/Node.h
new file mode 100644
index 000000000..364cb08a4
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/Node.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Node.h
+ * @brief This file contains Node class
+ * @ingroup COM_AI_RUNTIME
+ *
+ */
+
+#ifndef __NEURUN_DUMPER_DOT_NODE_H__
+#define __NEURUN_DUMPER_DOT_NODE_H__
+
+#include <string>
+#include <memory>
+#include <vector>
+#include <unordered_map>
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+enum BGCOLORS : int
+{
+ RED,
+ BLUE,
+ GREEN,
+  PURPLE,
+ ORANGE,
+ YELLOW,
+ BROWN,
+ PINK
+};
+
+/**
+ * @brief Class that represents a Node in "dot" format
+ *
+ */
+class Node
+{
+public:
+ const static std::string DEFAULT_FILLCOLOR;
+ const static std::string DEFAULT_COLORSCHEME;
+ const static std::string BG_COLORS[8];
+
+public:
+ /**
+ * @brief Destroy the Node object
+ *
+ */
+ virtual ~Node() = default;
+
+ /**
+ * @brief Construct a new Node object
+ *
+   * @param[in] id Node identifier used in the "dot" output
+ */
+ Node(const std::string &id);
+
+ /**
+ * @brief return id
+ *
+ * @return id
+ */
+ std::string id() const { return _id; }
+
+ /**
+ * @brief return attributes
+ *
+ * @return const reference of attributes object
+ */
+ const std::unordered_map<std::string, std::string> &attributes() const { return _attributes; }
+ /**
+ * @brief Store an attribute with key-value pair
+ *
+ * @param[in] key attribute's key
+ * @param[in] val attribute's value that is associated with the key
+ */
+ void setAttribute(const std::string &key, const std::string &val);
+ /**
+   * @brief Get the attribute value that is associated with the key
+ *
+ * @param[in] key key of the attribute
+ * @return value that is associated with the key
+ */
+ std::string getAttribute(const std::string &key);
+
+ /**
+ * @brief Add an edge in the graph, which is an outgoing edge
+ *
+ * @param[in] dotinfo A node that the new edge will be connected to
+ */
+ void addEdge(std::shared_ptr<Node> dotinfo) { _children.emplace_back(dotinfo); }
+ /**
+ * @brief Return list of edges
+ *
+ * @return Edges
+ */
+ const std::vector<std::shared_ptr<Node>> &edges() const { return _children; }
+
+private:
+ std::string _id;
+ std::unordered_map<std::string, std::string> _attributes;
+ std::vector<std::shared_ptr<Node>> _children;
+};
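+
+// Usage sketch (illustrative only; the id and label are hypothetical):
+//
+//   Node n{"operand7"};
+//   n.setAttribute("label", "7 [cpu]");
+//   assert(n.getAttribute("label") == "7 [cpu]");
+//   assert(n.getAttribute("missing").empty()); // unknown keys yield ""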
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
+
+#endif // __NEURUN_DUMPER_DOT_NODE_H__
diff --git a/runtimes/neurun/core/src/dumper/dot/OperandNode.cc b/runtimes/neurun/core/src/dumper/dot/OperandNode.cc
new file mode 100644
index 000000000..338dfc4b6
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/OperandNode.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sstream>
+
+#include "OperandNode.h"
+#include "graph/Graph.h"
+#include "graph/operand/LowerInfo.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+const std::string OperandNode::INPUT_SHAPE = "doublecircle";
+const std::string OperandNode::OUTPUT_SHAPE = "doublecircle";
+const std::string OperandNode::OPERAND_SHAPE = "ellipse";
+const std::string OperandNode::BG_COLOR_SCHEME = "set18";
+
+OperandNode::OperandNode(const neurun::model::OperandIndex &index, Type type)
+ : Node{"operand" + std::to_string(index.value())}
+{
+ {
+ auto type_to_shape = [](Type type) {
+ switch (type)
+ {
+ case Type::MODEL_INPUT:
+ return INPUT_SHAPE;
+ case Type::MODEL_OUTPUT:
+ return OUTPUT_SHAPE;
+ case Type::UNDEFINED:
+ case Type::INTERNAL:
+ default:
+ return OPERAND_SHAPE;
+ }
+ };
+ setAttribute("shape", type_to_shape(type));
+ }
+
+ setAttribute("colorscheme", BG_COLOR_SCHEME);
+}
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/dumper/dot/OperandNode.h b/runtimes/neurun/core/src/dumper/dot/OperandNode.h
new file mode 100644
index 000000000..40f715eac
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/OperandNode.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file OperandNode.h
+ * @brief This file contains OperandNode
+ * @ingroup COM_AI_RUNTIME
+ *
+ */
+
+#ifndef __NEURUN_DUMPER_DOT_DOT_OPERAND_INFO_H__
+#define __NEURUN_DUMPER_DOT_DOT_OPERAND_INFO_H__
+
+#include <vector>
+
+#include "Node.h"
+#include "model/Operand.h"
+#include "model/Index.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+/**
+ * @brief Class that represents an Operand
+ *
+ */
+class OperandNode : public Node
+{
+public:
+ enum class Type
+ {
+ UNDEFINED,
+ MODEL_INPUT,
+ MODEL_OUTPUT,
+ INTERNAL
+ };
+
+public:
+ static const std::string INPUT_SHAPE;
+ static const std::string OUTPUT_SHAPE;
+ static const std::string OPERAND_SHAPE;
+ static const std::string BG_COLOR_SCHEME;
+
+public:
+ /**
+ * @brief Construct a new Operand Node object
+ *
+ * @param[in] index Operand index
+ * @param[in] type Operand type
+ */
+ OperandNode(const neurun::model::OperandIndex &index, Type type);
+
+private:
+ void addBackendLabel();
+};
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
+
+#endif // __NEURUN_DUMPER_DOT_DOT_OPERAND_INFO_H__
diff --git a/runtimes/neurun/core/src/dumper/dot/OperationNode.cc b/runtimes/neurun/core/src/dumper/dot/OperationNode.cc
new file mode 100644
index 000000000..040241daa
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/OperationNode.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sstream>
+
+#include "OperationNode.h"
+#include "graph/Graph.h"
+#include "graph/operation/LowerInfo.h"
+#include "backend/IConfig.h"
+#include "backend/Backend.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+const std::string OperationNode::OPERATION_SHAPE = "rect";
+const std::string OperationNode::BG_COLOR_SCHEME = "pastel18";
+
+OperationNode::OperationNode(const neurun::model::OperationIndex &index,
+ const neurun::model::Operation &node)
+ : Node{"operation" + std::to_string(index.value())}
+{
+ setAttribute("label", std::to_string(index.value()) + " : " + node.getName());
+ setAttribute("shape", OPERATION_SHAPE);
+ setAttribute("colorscheme", BG_COLOR_SCHEME);
+ setAttribute("fillcolor", DEFAULT_FILLCOLOR);
+}
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
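
The constructor above pins down the visual style of an operation node; the dumper later serializes each node's attribute map into a DOT statement. A standalone sketch (not part of this diff) of how such an attribute map could serialize, using a hypothetical Conv2D operation at index 3 and an assumed value for DEFAULT_FILLCOLOR:

#include <iostream>
#include <map>
#include <string>

int main()
{
  // Attributes as OperationNode's constructor would set them.
  std::map<std::string, std::string> attrs{
      {"label", "3 : Conv2D"},     // index + operation name
      {"shape", "rect"},           // OPERATION_SHAPE
      {"colorscheme", "pastel18"}, // BG_COLOR_SCHEME
      {"fillcolor", "1"}};         // assumed DEFAULT_FILLCOLOR value
  std::cout << "operation3 [";
  for (const auto &kv : attrs)
    std::cout << kv.first << "=\"" << kv.second << "\" ";
  std::cout << "];" << std::endl; // e.g. operation3 [colorscheme="pastel18" ... ];
}
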
diff --git a/runtimes/neurun/core/src/dumper/dot/OperationNode.h b/runtimes/neurun/core/src/dumper/dot/OperationNode.h
new file mode 100644
index 000000000..6b8dede2d
--- /dev/null
+++ b/runtimes/neurun/core/src/dumper/dot/OperationNode.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file OperationNode.h
+ * @brief This file contains OperationNode
+ * @ingroup COM_AI_RUNTIME
+ *
+ */
+
+#ifndef __NEURUN_DUMPER_DOT_DOT_NODE_INFO_H__
+#define __NEURUN_DUMPER_DOT_DOT_NODE_INFO_H__
+
+#include "Node.h"
+#include "model/Operation.h"
+#include "model/Index.h"
+
+namespace neurun
+{
+namespace dumper
+{
+namespace dot
+{
+
+/**
+ * @brief Class that represents an Operation
+ *
+ */
+class OperationNode : public Node
+{
+public:
+ static const std::string OPERATION_SHAPE;
+ static const std::string BG_COLOR_SCHEME;
+
+public:
+ /**
+ * @brief Construct a new Operation Node object
+ *
+ * @param[in] index operation index
+ * @param[in] node operation object
+ */
+ OperationNode(const neurun::model::OperationIndex &index, const neurun::model::Operation &node);
+};
+
+} // namespace dot
+} // namespace dumper
+} // namespace neurun
+
+#endif // __NEURUN_DUMPER_DOT_DOT_NODE_INFO_H__
diff --git a/runtimes/neurun/core/src/exec/DataflowExecutor.cc b/runtimes/neurun/core/src/exec/DataflowExecutor.cc
new file mode 100644
index 000000000..75d616131
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/DataflowExecutor.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DataflowExecutor.h"
+
+#include <cassert>
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+int64_t DataflowExecutor::calculateRank(const std::vector<model::Element> &operations)
+{
+ int64_t rank = 0;
+ if (!_indexed_ranks)
+ {
+ return rank;
+ }
+ for (const auto &element : operations)
+ {
+ auto it = _indexed_ranks->find(element.index);
+ if (it == _indexed_ranks->end())
+ {
+ assert(element.node->getName() == "Permute");
+ // Assign int32_t::max (rather than int64_t::max) so that summing several ranks cannot overflow the int64_t accumulator
+ rank += std::numeric_limits<int32_t>::max();
+ }
+ else
+ {
+ rank += it->second;
+ }
+ }
+ return rank;
+}
+
+void DataflowExecutor::emplaceToReadyJobs(const uint32_t &id)
+{
+ auto &job = _waiting_jobs[id];
+ assert(job != nullptr);
+ auto &subg = _subgraphs->at(_job_to_subgraph[job->index()]);
+ auto rank = calculateRank(subg.operations());
+ _ready_jobs.emplace(rank, std::move(job));
+}
+
+void DataflowExecutor::notify(uint32_t finished_job_id)
+{
+ for (auto id : _output_info[finished_job_id])
+ {
+ assert(_input_info[id] > 0);
+ auto count = --_input_info[id];
+ if (count == 0) // No dependent jobs left, ready for execution
+ {
+ emplaceToReadyJobs(id);
+ }
+ }
+}
+
+bool DataflowExecutor::noWaitingJobs()
+{
+ return std::all_of(_waiting_jobs.begin(), _waiting_jobs.end(),
+ [](const std::unique_ptr<Job> &job) { return job == nullptr; });
+}
+
+DataflowExecutor::DataflowExecutor(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
+ CodeMap &&code_map)
+ : ExecutorBase{model, std::move(subgraphs), operand_context, std::move(lower_info),
+ std::move(tensor_mgrs)},
+ _code_map{std::move(code_map)}
+{
+ VERBOSE(DataflowExecutor) << "Constructing Dataflow Executor" << std::endl;
+
+ assert(_subgraphs);
+ // Assign jobs: convert each SubgraphIndex to a job index (uint32_t)
+ uint32_t next_job_index = 0;
+ std::unordered_map<model::SubgraphIndex, uint32_t> subgraph_to_job;
+ _subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &) {
+ VERBOSE(DataflowExecutor) << "Create a job #" << next_job_index << " with SubgraphIndex "
+ << subg_index.value() << std::endl;
+ _finished_jobs.emplace_back(
+ nnfw::cpp14::make_unique<Job>(next_job_index, _code_map.at(subg_index).get(),
+ _lower_info->operation.at(subg_index)->backend()));
+ subgraph_to_job[subg_index] = next_job_index++;
+ });
+
+ _waiting_jobs.resize(next_job_index);
+ _output_info.resize(next_job_index);
+ _initial_input_info.resize(next_job_index, 0);
+
+ _subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ auto job_index = subgraph_to_job[subg_index];
+ for (auto output : subg.getOutputs())
+ {
+ // Update output and input info
+ _subgraphs->iterate(
+ [&](const model::SubgraphIndex &subg_cur_index, const model::Subgraph &subg_cur) {
+ if (subg_cur.getInputs().contains(output))
+ {
+ auto dep_index = subgraph_to_job[subg_cur_index];
+ ++_initial_input_info[dep_index];
+ _output_info[job_index].push_back(dep_index);
+ }
+ });
+ }
+ });
+ for (const auto &s : subgraph_to_job)
+ _job_to_subgraph.emplace(s.second, s.first);
+
+ _input_info = _initial_input_info;
+}
+
+void DataflowExecutor::executeImpl()
+{
+ assert(noWaitingJobs());
+
+ // Execution setup
+ _waiting_jobs.swap(_finished_jobs); // Move finished jobs to waiting jobs
+
+ for (uint32_t i = 0; i < _waiting_jobs.size(); ++i)
+ {
+ if (_input_info[i] == 0)
+ {
+ emplaceToReadyJobs(i);
+ }
+ }
+ assert(!_ready_jobs.empty()); // Cannot begin if there are no initial jobs
+ bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE);
+ // TODO Fix indentation
+ {
+ // Notify execution begin
+ for (auto &o : _observers)
+ {
+ o->handleBegin(this);
+ }
+ }
+
+ while (!_ready_jobs.empty())
+ {
+ auto job = std::move((_ready_jobs.begin())->second);
+ _ready_jobs.erase(_ready_jobs.begin());
+ auto job_index = job->index();
+ VERBOSE(DataflowExecutor) << "Run job #" << job_index << std::endl;
+ notifyJobBegin(job_index);
+ if (is_profiling)
+ job->fn()->runSync();
+ else
+ job->run();
+ notifyJobEnd(job_index);
+ notify(job_index);
+ _finished_jobs[job_index] = std::move(job);
+ }
+ assert(noWaitingJobs());
+
+ for (auto &o : _observers)
+ {
+ o->handleEnd(this);
+ }
+
+ // Reset input info for the next execution
+ _input_info = _initial_input_info;
+}
+
+void DataflowExecutor::notifyJobBegin(uint32_t job_index)
+{
+ auto subgraph_index = _job_to_subgraph[job_index];
+ // Workaround - assumes only one operation
+ auto node = _subgraphs->at(subgraph_index).operations().at(0).node;
+ const backend::Backend *backend = _lower_info->operation.at(subgraph_index)->backend();
+ for (auto &o : _observers)
+ {
+ o->handleBegin(this, node, backend);
+ }
+}
+
+void DataflowExecutor::notifyJobEnd(uint32_t job_index)
+{
+ auto subgraph_index = _job_to_subgraph[job_index];
+ // Workaround - assumes only one operation
+ auto node = _subgraphs->at(subgraph_index).operations().at(0).node;
+ const backend::Backend *backend = _lower_info->operation.at(subgraph_index)->backend();
+ for (auto &o : _observers)
+ {
+ o->handleEnd(this, node, backend);
+ }
+}
+
+} // namespace exec
+} // namespace neurun
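
DataflowExecutor implements Kahn-style topological dispatch: _input_info[i] counts the unfinished predecessor jobs of job i, notify() decrements those counts when a job finishes, and a job whose count reaches zero moves to the ready queue. A minimal standalone sketch of that counting scheme, over a hypothetical three-job graph (job 0 feeds jobs 1 and 2):

#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>

int main()
{
  // Unfinished-predecessor counts (_input_info) and successor lists (_output_info).
  std::vector<uint32_t> input_info{0, 1, 1};
  std::vector<std::vector<uint32_t>> output_info{{1, 2}, {}, {}};
  std::queue<uint32_t> ready; // the real executor ranks these by priority

  for (uint32_t i = 0; i < input_info.size(); ++i)
    if (input_info[i] == 0)
      ready.push(i); // emplaceToReadyJobs

  while (!ready.empty())
  {
    uint32_t job = ready.front();
    ready.pop();
    std::cout << "run job #" << job << std::endl; // job->run()
    for (uint32_t dep : output_info[job])         // notify(job)
      if (--input_info[dep] == 0)
        ready.push(dep);
  }
}
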
diff --git a/runtimes/neurun/core/src/exec/DataflowExecutor.h b/runtimes/neurun/core/src/exec/DataflowExecutor.h
new file mode 100644
index 000000000..935f9976d
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/DataflowExecutor.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_DATAFLOW_EXECUTOR_H__
+#define __NEURUN_EXEC_DATAFLOW_EXECUTOR_H__
+
+#include <list>
+#include <map>
+#include <unordered_map>
+
+#include "FunctionSequence.h"
+#include "Job.h"
+#include "model/OperandIndexSequence.h"
+#include "model/Index.h"
+#include "model/Model.h"
+#include "cpp14/memory.h"
+#include "exec/ExecutorBase.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+class DataflowExecutor : public ExecutorBase
+{
+public:
+ using CodeMap = std::unordered_map<model::SubgraphIndex, std::unique_ptr<FunctionSequence>>;
+
+protected:
+ virtual void notify(uint32_t finished_job_id);
+ bool noWaitingJobs();
+
+public:
+ /**
+ * @brief Constructs a DataflowExecutor object
+ *
+ * @param model Model object
+ * @param subgraphs Subgraphs to be run as jobs
+ * @param operand_context (Only for input/output operand data access)
+ * @param lower_info LowerInfo object (Only to know input/output operands layout)
+ * @param tensor_mgrs Tensor managers that own the backend tensors
+ * @param code_map Compiled code map
+ */
+ DataflowExecutor(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs, CodeMap &&code_map);
+
+ void executeImpl() override;
+
+ void notifyJobEnd(uint32_t job_index);
+ void notifyJobBegin(uint32_t job_index);
+
+ void addObserver(std::unique_ptr<IExecutionObserver> ref)
+ {
+ _observers.emplace_back(std::move(ref));
+ };
+ void removeObserver(std::unique_ptr<IExecutionObserver> ref) { _observers.remove(ref); };
+
+protected:
+ int64_t calculateRank(const std::vector<model::Element> &operations);
+ void emplaceToReadyJobs(const uint32_t &id);
+
+protected:
+ CodeMap _code_map;
+ /**
+ * @brief A vector of finished jobs for current execution
+ * After a run it has all the jobs of this execution for the next run
+ */
+ std::vector<std::unique_ptr<Job>> _finished_jobs;
+ /**
+ * @brief A vector of waiting jobs for current execution
+ * All the jobs are moved from #_finished_jobs to it when a run starts
+ */
+ std::vector<std::unique_ptr<Job>> _waiting_jobs;
+ /**
+ * @brief Jobs' output info
+ * Used for notifying after finishing a job
+ */
+ std::vector<std::list<uint32_t>> _output_info;
+ std::vector<uint32_t> _initial_input_info;
+ std::vector<uint32_t> _input_info;
+ /**
+ * @brief A collection of jobs that are ready for execution
+ * Jobs in it are ready to be scheduled.
+ * Ordered by priority from `_indexed_ranks`
+ */
+ std::multimap<int64_t, std::unique_ptr<Job>, std::greater<int64_t>> _ready_jobs;
+
+ /// @brief Maps a job index to the subgraph (and thus the function) it runs
+ std::unordered_map<uint32_t, model::SubgraphIndex> _job_to_subgraph;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_DATAFLOW_EXECUTOR_H__
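
_ready_jobs above orders pending jobs by descending rank, so begin() always yields a highest-priority job; using a multimap allows jobs with equal ranks. A small standalone illustration of that ordering:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main()
{
  // Same comparator as _ready_jobs: std::greater puts the largest rank first.
  std::multimap<int64_t, std::string, std::greater<int64_t>> ready;
  ready.emplace(10, "low-priority job");
  ready.emplace(42, "high-priority job");
  ready.emplace(42, "another rank-42 job"); // duplicate keys are fine in a multimap
  std::cout << ready.begin()->second << std::endl; // "high-priority job"
}
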
diff --git a/runtimes/neurun/core/src/exec/Execution.cc b/runtimes/neurun/core/src/exec/Execution.cc
new file mode 100644
index 000000000..01114d8c8
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/Execution.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exec/Execution.h"
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+Execution::Execution(const std::shared_ptr<IExecutor> &executor) : _executor{executor}
+{
+ _io_desc.inputs.resize(_executor->model().inputs.size());
+ _io_desc.outputs.resize(_executor->model().outputs.size());
+}
+
+void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t length)
+{
+ const auto input_index = model().inputs.at(index);
+ const auto info = model().operands.at(input_index).info();
+
+ if (length < info.total_size())
+ {
+ throw std::runtime_error{"Too small length"};
+ }
+
+ _io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique<InputDesc>(info, buffer, length);
+}
+
+void Execution::setInput(const model::IOIndex &index, const model::TypeInfo &type,
+ const model::Shape &shape, const void *buffer, size_t length)
+{
+ const model::OperandInfo info{shape, type};
+
+ if (length < info.total_size())
+ {
+ throw std::runtime_error{"Too small length"};
+ }
+
+ _io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique<InputDesc>(info, buffer, length);
+}
+
+void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t length)
+{
+ const auto output_index = model().outputs.at(index);
+ const auto info = model().operands.at(output_index).info();
+
+ if (length < info.total_size())
+ {
+ throw std::runtime_error{"Too small length"};
+ }
+
+ _io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length);
+}
+
+void Execution::setOutput(const model::IOIndex &index, const model::TypeInfo &type,
+ const model::Shape &shape, void *buffer, size_t length)
+{
+ const model::OperandInfo info{shape, type};
+
+ if (length < info.total_size())
+ {
+ throw std::runtime_error{"Too small length"};
+ }
+
+ _io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length);
+}
+
+void Execution::execute()
+{
+ VERBOSE(Execution) << "Start execution" << std::endl;
+
+ _executor->execute(_io_desc);
+
+ VERBOSE(Execution) << "Execution finished" << std::endl;
+}
+
+void Execution::startExecute()
+{
+ VERBOSE(Execution) << "Create asynchronous execution thread" << std::endl;
+
+ _exec_thread = nnfw::cpp14::make_unique<std::thread>(&Execution::execute, this);
+}
+
+void Execution::waitFinish()
+{
+ VERBOSE(Execution) << "Wait to finish execution" << std::endl;
+
+ _exec_thread->join();
+}
+
+} // namespace exec
+} // namespace neurun
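
Putting the API above together, a typical caller would look roughly like this (a hypothetical sketch, assuming a compiled IExecutor and a single float input/output; buffers must be at least total_size() bytes or the setters throw):

#include <cstddef>
#include <memory>

#include "exec/Execution.h"

void run_once(const std::shared_ptr<neurun::exec::IExecutor> &executor, const float *in,
              size_t in_bytes, float *out, size_t out_bytes)
{
  neurun::exec::Execution execution{executor};

  execution.setInput(neurun::model::IOIndex{0}, in, in_bytes);    // throws if too small
  execution.setOutput(neurun::model::IOIndex{0}, out, out_bytes); // throws if too small

  execution.execute(); // synchronous; or startExecute() followed by waitFinish()
}
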
diff --git a/runtimes/neurun/core/src/exec/ExecutionObservers.cc b/runtimes/neurun/core/src/exec/ExecutionObservers.cc
new file mode 100644
index 000000000..e6561fe5c
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ExecutionObservers.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exec/ExecutionObservers.h"
+#include "util/logging.h"
+#include "model/operation/PermuteNode.h"
+#include "exec/IExecutor.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+
+namespace exec
+{
+
+void ProfileObserver::handleBegin(neurun::exec::IExecutor *, const neurun::model::Operation *,
+ const neurun::backend::Backend *backend)
+{
+ _timer = backend->config()->timer();
+ if (_timer == nullptr)
+ throw std::runtime_error("To profile backend timer() method must be implemented");
+ _timer->handleBegin();
+}
+
+void ProfileObserver::handleEnd(IExecutor *exec, const model::Operation *node,
+ const backend::Backend *backend)
+{
+ _timer->handleEnd();
+ const auto timer_res = _timer->getTime();
+
+ auto node_name = node->getName();
+ VERBOSE(ProfileInfo) << "Time for " << node_name << " : " << timer_res << std::endl;
+
+ // fill ExecTime:
+ bool is_quantized = exec->model().operands.at(node->getInputs().at(0)).typeInfo().type() ==
+ model::DataType::QUANT8_ASYMM;
+
+ uint32_t size = 0;
+ for (const auto &input : node->getInputs())
+ {
+ size += exec->model().operands.at(input).info().total_size();
+ }
+ for (const auto &output : node->getOutputs())
+ {
+ size += exec->model().operands.at(output).info().total_size();
+ }
+ if (node_name == "Permute")
+ {
+ auto *permute_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::PermuteNode *>(node);
+ assert(permute_node != nullptr);
+ _et->updatePermuteTime(permute_node->param().input_backend_ctx->backend,
+ permute_node->param().output_backend_ctx->backend, is_quantized, size,
+ timer_res);
+ }
+ else
+ {
+ _et->updateOperationExecTime(backend, node_name, is_quantized, size, timer_res);
+ }
+}
+
+} // namespace exec
+
+} // namespace neurun
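
The observer brackets each operation with the backend timer's handleBegin()/handleEnd() and feeds the measured duration, total operand byte size, and quantization flag into ExecTime. A standalone sketch of the same begin/end protocol, with std::chrono standing in for the backend's timer:

#include <chrono>
#include <iostream>

int main()
{
  auto begin = std::chrono::steady_clock::now(); // _timer->handleBegin()

  volatile double acc = 0; // stand-in for the measured operation
  for (int i = 0; i < 1000000; ++i)
    acc = acc + i * 0.5;

  auto end = std::chrono::steady_clock::now(); // _timer->handleEnd()
  auto us = std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
  std::cout << "Time for op : " << us << " us" << std::endl; // what gets stored in ExecTime
}
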
diff --git a/runtimes/neurun/core/src/exec/ExecutorBase.cc b/runtimes/neurun/core/src/exec/ExecutorBase.cc
new file mode 100644
index 000000000..827d4dc8b
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ExecutorBase.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExecutorBase.h"
+#include "util/logging.h"
+namespace neurun
+{
+namespace exec
+{
+
+ExecutorBase::ExecutorBase(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs)
+ : _observers(), _model{model}, _subgraphs{std::move(subgraphs)},
+ _operand_context{operand_context}, _lower_info{std::move(lower_info)},
+ _tensor_mgrs{std::move(tensor_mgrs)}, _mutex()
+{
+ // DO NOTHING
+}
+
+std::unique_ptr<ISource> ExecutorBase::source(const model::IOIndex &index,
+ const model::TypeInfo &type, const void *buffer,
+ size_t length)
+{
+ using ::neurun::model::DataType;
+ switch (type.type())
+ {
+ case DataType::FLOAT32:
+ return source<float>(index, buffer, length);
+ case DataType::INT32:
+ return source<int32_t>(index, buffer, length);
+ case DataType::UINT32:
+ return source<uint32_t>(index, buffer, length);
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ return source<uint8_t>(index, buffer, length);
+ default:
+ throw std::runtime_error("Not supported yet");
+ }
+}
+
+std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type,
+ void *buffer, size_t length)
+{
+ using ::neurun::model::DataType;
+ switch (type.type())
+ {
+ case DataType::FLOAT32:
+ return sink<float>(index, buffer, length);
+ case DataType::INT32:
+ return sink<int32_t>(index, buffer, length);
+ case DataType::UINT32:
+ return sink<uint32_t>(index, buffer, length);
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ return sink<uint8_t>(index, buffer, length);
+ default:
+ throw std::runtime_error("Not supported yet");
+ }
+}
+
+void ExecutorBase::execute(const IODescription &desc)
+{
+ // Use a mutex for thread safety
+ // TODO: if all backends used by this executor are thread-safe,
+ // the mutex can be dropped
+ std::lock_guard<std::mutex> lock(_mutex);
+
+ std::vector<std::unique_ptr<ISource>> sources{_model->inputs.size()};
+ std::vector<std::unique_ptr<ISink>> sinks{_model->outputs.size()};
+
+ // Set input(s)
+ for (uint32_t n = 0; n < _model->inputs.size(); ++n)
+ {
+ model::IOIndex input_index{n};
+ model::OperandIndex index{_model->inputs.at(input_index)};
+
+ if (desc.inputs.at(n) == nullptr)
+ {
+ // Optional input
+ continue;
+ }
+
+ const auto operand_li = _lower_info->operand.at(index).get();
+ if (operand_li->def_factors().empty())
+ {
+ // This input is not used at runtime (e.g. a constant such as Reshape's axis)
+ continue;
+ }
+
+ const auto &input = *desc.inputs.at(n);
+ sources.at(n) = source(input_index, input.info.typeInfo(), input.buffer, input.size);
+
+ auto setter = [&](::neurun::backend::operand::ITensor &tensor) { sources.at(n)->push(tensor); };
+
+ auto object = _operand_context->at(index);
+
+ object->access(setter);
+ }
+
+ executeImpl();
+
+ // Get output(s)
+ for (uint32_t n = 0; n < _model->outputs.size(); ++n)
+ {
+ neurun::model::IOIndex output_index{n};
+ // Optional output
+ if (desc.outputs.at(n) == nullptr)
+ {
+ continue;
+ }
+ const auto &output = *desc.outputs.at(n);
+ sinks.at(n) = sink(output_index, output.info.typeInfo(), output.buffer, output.size);
+
+ auto getter = [&](::neurun::backend::operand::ITensor &tensor) { sinks.at(n)->pull(tensor); };
+
+ ::neurun::model::OperandIndex index{_model->outputs.at(output_index)};
+ auto object = _operand_context->at(index);
+
+ object->access(getter);
+ }
+}
+
+} // namespace exec
+} // namespace neurun
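
source()/sink() above dispatch on the operand's DataType to pick the element type T for the templated implementations. A standalone sketch of that dispatch pattern (the enum and factory below are stand-ins, not the neurun types):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <stdexcept>

enum class DataType { FLOAT32, INT32, UINT32, BOOL8, QUANT8_ASYMM };

struct ISource
{
  virtual ~ISource() = default;
  virtual size_t elem_size() const = 0;
};

template <typename T> struct TypedSource : ISource
{
  size_t elem_size() const override { return sizeof(T); }
};

std::unique_ptr<ISource> make_source(DataType t)
{
  switch (t)
  {
    case DataType::FLOAT32: return std::make_unique<TypedSource<float>>();
    case DataType::INT32:   return std::make_unique<TypedSource<int32_t>>();
    case DataType::UINT32:  return std::make_unique<TypedSource<uint32_t>>();
    case DataType::BOOL8:        // both byte-sized, as in the switch above
    case DataType::QUANT8_ASYMM: return std::make_unique<TypedSource<uint8_t>>();
    default: throw std::runtime_error("Not supported yet");
  }
}

int main() { std::cout << make_source(DataType::FLOAT32)->elem_size() << std::endl; } // 4
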
diff --git a/runtimes/neurun/core/src/exec/ExecutorBase.h b/runtimes/neurun/core/src/exec/ExecutorBase.h
new file mode 100644
index 000000000..c283e7f61
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ExecutorBase.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_EXECUTOR_BASE_H__
+#define __NEURUN_EXEC_EXECUTOR_BASE_H__
+
+#include <mutex>
+
+#include "Source.h"
+#include "exec/ExecutionObservers.h"
+#include "Sink.h"
+#include "exec/IExecutor.h"
+#include "model/Model.h"
+#include "graph/LowerInfoMap.h"
+#include "backend/IConfig.h"
+#include "model/OperandInfo.h"
+#include "backend/Backend.h"
+#include "compiler/OperandContext.h"
+#include "model/Subgraphs.h"
+#include "model/Subgraph.h"
+#include "backend/ExecTime.h"
+#include "exec/IFunction.h"
+#include "backend/ITensorManager.h"
+#include <list>
+
+namespace neurun
+{
+namespace exec
+{
+
+class ExecutorBase : public IExecutor
+{
+public:
+ ExecutorBase(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs);
+
+ virtual ~ExecutorBase() = default;
+
+ const model::Model &model() override { return *_model; }
+
+ void execute(const IODescription &desc) final;
+
+ // Used only in Dataflow and Parallel Executors
+ void setIndexedRanks(std::shared_ptr<model::OperationIndexMap<int64_t>> ranks) final
+ {
+ _indexed_ranks = std::move(ranks);
+ };
+
+ virtual void executeImpl(void) = 0;
+
+private:
+ std::unique_ptr<ISource> source(const model::IOIndex &index, const model::TypeInfo &type,
+ const void *buffer, size_t length);
+ std::unique_ptr<ISink> sink(const model::IOIndex &index, const model::TypeInfo &type,
+ void *buffer, size_t length);
+
+ template <typename T>
+ std::unique_ptr<ISource> source(const model::IOIndex &index, const void *buffer, size_t length)
+ {
+ const auto operand_index = _model->inputs.at(index);
+ const auto &operand = _model->operands.at(operand_index);
+
+ const auto tensor = _operand_context->at(operand_index)->ptr();
+ const auto output_layout = tensor->layout();
+ // TODO Set input_layout as frontend model's input layout
+ auto input_layout = model::Layout::NHWC;
+ if ((input_layout == model::Layout::NHWC) && (output_layout == model::Layout::NCHW))
+ {
+ return nnfw::cpp14::make_unique<PermutateSource<T>>(buffer, length, operand.shape());
+ }
+ // TODO Supports NCHW -> NHWC
+
+ return nnfw::cpp14::make_unique<CopySource<T>>(buffer, length, operand.shape());
+ }
+
+ template <typename T>
+ std::unique_ptr<ISink> sink(const model::IOIndex &index, void *buffer, size_t length)
+ {
+ const auto operand_index = _model->outputs.at(index);
+ const auto &operand = _model->operands.at(operand_index);
+ const auto tensor = _operand_context->at(operand_index)->ptr();
+ const auto input_layout = tensor->layout();
+ // TODO Set output_layout as frontend model's output layout
+ auto output_layout = model::Layout::NHWC;
+ if ((input_layout == model::Layout::NCHW) && (output_layout == model::Layout::NHWC))
+ {
+ return nnfw::cpp14::make_unique<PermutateSink<T>>(buffer, length, operand.shape());
+ }
+ // TODO Supports NHWC -> NCHW
+
+ return nnfw::cpp14::make_unique<CopySink<T>>(buffer, length, operand.shape());
+ }
+
+protected:
+ std::list<std::unique_ptr<IExecutionObserver>> _observers;
+ std::shared_ptr<model::OperationIndexMap<int64_t>> _indexed_ranks;
+ std::shared_ptr<const model::Model> _model;
+ std::unique_ptr<model::Subgraphs> _subgraphs;
+ std::shared_ptr<compiler::OperandContext> _operand_context;
+ std::unique_ptr<graph::LowerInfoMap> _lower_info;
+ std::unique_ptr<backend::TensorManagerSet> _tensor_mgrs;
+ std::mutex _mutex;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_EXECUTOR_BASE_H__
diff --git a/runtimes/neurun/core/src/exec/FunctionSequence.cc b/runtimes/neurun/core/src/exec/FunctionSequence.cc
new file mode 100644
index 000000000..00214fcfa
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/FunctionSequence.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FunctionSequence.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+void FunctionSequence::run()
+{
+ for (const auto &function : _functions)
+ {
+ function->run();
+ }
+}
+
+void FunctionSequence::runSync()
+{
+ for (const auto &function : _functions)
+ {
+ function->runSync();
+ }
+}
+
+void FunctionSequence::prepare()
+{
+ for (const auto &function : _functions)
+ {
+ function->prepare();
+ }
+}
+
+void FunctionSequence::append(std::unique_ptr<IFunction> &&function)
+{
+ _functions.push_back(std::move(function));
+}
+
+void FunctionSequence::iterate(const std::function<void(IFunction &)> &fn)
+{
+ for (const auto &func : _functions)
+ {
+ fn(*func);
+ }
+}
+
+} // namespace exec
+} // namespace neurun
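
A hypothetical driver for FunctionSequence, assuming the neurun headers are on the include path and that run()/runSync() are the virtuals a concrete IFunction must provide (as the implementations elsewhere in this diff suggest):

#include <iostream>

#include "exec/FunctionSequence.h"
#include "cpp14/memory.h"

struct PrintFn : neurun::exec::IFunction
{
  explicit PrintFn(int id) : _id{id} {}
  void run() override { std::cout << "fn #" << _id << std::endl; }
  void runSync() override { run(); }

  int _id;
};

int main()
{
  neurun::exec::FunctionSequence seq;
  seq.append(nnfw::cpp14::make_unique<PrintFn>(0));
  seq.append(nnfw::cpp14::make_unique<PrintFn>(1));
  seq.run(); // runs the appended functions in order: fn #0, fn #1
}
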
diff --git a/runtimes/neurun/core/src/exec/FunctionSequence.h b/runtimes/neurun/core/src/exec/FunctionSequence.h
new file mode 100644
index 000000000..2ba5c0b08
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/FunctionSequence.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_FUNCTION_SEQUENCE_H__
+#define __NEURUN_EXEC_FUNCTION_SEQUENCE_H__
+
+#include <memory>
+#include <vector>
+#include <functional>
+
+#include "exec/IFunction.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+class FunctionSequence : public IFunction
+{
+public:
+ virtual ~FunctionSequence() = default;
+
+ void run() override;
+ void runSync() override;
+ void prepare() override;
+
+ /**
+ * @brief Appends an IFunction object to the function sequence
+ *
+ * @param function IFunction object to be appended
+ */
+ void append(std::unique_ptr<IFunction> &&function);
+
+ void iterate(const std::function<void(IFunction &)> &fn);
+
+private:
+ std::vector<std::unique_ptr<IFunction>> _functions;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_FUNCTION_SEQUENCE_H__
diff --git a/runtimes/neurun/core/src/exec/Job.cc b/runtimes/neurun/core/src/exec/Job.cc
new file mode 100644
index 000000000..6ce3a84f9
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/Job.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Job.h"
+
+#include <cassert>
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+Job::Job(uint32_t index, IFunction *fn, const backend::Backend *backend)
+ : _index{index}, _fn{fn}, _backend{backend}
+{
+}
+
+void Job::run() { _fn->run(); }
+
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/Job.h b/runtimes/neurun/core/src/exec/Job.h
new file mode 100644
index 000000000..108f39e99
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/Job.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_JOB_H__
+#define __NEURUN_EXEC_JOB_H__
+
+#include <unordered_set>
+
+#include "exec/IFunction.h"
+#include "model/Index.h"
+#include "model/OperandIndexSequence.h"
+#include "backend/Backend.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+class Job
+{
+public:
+ /**
+ * @brief Constructs a Job object
+ *
+ * @param index Operation index for this job
+ * @param fn Compiled code to run this job
+ * @param backend Backend to run this job on
+ */
+ Job(uint32_t index, IFunction *fn, const backend::Backend *backend);
+ /**
+ * @brief Execute the compiled code
+ */
+ void run();
+ /**
+ * @brief Return job index
+ *
+ * @return Job index
+ */
+ uint32_t index() const { return _index; }
+ /**
+ * @brief Return the function to be executed
+ *
+ * @return Pointer of the function
+ */
+ IFunction *fn() { return _fn; }
+
+ /**
+ * @brief Return the backend
+ *
+ * @return Backend
+ */
+ const backend::Backend *backend() { return _backend; }
+
+private:
+ uint32_t _index;
+ IFunction *_fn;
+ const backend::Backend *_backend;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_JOB_H__
diff --git a/runtimes/neurun/core/src/exec/LinearExecutor.cc b/runtimes/neurun/core/src/exec/LinearExecutor.cc
new file mode 100644
index 000000000..35197a257
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/LinearExecutor.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LinearExecutor.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+void LinearExecutor::executeImpl() { _fn_seq->run(); }
+
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/LinearExecutor.h b/runtimes/neurun/core/src/exec/LinearExecutor.h
new file mode 100644
index 000000000..58c1ea9ae
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/LinearExecutor.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file LinearExecutor.h
+ * @brief This file contains LinearExecutor class to define and run execution phase
+ */
+
+#ifndef __NEURUN_EXEC_LINEAR_EXECUTOR_H__
+#define __NEURUN_EXEC_LINEAR_EXECUTOR_H__
+
+#include "ExecutorBase.h"
+#include "compiler/Linear.h"
+#include "exec/FunctionSequence.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+/**
+ * @brief Class to handle the execution phase. Simply runs the sequence of operations, which is
+ * sorted in topological order
+ */
+class LinearExecutor final : public ExecutorBase
+{
+public:
+ /**
+ * @brief Construct a new LinearExecutor object
+ * @param[in] fn_seq Function sequence to run, generated from the compiled result
+ */
+ LinearExecutor(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
+ std::vector<compiler::Linear::Element> &&elements,
+ const std::shared_ptr<exec::FunctionSequence> &fn_seq)
+ : ExecutorBase{model, std::move(subgraphs), operand_context, std::move(lower_info),
+ std::move(tensor_mgrs)},
+ _fn_seq{fn_seq}, _elements{std::move(elements)}
+ {
+ }
+
+public:
+ void executeImpl(void) override;
+
+private:
+ std::shared_ptr<exec::FunctionSequence> _fn_seq;
+ std::vector<compiler::Linear::Element> _elements;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_LINEAR_EXECUTOR_H__
diff --git a/runtimes/neurun/core/src/exec/ParallelExecutor.cc b/runtimes/neurun/core/src/exec/ParallelExecutor.cc
new file mode 100644
index 000000000..81d4ac03f
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ParallelExecutor.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ParallelExecutor.h"
+
+#include <cassert>
+#include <stdexcept>
+
+#include "util/logging.h"
+#include "exec/IFunction.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+class HookFunction : public IFunction
+{
+public:
+ HookFunction(IFunction *fn, std::function<void()> teardown) : _fn{fn}, _teardown{teardown} {}
+
+public:
+ void run() override
+ {
+ // TODO Introduce and call setup() function here
+ _fn->run();
+ _teardown();
+ }
+ void runSync() override { throw std::runtime_error{"runSync is needed just for profiling in Dataflow executor"}; }
+
+private:
+ IFunction *_fn;
+ std::function<void()> _teardown;
+};
+
+void ParallelExecutor::notify(uint32_t finished_job_id)
+{
+ std::unique_lock<std::mutex> lock{_mu_jobs};
+
+ DataflowExecutor::notify(finished_job_id);
+
+ lock.unlock();
+ _cv_jobs.notify_all();
+}
+
+ParallelExecutor::ParallelExecutor(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
+ CodeMap &&code_map)
+ : DataflowExecutor{model,
+ std::move(subgraphs),
+ operand_context,
+ std::move(lower_info),
+ std::move(tensor_mgrs),
+ std::move(code_map)}
+{
+ VERBOSE(ParallelExecutor) << "Constructing Parallel Executor" << std::endl;
+}
+
+void ParallelExecutor::executeImpl()
+{
+ // Init scheduler
+ // TODO Consider to have distinct backend set in LowerInfoMap
+ graph::BackendSet backends;
+ for (auto &itr : _lower_info->operation)
+ {
+ backends.add(itr.second->backend());
+ }
+ _scheduler = nnfw::cpp14::make_unique<ParallelScheduler>(backends);
+
+ assert(noWaitingJobs());
+
+ // Execution setup
+ _waiting_jobs.swap(_finished_jobs); // Move finished jobs to waiting jobs
+
+ for (uint32_t i = 0; i < _waiting_jobs.size(); ++i)
+ {
+ VERBOSE(ParallelExecutor) << i << ": " << _input_info[i] << std::endl;
+ if (_input_info[i] == 0)
+ {
+ emplaceToReadyJobs(i);
+ }
+ }
+ assert(!_ready_jobs.empty()); // Cannot begin if there are no initial jobs
+
+ VERBOSE(ParallelExecutor) << "INITIAL JOBS : " << _ready_jobs.size() << std::endl;
+
+ while (true)
+ {
+ std::unique_lock<std::mutex> lock{_mu_jobs};
+
+ if (_ready_jobs.empty())
+ {
+ _cv_jobs.wait(lock, [this] { return !_ready_jobs.empty() || noWaitingJobs(); });
+ // Check finish condition
+ if (_ready_jobs.empty() && noWaitingJobs())
+ {
+ break;
+ }
+ }
+
+ auto job = std::move(_ready_jobs.begin()->second);
+ _ready_jobs.erase(_ready_jobs.begin());
+
+ lock.unlock();
+
+ VERBOSE(ParallelExecutor) << "Assigning fn #" << job->index() << std::endl;
+
+ auto job_index = job->index();
+ auto teardown = [&, job_index]() { notify(job_index); };
+
+ _scheduler->assign(nnfw::cpp14::make_unique<HookFunction>(job->fn(), teardown), job->backend());
+ _finished_jobs[job_index] = std::move(job);
+ }
+
+ assert(noWaitingJobs());
+
+ // Wait for all the jobs done
+ _scheduler->finish();
+
+ // Reset input info for the next execution
+ _input_info = _initial_input_info;
+}
+
+} // namespace exec
+} // namespace neurun
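
The essential trick here is HookFunction: each job is wrapped so that the executor's notify() always runs right after the job body, which is what drives the dependency counts down and wakes the dispatch loop. A standalone sketch of that wrap-with-teardown pattern:

#include <functional>
#include <iostream>

struct Hook
{
  std::function<void()> fn;       // the job body
  std::function<void()> teardown; // the completion callback
  void run()
  {
    fn();
    teardown(); // always follows the body, like notify(job_index) above
  }
};

int main()
{
  Hook h{[] { std::cout << "job body" << std::endl; },
         [] { std::cout << "notify(job_index)" << std::endl; }};
  h.run();
}
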
diff --git a/runtimes/neurun/core/src/exec/ParallelExecutor.h b/runtimes/neurun/core/src/exec/ParallelExecutor.h
new file mode 100644
index 000000000..7a4673b9c
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ParallelExecutor.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_PARALLEL_EXECUTOR_H__
+#define __NEURUN_EXEC_PARALLEL_EXECUTOR_H__
+
+#include <list>
+#include <queue>
+#include <unordered_map>
+
+#include "FunctionSequence.h"
+#include "Job.h"
+#include "model/OperandIndexSequence.h"
+#include "model/Index.h"
+#include "model/Model.h"
+#include "cpp14/memory.h"
+#include "exec/DataflowExecutor.h"
+#include "ParallelScheduler.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+/**
+ * @brief Class to execute Graph in parallel
+ */
+class ParallelExecutor : public DataflowExecutor
+{
+protected:
+ void notify(uint32_t finished_job_id) override;
+
+public:
+ /**
+ * @brief Constructs a ParallelExecutor object
+ *
+ * @param model Model object
+ * @param operand_context (Only for input/output operand data access)
+ * @param lower_info LowerInfo object (Only to know input/output operands layout)
+ * @param code_map Compiled code map
+ * @param ranks Operation ranks for ordering execution
+ */
+ ParallelExecutor(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs, CodeMap &&code_map);
+
+ void executeImpl() override;
+
+private:
+ std::condition_variable _cv_jobs;
+ std::mutex _mu_jobs;
+ std::unique_ptr<ParallelScheduler> _scheduler;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_PARALLEL_EXECUTOR_H__
diff --git a/runtimes/neurun/core/src/exec/ParallelScheduler.cc b/runtimes/neurun/core/src/exec/ParallelScheduler.cc
new file mode 100644
index 000000000..44f1a5f08
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ParallelScheduler.cc
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ParallelScheduler.h"
+
+#include <cassert>
+
+#include "cpp14/memory.h"
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+WorkQueue::~WorkQueue()
+{
+ {
+ std::unique_lock<std::mutex> lock(_mu);
+ _state = State::FORCE_FINISHING;
+ }
+ _cv.notify_all();
+}
+
+void WorkQueue::operator()()
+{
+ while (true)
+ {
+ std::unique_ptr<IFunction> fn = nullptr;
+
+ {
+ std::unique_lock<std::mutex> lock{_mu};
+ _cv.wait(lock, [this] {
+ return (_state == State::FORCE_FINISHING) || (_state == State::FINISHING) ||
+ (_state == State::ONLINE && !_functions.empty());
+ });
+
+ if (_state == State::FORCE_FINISHING)
+ {
+ assert(_functions.empty() && "Terminating with unfinished jobs");
+ return;
+ }
+ else if (_state == State::FINISHING && _functions.empty())
+ {
+ return;
+ }
+ else
+ {
+ assert(((_state == State::FINISHING) || (_state == State::ONLINE)) && !_functions.empty());
+ fn = std::move(_functions.front());
+ _functions.pop();
+ }
+ }
+
+ assert(fn);
+ fn->run();
+ }
+}
+
+void WorkQueue::enqueue(std::unique_ptr<IFunction> &&fn)
+{
+ {
+ std::unique_lock<std::mutex> lock{_mu};
+ _functions.emplace(std::move(fn));
+ }
+ _cv.notify_one();
+}
+
+void WorkQueue::terminate()
+{
+ {
+ std::unique_lock<std::mutex> lock{_mu};
+ _state = State::FORCE_FINISHING;
+ }
+ _cv.notify_all();
+}
+
+void WorkQueue::finish()
+{
+ {
+ std::unique_lock<std::mutex> lock{_mu};
+ _state = State::FINISHING;
+ }
+ _cv.notify_all();
+}
+
+uint32_t WorkQueue::numJobsInQueue()
+{
+ std::unique_lock<std::mutex> lock{_mu};
+ return _functions.size();
+}
+
+ThreadPool::ThreadPool(uint32_t num_threads)
+{
+ assert(num_threads >= 1);
+
+ for (uint32_t i = 0; i < num_threads; i++)
+ {
+ _threads.emplace_back(std::ref(_worker));
+ }
+}
+
+ThreadPool::~ThreadPool()
+{
+ if (!_threads.empty())
+ {
+ _worker.terminate();
+ join();
+ }
+}
+
+void ThreadPool::enqueue(std::unique_ptr<IFunction> &&fn) { _worker.enqueue(std::move(fn)); }
+
+uint32_t ThreadPool::numJobsInQueue() { return _worker.numJobsInQueue(); }
+
+void ThreadPool::join()
+{
+ for (auto &thread : _threads)
+ {
+ thread.join();
+ }
+ _threads.clear();
+}
+
+void ThreadPool::finish()
+{
+ _worker.finish();
+ join();
+}
+
+ParallelScheduler::ParallelScheduler(const graph::BackendSet &backends)
+{
+ assert(!backends.empty());
+
+ for (auto backend : backends)
+ {
+ _thread_pools[backend] = nnfw::cpp14::make_unique<ThreadPool>();
+ }
+}
+
+void ParallelScheduler::assign(std::unique_ptr<IFunction> &&fn, const backend::Backend *backend)
+{
+ assert(!_thread_pools.empty());
+
+ _thread_pools.at(backend)->enqueue(std::move(fn));
+}
+
+void ParallelScheduler::finish()
+{
+ for (auto &itr : _thread_pools)
+ {
+ itr.second->finish();
+ }
+}
+
+} // namespace exec
+} // namespace neurun
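
A hypothetical standalone driver for the ThreadPool above, assuming the neurun headers are on the include path and that run()/runSync() are the IFunction virtuals to override; finish() flags the WorkQueue as FINISHING, lets it drain, and joins the worker thread:

#include <iostream>

#include "ParallelScheduler.h" // WorkQueue, ThreadPool
#include "cpp14/memory.h"

namespace
{
struct PrintFn : neurun::exec::IFunction
{
  void run() override { std::cout << "hello from worker" << std::endl; }
  void runSync() override { run(); }
};
} // namespace

int main()
{
  neurun::exec::ThreadPool pool{1};                  // one worker thread
  pool.enqueue(nnfw::cpp14::make_unique<PrintFn>()); // queued to the WorkQueue
  pool.finish();                                     // blocks until the job is done
}
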
diff --git a/runtimes/neurun/core/src/exec/ParallelScheduler.h b/runtimes/neurun/core/src/exec/ParallelScheduler.h
new file mode 100644
index 000000000..9660478e8
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ParallelScheduler.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_PARALLEL_SCHEDULER_H__
+#define __NEURUN_EXEC_PARALLEL_SCHEDULER_H__
+
+#include <unordered_map>
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <memory>
+#include <queue>
+#include <vector>
+#include <unordered_set>
+
+#include "exec/IFunction.h"
+#include "graph/BackendSet.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+// TODO Extract this class to a separate file
+class WorkQueue
+{
+public:
+ enum class State
+ {
+ ONLINE,
+ FINISHING,
+ FORCE_FINISHING
+ };
+
+public:
+ /**
+ * @brief Create WorkQueue object
+ */
+ WorkQueue() = default;
+ /**
+ * @brief Destroy WorkQueue object
+ */
+ ~WorkQueue();
+ /**
+ * @brief Thread entry function
+ */
+ void operator()();
+ /**
+ * @brief Push the given Task to the job queue
+ *
+ * @param fn Function to be executed(a job)
+ */
+ void enqueue(std::unique_ptr<IFunction> &&fn);
+ /**
+ * @brief Flag as terminating so all the worker threads can terminate
+ */
+ void terminate();
+ /**
+ * @brief Flag as finishing so the worker threads finish all remaining jobs and then terminate
+ */
+ void finish();
+ /**
+ * @brief Get the number of pending jobs. Even if this returns zero, WorkQueue threads may still
+ * be running
+ *
+ * @return Number of jobs in the queue
+ */
+ uint32_t numJobsInQueue();
+
+private:
+ State _state{State::ONLINE};
+ std::queue<std::unique_ptr<IFunction>> _functions;
+ std::mutex _mu;
+ std::condition_variable _cv;
+};
+
+// TODO Extract this class to a separate file
+class ThreadPool
+{
+public:
+ /**
+ * @brief Construct ThreadPool object
+ *
+ * @param num_threads Number of threads
+ */
+ ThreadPool(uint32_t num_threads = 1);
+ /**
+ * @brief Destroy ThreadPool object
+ */
+ ~ThreadPool();
+ /**
+ * @brief Enqueue a function
+ *
+ * @param fn A function to be queued
+ */
+ void enqueue(std::unique_ptr<IFunction> &&fn);
+ /**
+ * @brief Get number of jobs in worker's queue
+ *
+ * @return Number of jobs
+ */
+ uint32_t numJobsInQueue();
+
+ /**
+ * @brief Block until all jobs are finished
+ */
+ void finish();
+
+private:
+ void join();
+
+private:
+ WorkQueue _worker;
+ std::vector<std::thread> _threads;
+};
+
+class ParallelScheduler
+{
+public:
+ /**
+ * @brief Constructs ParallelScheduler object
+ *
+ * @param backends Backend set
+ */
+ ParallelScheduler(const graph::BackendSet &backends);
+ /**
+ * @brief Assign a task to the given backend
+ *
+ * @param[in] fn Function to be assigned
+ * @param[in] backend Target backend
+ */
+ void assign(std::unique_ptr<IFunction> &&fn, const backend::Backend *backend);
+ /**
+ * @brief Block until all jobs are finished
+ */
+ void finish();
+
+private:
+ std::unordered_map<const backend::Backend *, std::unique_ptr<ThreadPool>> _thread_pools;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_PARALLEL_SCHEDULER_H__
diff --git a/runtimes/neurun/core/src/exec/Sink.h b/runtimes/neurun/core/src/exec/Sink.h
new file mode 100644
index 000000000..7ec3efa22
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/Sink.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_SINK_H__
+#define __NEURUN_EXEC_SINK_H__
+
+#include <cassert>
+
+#include "cpp14/memory.h"
+#include "util/feature/nhwc/View.h"
+#include "util/feature/nchw/View.h"
+#include "util/Utils.h"
+#include <misc/feature/IndexIterator.h>
+
+namespace neurun
+{
+namespace exec
+{
+struct ISink
+{
+ virtual ~ISink() = default;
+
+ virtual void pull(::neurun::backend::operand::ITensor &tensor) const = 0;
+};
+
+// Create a second level of inheritance: the first level is used as the reference type at use sites
+template <typename T> class ITemplSink : public ISink
+{
+public:
+ ITemplSink(void *output_buffer, const size_t &output_size, const model::Shape &shape,
+ const bool copy)
+ : _output_buffer{reinterpret_cast<T *>(output_buffer)}, _output_size{output_size},
+ _shape{shape}, _copy{copy}
+ {
+ }
+
+protected:
+ void pullUnif(neurun::backend::operand::ITensor &tensor) const
+ {
+ auto input_buffer = tensor.buffer();
+ auto rank = _shape.rank();
+
+ if (!tensor.has_padding() && rank < 4 + _copy)
+ {
+ memcpy(_output_buffer, input_buffer, _output_size);
+ return;
+ }
+
+ switch (rank)
+ {
+ case 0:
+ case 1:
+ {
+ memcpy(_output_buffer, input_buffer, _output_size);
+ break;
+ }
+ case 2:
+ {
+ const int32_t copy_len = _shape.dim(1);
+
+ for (auto i = 0; i < _shape.dim(0); ++i)
+ {
+ neurun::util::Coordinates coords{i, 0};
+ memcpy(_output_buffer + i * copy_len, input_buffer + tensor.calcOffset(coords),
+ copy_len * sizeof(T));
+ }
+ break;
+ }
+ case 3:
+ {
+ const int32_t dim1 = _shape.dim(1);
+ const int32_t dim2 = _shape.dim(2);
+
+ for (auto i = 0; i < _shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < _shape.dim(1); ++j)
+ {
+ neurun::util::Coordinates coords{i, j, 0};
+ memcpy(_output_buffer + i * dim1 * dim2 + j * dim2,
+ input_buffer + tensor.calcOffset(coords), dim2 * sizeof(T));
+ }
+ }
+ break;
+ }
+ case 4:
+ {
+ if (_copy)
+ {
+ const int32_t dim1 = _shape.dim(1);
+ const int32_t dim2 = _shape.dim(2);
+ const int32_t dim3 = _shape.dim(3);
+
+ for (auto i = 0; i < _shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < _shape.dim(1); ++j)
+ {
+ for (auto k = 0; k < _shape.dim(2); ++k)
+ {
+ neurun::util::Coordinates coords{i, j, k, 0};
+ memcpy(_output_buffer + i * dim1 * dim2 * dim3 + j * dim2 * dim3 + k * dim3,
+ input_buffer + tensor.calcOffset(coords), dim3 * sizeof(T));
+ }
+ }
+ }
+ }
+ else
+ {
+ // TODO Support from nhwc to nchw
+ auto feature = _shape.asFeature(model::Layout::NHWC);
+
+ const util::feature::nchw::View<T> from{&tensor};
+ util::feature::nhwc::View<T> into{feature, _output_buffer, _output_size};
+
+ ::nnfw::misc::feature::iterate(feature)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
+ }
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI");
+ break;
+ }
+ }
+
+private:
+ T *_output_buffer;
+ const size_t _output_size;
+ const model::Shape _shape;
+ const bool _copy;
+};
+
+template <typename T> class PermutateSink final : public ITemplSink<T>
+{
+public:
+ PermutateSink(void *output_buffer, const size_t &output_size, const model::Shape &shape)
+ : ITemplSink<T>(output_buffer, output_size, shape, false)
+ {
+ }
+
+public:
+ void pull(neurun::backend::operand::ITensor &tensor) const override
+ {
+ ITemplSink<T>::pullUnif(tensor);
+ }
+};
+
+// Only supports an NHWC-format front end (NNAPI) for now
+template <typename T> class CopySink final : public ITemplSink<T>
+{
+public:
+ CopySink(void *output_buffer, const size_t &output_size, const model::Shape &shape)
+ : ITemplSink<T>(output_buffer, output_size, shape, true)
+ {
+ }
+
+public:
+ void pull(neurun::backend::operand::ITensor &tensor) const override
+ {
+ ITemplSink<T>::pullUnif(tensor);
+ }
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_SINK_H__
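
The rank-2 branch of pullUnif compacts a possibly padded tensor row by row: calcOffset() resolves each row's start in the (strided) source, and copy_len elements land contiguously in the output. The same idea in a standalone sketch, with an explicit stride standing in for padding:

#include <cstring>
#include <iostream>

int main()
{
  const int rows = 2, cols = 3, stride = 4; // stride > cols models padding
  float padded[rows * stride] = {1, 2, 3, -1, // -1 marks padding slots
                                 4, 5, 6, -1};
  float dense[rows * cols];

  for (int i = 0; i < rows; ++i)      // coords{i, 0}
    std::memcpy(dense + i * cols,     // _output_buffer + i * copy_len
                padded + i * stride,  // input_buffer + tensor.calcOffset(coords)
                cols * sizeof(float));

  for (float v : dense)
    std::cout << v << ' '; // 1 2 3 4 5 6
  std::cout << std::endl;
}
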
diff --git a/runtimes/neurun/core/src/exec/Source.h b/runtimes/neurun/core/src/exec/Source.h
new file mode 100644
index 000000000..5b914f714
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/Source.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_SOURCE_H__
+#define __NEURUN_EXEC_SOURCE_H__
+
+#include <cassert>
+
+#include "cpp14/memory.h"
+#include "util/feature/nchw/View.h"
+#include "util/feature/nhwc/Reader.h"
+#include "util/Utils.h"
+#include <misc/feature/IndexIterator.h>
+#include "model/Shape.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+struct ISource
+{
+ virtual ~ISource() = default;
+
+ virtual void push(::neurun::backend::operand::ITensor &tensor) const = 0;
+};
+
+// Second-level inheritance: the first level (ISource) is used as a reference type at use sites
+template <typename T> class ITemplSource : public ISource
+{
+public:
+ ITemplSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape,
+ const bool copy)
+ : _input_buffer{reinterpret_cast<const T *>(input_buffer)}, _input_size{input_size},
+ _shape{shape}, _copy(copy)
+ {
+ }
+
+ virtual void push(::neurun::backend::operand::ITensor &tensor) const = 0;
+
+protected:
+ void pushUnif(neurun::backend::operand::ITensor &tensor) const
+ {
+ auto output_buffer = tensor.buffer();
+ auto rank = _shape.rank();
+
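+    // Fast path: when the tensor has no padding, a single memcpy suffices for
+    // rank < 4, or rank <= 4 when merely copying (layout permutation only
+    // matters for rank-4 feature maps).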
+ if (!tensor.has_padding() && rank < 4 + _copy)
+ {
+ memcpy(output_buffer, _input_buffer, _input_size);
+ return;
+ }
+
+ switch (rank)
+ {
+ case 0:
+ case 1:
+ {
+ memcpy(output_buffer, _input_buffer, _input_size);
+ break;
+ }
+ case 2:
+ {
+ const int32_t copy_len = _shape.dim(1);
+
+ for (auto i = 0; i < _shape.dim(0); ++i)
+ {
+ neurun::util::Coordinates coords{i, 0};
+ memcpy(output_buffer + tensor.calcOffset(coords), _input_buffer + i * copy_len,
+ copy_len * sizeof(T));
+ }
+ break;
+ }
+ case 3:
+ {
+ const int32_t dim1 = _shape.dim(1);
+ const int32_t dim2 = _shape.dim(2);
+
+ for (auto i = 0; i < _shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < _shape.dim(1); ++j)
+ {
+ neurun::util::Coordinates coords{i, j, 0};
+ memcpy(output_buffer + tensor.calcOffset(coords),
+ _input_buffer + i * dim1 * dim2 + j * dim2, dim2 * sizeof(T));
+ }
+ }
+ break;
+ }
+ case 4:
+ {
+ if (_copy)
+ {
+ const int32_t dim1 = _shape.dim(1);
+ const int32_t dim2 = _shape.dim(2);
+ const int32_t dim3 = _shape.dim(3);
+ for (auto i = 0; i < _shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < _shape.dim(1); ++j)
+ {
+ for (auto k = 0; k < _shape.dim(2); ++k)
+ {
+ neurun::util::Coordinates coords{i, j, k, 0};
+ memcpy(output_buffer + tensor.calcOffset(coords),
+ _input_buffer + i * dim1 * dim2 * dim3 + j * dim2 * dim3 + k * dim3,
+ dim3 * sizeof(T));
+ }
+ }
+ }
+ }
+ else
+ {
+ auto feature = _shape.asFeature(model::Layout::NHWC);
+
+ const util::feature::nhwc::Reader<T> from{feature, _input_buffer, _input_size};
+ util::feature::nchw::View<T> into{&tensor};
+
+ ::nnfw::misc::feature::iterate(feature)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
+ }
+
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI");
+ break;
+ }
+ }
+
+private:
+ const T *_input_buffer;
+ const size_t _input_size;
+ const model::Shape _shape;
+ const bool _copy;
+};
+
+template <typename T> class PermutateSource final : public ITemplSource<T>
+{
+public:
+ PermutateSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape)
+ : ITemplSource<T>(input_buffer, input_size, shape, false)
+ {
+ }
+
+public:
+ void push(neurun::backend::operand::ITensor &tensor) const override
+ {
+ // do NHWC_TO_NCHW permutation
+ ITemplSource<T>::pushUnif(tensor);
+ }
+};
+
+// Only supports an NHWC-format front-end (NNAPI) for now
+template <typename T> class CopySource final : public ITemplSource<T>
+{
+public:
+ CopySource(const void *input_buffer, const size_t &input_size, const model::Shape &shape)
+ : ITemplSource<T>(input_buffer, input_size, shape, true)
+ {
+ }
+
+public:
+ void push(neurun::backend::operand::ITensor &tensor) const override
+ {
+ ITemplSource<T>::pushUnif(tensor);
+ }
+};
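+
+// Illustrative sketch (not part of the build): a hypothetical caller with a
+// user-provided `in_buf`/`in_size` could feed an input tensor like so:
+//
+//   CopySource<float> source{in_buf, in_size, shape};  // same-layout copy
+//   // or: PermutateSource<float> source{in_buf, in_size, shape};  // NHWC -> NCHW
+//   source.push(tensor);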
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_SOURCE_H__
diff --git a/runtimes/neurun/core/src/exec/interp/Buffer.h b/runtimes/neurun/core/src/exec/interp/Buffer.h
new file mode 100644
index 000000000..3528e0819
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/Buffer.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Buffer.h
+ * @brief This file contains Buffer interface and InternalBuffer, ExternalBuffer class
+ */
+#ifndef __NEURUN_EXEC_INTERP_BUFFER_H__
+#define __NEURUN_EXEC_INTERP_BUFFER_H__
+
+#include <cpp14/memory.h>
+
+#include "model/Data.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+/**
+ * @brief Interface for writable data area
+ */
+class Buffer : public model::Data
+{
+public:
+ /**
+ * @brief Return writable pointer for data area
+ * @return Writable pointer
+ */
+ virtual uint8_t *baseWritable(void) const = 0;
+};
+
+/**
+ * @brief Class for internally allocated data area
+ */
+class InternalBuffer final : public Buffer
+{
+public:
+ InternalBuffer(size_t size) : _base{nnfw::cpp14::make_unique<uint8_t[]>(size)}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ size_t size(void) const override { return _size; }
+ const uint8_t *base(void) const override { return _base.get(); }
+ uint8_t *baseWritable(void) const override { return _base.get(); }
+
+private:
+ std::unique_ptr<uint8_t[]> _base;
+ size_t _size;
+};
+
+/**
+ * @brief Class for data area from outside
+ */
+class ExternalBuffer final : public Buffer
+{
+public:
+ ExternalBuffer(uint8_t *base, size_t size) : _base{base}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ size_t size(void) const override { return _size; }
+ const uint8_t *base(void) const override { return _base; }
+ uint8_t *baseWritable(void) const override { return _base; }
+
+private:
+ uint8_t *_base;
+ size_t _size;
+};
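+
+// Usage note: InternalBuffer owns its allocation (used for intermediate
+// operands), while ExternalBuffer merely wraps caller-owned memory such as a
+// model output buffer; ExecEnv and ExecManager use both, respectively.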
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_BUFFER_H__
diff --git a/runtimes/neurun/core/src/exec/interp/ExecEnv.h b/runtimes/neurun/core/src/exec/interp/ExecEnv.h
new file mode 100644
index 000000000..c270d723c
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/ExecEnv.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ExecEnv.h
+ * @brief This file contains ExecEnv to access interpreter tensor and execution status
+ */
+#ifndef __NEURUN_EXEC_INTERP_EXEC_ENV_H_
+#define __NEURUN_EXEC_INTERP_EXEC_ENV_H_
+
+#include <unordered_set>
+
+#include "model/Model.h"
+#include "Tensor.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+/**
+ * @brief Class to gather the interpreter execution environment
+ *        Each interpreter instance owns its own execution environment
+ */
+class ExecEnv
+{
+public:
+ /**
+ * @brief Construct a new Exec Env object (deleted)
+ */
+ ExecEnv(void) = delete;
+ /**
+ * @brief Construct a new ExecEnv object
+ * @param[in] model Model to execute by interpreter
+ */
+ ExecEnv(const std::shared_ptr<const model::Model> &model) : _model{model}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Return model to execute
+ * @return Model
+ */
+ const model::Model &model(void) const { return *_model; }
+ /**
+   * @brief Assign a tensor that has an allocated or assigned buffer to the environment
+ * @param[in] index Tensor index
+ * @param[in] tensor Tensor
+ */
+ void assignTensor(const model::OperandIndex index, std::shared_ptr<ITensor> tensor)
+ {
+ assert(tensor->bufferRO() != nullptr);
+ _tensors.emplace(index, tensor);
+ }
+
+ /**
+ * @brief Return tensor pointer in environment
+ * @param[in] index Tensor index
+ * @return Tensor pointer
+ */
+ const ITensor *tensorAt(const model::OperandIndex index) const
+ {
+ return _tensors.at(index).get();
+ }
+
+ /**
+   * @brief Check whether the environment contains a tensor
+   * @param[in] index Tensor index
+   * @return @c true if the environment contains the tensor, otherwise @c false
+ */
+ bool contains(const model::OperandIndex index) const
+ {
+ return (_tensors.find(index) != _tensors.end());
+ }
+
+ /**
+ * @brief Allocate tensor using operand info
+ * @param[in] index Tensor index
+ * @param[in] info Operand info
+   * @note If the tensor is already allocated, this does nothing
+   * @todo Smarter allocation policy
+ */
+ void allocateIfNeeded(const model::OperandIndex index, const model::OperandInfo &info)
+ {
+ // already allocated, or constant
+ if (contains(index))
+ {
+ return;
+ }
+
+ auto tensor = std::make_shared<Tensor>(info);
+ tensor->setBuffer(std::make_shared<InternalBuffer>(tensor->total_size()));
+ assignTensor(index, tensor);
+ _buffers.insert(index);
+ }
+
+ /**
+ * @brief Allocate read-only tensor and share data with other tensor
+ * @param[in] index Tensor index
+ * @param[in] info Operand info
+   * @param[in] index_to_share Tensor index that has the data to share
+ */
+ void allocateAndShareIfNeeded(const model::OperandIndex index, const model::OperandInfo &info,
+ const model::OperandIndex index_to_share)
+ {
+ if (!contains(index_to_share))
+ {
+ throw std::runtime_error{"Cannot find tensor to share data"};
+ }
+
+ // already allocated
+ if (contains(index))
+ {
+ return;
+ }
+ else
+ {
+ auto tensor = std::make_shared<ROTensor>(info);
+ tensor->setData(tensorAt(index_to_share)->shareData());
+ assignTensor(index, tensor);
+ _buffers.insert(index);
+ }
+ }
+
+ /**
+   * @brief Free the buffer if it was allocated by allocateIfNeeded
+   * @param[in] index Tensor index
+   * @note If the buffer was assigned from outside, this does nothing
+ */
+ void freeIfAllocated(const model::OperandIndex index)
+ {
+ if (_buffers.find(index) != _buffers.end())
+ {
+ _tensors.at(index)->releaseData();
+ }
+ }
+
+private:
+ std::shared_ptr<const model::Model> _model;
+ // Tensor map to use in interpreter
+  // It should only map tensors that have an allocated or assigned buffer pointer
+ std::unordered_map<model::OperandIndex, std::shared_ptr<ITensor>> _tensors;
+  // Tensors allocated by allocateIfNeeded (owned buffers)
+ std::unordered_set<model::OperandIndex> _buffers;
+};
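+
+// Illustrative sketch (not part of the build) of the typical kernel-side flow,
+// assuming hypothetical indices `in_index`/`out_index` and operand info `info`:
+//
+//   env->allocateIfNeeded(out_index, info);       // prepare output storage
+//   const ITensor *in = env->tensorAt(in_index);  // read inputs via bufferRO()
+//   // ... run the kernel ...; the interpreter later calls
+//   // env->freeIfAllocated(in_index) once all uses of the input are executed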
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_EXEC_ENV_H_
diff --git a/runtimes/neurun/core/src/exec/interp/ExecManager.cc b/runtimes/neurun/core/src/exec/interp/ExecManager.cc
new file mode 100644
index 000000000..96f503eea
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/ExecManager.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExecManager.h"
+#include "ExecEnv.h"
+#include "Interpreter.h"
+
+#include "util/logging.h"
+
+#include <cpp14/memory.h>
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+void ExecManager::execute(const IODescription &desc)
+{
+ /************************************************************************
+ * Prepare execution model (submodel)
+   * A divided model may be executed here eventually,
+   * but for now assume the whole model inference is done by the interpreter
+ ***********************************************************************/
+ model::OperandIndexMap<std::shared_ptr<ITensor>> tensor_map;
+
+ for (uint32_t n = 0; n < _model->inputs.size(); n++)
+ {
+ neurun::model::IOIndex index{n};
+ const auto input_index = _model->inputs.at(index);
+ const auto &input = *desc.inputs.at(n);
+
+ auto input_tensor = std::make_shared<ROTensor>(input.info);
+ input_tensor->setData(std::make_shared<const model::ExternalData>(
+ reinterpret_cast<const uint8_t *>(input.buffer), input.size));
+ tensor_map[input_index] = input_tensor;
+ }
+
+ for (uint32_t n = 0; n < _model->outputs.size(); n++)
+ {
+ neurun::model::IOIndex index{n};
+ const auto output_index = _model->outputs.at(index);
+ const auto &output = *desc.outputs.at(n);
+
+ auto output_tensor = std::make_shared<Tensor>(output.info);
+ output_tensor->setBuffer(
+ std::make_shared<ExternalBuffer>(reinterpret_cast<uint8_t *>(output.buffer), output.size));
+ tensor_map[output_index] = output_tensor;
+ }
+
+ /************************************************************************
+ * Prepare execution environment
+   * The execution environment will be assigned to the invoked interpreter instance
+ ***********************************************************************/
+
+ std::unique_ptr<ExecEnv> interp_env = nnfw::cpp14::make_unique<ExecEnv>(_model);
+
+ // Assign input tensor into interpreter execution environment
+ for (auto index : _model->inputs)
+ {
+ if (tensor_map.find(index) != tensor_map.end())
+ {
+ VERBOSE(INTERPRETER) << "Assign input tensor. operand index:" << index.value() << std::endl;
+ interp_env->assignTensor(index, tensor_map.at(index));
+ }
+ }
+
+ // Assign output tensor into interpreter execution environment
+ for (auto index : _model->outputs)
+ {
+ if (tensor_map.find(index) != tensor_map.end())
+ {
+ VERBOSE(INTERPRETER) << "Assign output tensor. operand index: " << index.value() << std::endl;
+ interp_env->assignTensor(index, tensor_map.at(index));
+ }
+ }
+
+ // Allocate constant tensor
+ _model->operands.iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ if (obj.isConstant())
+ {
+ VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. operand index:" << ind.value()
+ << std::endl;
+
+ auto const_tensor = std::make_shared<ROTensor>(obj.info());
+      // Assume that the interpreter's tensor layout is the same as the model's (NHWC)
+ const_tensor->setData(
+ std::make_shared<model::ExternalData>(obj.data().base(), obj.info().total_size()));
+ interp_env->assignTensor(ind, const_tensor);
+ }
+ });
+
+ /*****************************************************************************
+ * Invoke interpreter
+ ****************************************************************************/
+
+ Interpreter interp(std::move(interp_env));
+ interp.run();
+
+ /*****************************************************************************
+ * Invoked interpreter run is finished
+ ****************************************************************************/
+
+  // If the interpreter executes a submodel:
+  // 1. Save the submodel's output tensors into tensor_map
+  // 2. Generate a new ExecEnv for the next interpretation
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/ExecManager.h b/runtimes/neurun/core/src/exec/interp/ExecManager.h
new file mode 100644
index 000000000..77486dcaf
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/ExecManager.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ExecManager.h
+ * @brief This file contains ExecManager class\n
+ * to manage interpreter execution and environment
+ */
+#ifndef __NEURUN_EXEC_INTERP_EXEC_MANAGER_H_
+#define __NEURUN_EXEC_INTERP_EXEC_MANAGER_H_
+
+#include "model/OperandIndexMap.h"
+#include "model/OperationIndexMap.h"
+#include "exec/IExecutor.h"
+#include "Tensor.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+/**
+ * @brief Class to execute model using interpreter
+ */
+class ExecManager final : public IExecutor
+{
+public:
+ ExecManager(const std::shared_ptr<const model::Model> &model) : _model{model}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Return graph model
+ * @return Graph model
+ */
+ const model::Model &model() override { return *_model; }
+  void setIndexedRanks(std::shared_ptr<model::OperationIndexMap<int64_t>>) override
+  {
+    // Not implemented
+  }
+ /**
+ * @brief Start execution
+ * @note It should be called after setting input and output buffer
+ */
+ void execute(const IODescription &desc) final;
+
+private:
+ std::shared_ptr<const model::Model> _model;
+ model::OperandIndexMap<std::shared_ptr<ITensor>> _tensor_map;
+};
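+
+// Illustrative sketch (not part of the build), assuming a prepared `model`
+// (a shared_ptr<const model::Model>) and an IODescription `desc` whose
+// input/output buffers are already set:
+//
+//   ExecManager manager{model};
+//   manager.execute(desc);  // runs the whole model on the interpreter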
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_EXEC_MANAGER_H_
diff --git a/runtimes/neurun/core/src/exec/interp/Interpreter.cc b/runtimes/neurun/core/src/exec/interp/Interpreter.cc
new file mode 100644
index 000000000..81de27c36
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/Interpreter.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Interpreter.h"
+
+#include <stack>
+#include <unordered_set>
+
+#include "Registration.h"
+
+#include "model/OperandIndexMap.h"
+#include "util/logging.h"
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+// TODO more structured execution kernel implementation
+// TODO use cker for execution
+// TODO divide tensor prepare and execution
+// TODO introduce memory manager (buffer allocate and free)
+class OperationExecutor : model::OperationVisitor
+{
+public:
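+  // X-macro: including Operations.lst below expands the OP() entries into one
+  // enum value per internal operation name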
+#define OP(InternalName, IsNnApi) InternalName,
+ enum class NodeName
+ {
+#include "model/Operations.lst"
+ };
+#undef OP
+
+public:
+ OperationExecutor(ExecEnv *env) : _env{env}
+ {
+ _kernels[NodeName::AddNode] = getAddNode();
+ _kernels[NodeName::Conv2DNode] = getConv2DNode();
+ _kernels[NodeName::MaxPool2DNode] = getMaxPool2DNode();
+ _kernels[NodeName::ConcatNode] = getConcatNode();
+ _kernels[NodeName::AvgPool2DNode] = getAvgPool2DNode();
+ _kernels[NodeName::FullyConnectedNode] = getFullyConnectedNode();
+ _kernels[NodeName::SoftmaxNode] = getSoftMaxNode();
+ _kernels[NodeName::ReshapeNode] = getReshapeNode();
+ _kernels[NodeName::DepthwiseConv2DNode] = getDepthwiseConvNode();
+ }
+
+ void execute(const model::OperationIndex &idx)
+ {
+ const auto nodeName = _env->model().operations.at(idx).getName();
+ VERBOSE(INTERPRETER) << "Prepare output operands and execute " << nodeName
+ << " operation (id: " << idx.value() << ")" << std::endl;
+ _env->model().operations.at(idx).accept(*this);
+ }
+
+private:
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &node) override \
+ { \
+ if (_kernels[NodeName::InternalName]->prepare != nullptr) \
+ { \
+ _kernels[NodeName::InternalName]->prepare(_env, node); \
+ } \
+ _kernels[NodeName::InternalName]->invoke(_env, node); \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+private:
+ ExecEnv *_env;
+ std::unordered_map<NodeName, OpKernel *> _kernels;
+};
+
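+// run() performs a dataflow-style traversal: operands whose producers have
+// finished are pushed onto a stack, and an operation executes as soon as all
+// of its inputs have been marked ready.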
+void Interpreter::run()
+{
+ VERBOSE(INTERPRETER) << "Interpreter is invoked " << std::endl;
+
+ // operand_stack: save operands prepared to use
+ std::stack<model::OperandIndex> operand_stack;
+
+  // Note: We should push inputs first, then constants.
+  //       We use use-def chains to find operations ready for execution,
+  //       but use-def cannot handle parameters (usually constants, but not always).
+  // Note: If all model inputs are constant, this may not work (depending on
+  //       tensor order), but that scenario is unlikely to exist.
+ for (auto ind : _env->model().inputs)
+ {
+ VERBOSE(INTERPRETER) << "Input: Push to operand stack " << ind.value() << std::endl;
+
+ operand_stack.push(ind);
+ }
+
+ _env->model().operands.iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ if (obj.isConstant())
+ {
+ VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl;
+
+ operand_stack.push(ind);
+ }
+ });
+
+ // Execution
+ std::unordered_set<model::OperandIndex> ready_check;
+ std::unordered_set<model::OperationIndex> executed;
+ OperationExecutor executor{_env.get()};
+ while (!operand_stack.empty())
+ {
+ const auto current_operand_index = operand_stack.top();
+ operand_stack.pop();
+ VERBOSE(INTERPRETER) << "Poped operand " << current_operand_index.value()
+ << " is checked ready to use" << std::endl;
+
+ assert(ready_check.find(current_operand_index) == ready_check.end());
+ ready_check.insert(current_operand_index);
+
+    // Find ready operations by scanning the uses of the current operand
+ std::stack<model::OperationIndex> operation_stack;
+ const auto use_operators = _env->model().operands.at(current_operand_index).getUses();
+ for (auto use_operator : use_operators.list())
+ {
+ // Assumption: all parameters are ready to use
+ bool operator_ready = true;
+ for (auto input_index : _env->model().operations.at(use_operator).getInputs())
+ {
+ if (ready_check.find(input_index) == ready_check.end())
+ {
+ operator_ready = false;
+ break;
+ }
+ }
+
+ if (operator_ready)
+ {
+ VERBOSE(INTERPRETER) << "Ready to execute operation " << use_operator.value() << std::endl;
+ operation_stack.push(use_operator);
+ }
+ }
+
+ while (!operation_stack.empty())
+ {
+ const auto current_operation_index = operation_stack.top();
+ operation_stack.pop();
+ VERBOSE(INTERPRETER) << "Poped operation: " << current_operation_index.value() << "("
+ << _env->model().operations.at(current_operation_index).getName() << ")"
+ << std::endl;
+
+ // execution
+ // 1. Prepare output tensor
+ // 2. Call operation kernel
+ executor.execute(current_operation_index);
+ executed.insert(current_operation_index);
+
+ // 3. Push each output into operand stack
+ const auto def_operands = _env->model().operations.at(current_operation_index).getOutputs();
+ for (auto def_operand : def_operands)
+ {
+ VERBOSE(INTERPRETER) << "Buffer: Push to operand stack " << def_operand.value()
+ << std::endl;
+ operand_stack.push(def_operand);
+ }
+
+      // 4. Free input operand buffers whose lifetime has ended
+ for (auto input_index : _env->model().operations.at(current_operation_index).getInputs())
+ {
+ const auto use_operators = _env->model().operands.at(input_index).getUses();
+ bool dead_buffer = true;
+ for (auto use_operator : use_operators.list())
+ {
+ if (executed.find(use_operator) == executed.end())
+ {
+ dead_buffer = false;
+ break;
+ }
+ }
+
+ if (dead_buffer)
+ {
+ _env->freeIfAllocated(input_index);
+ }
+ }
+ }
+ }
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/Interpreter.h b/runtimes/neurun/core/src/exec/interp/Interpreter.h
new file mode 100644
index 000000000..1b73592b3
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/Interpreter.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Interpreter.h
+ * @brief This file contains Interpreter class for interpretation
+ */
+#ifndef __NEURUN_EXEC_INTERP_INTERPRETER_H__
+#define __NEURUN_EXEC_INTERP_INTERPRETER_H__
+
+#include "ExecEnv.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+/**
+ * @brief Class for interpretation
+ */
+class Interpreter
+{
+
+public:
+ /**
+ * @brief Construct a new Interpreter object (deleted)
+ */
+ Interpreter() = delete;
+ /**
+ * @brief Construct a new Interpreter object
+   * @param[in] env Execution environment for this interpreter
+ */
+ Interpreter(std::unique_ptr<ExecEnv> env) : _env{std::move(env)}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Run interpreter until there is no operation to execute
+ */
+ void run();
+
+private:
+ std::unique_ptr<ExecEnv> _env;
+};
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_INTERPRETER_H__
diff --git a/runtimes/neurun/core/src/exec/interp/Registration.h b/runtimes/neurun/core/src/exec/interp/Registration.h
new file mode 100644
index 000000000..37c591f9d
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/Registration.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_INTERP_REGISTRATION_H__
+#define __NEURUN_EXEC_INTERP_REGISTRATION_H__
+
+#include "ExecEnv.h"
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+struct OpKernel
+{
+ std::function<void(ExecEnv *, const model::Operation &)> prepare;
+ std::function<void(const ExecEnv *, const model::Operation &)> invoke;
+};
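+
+// Contract (as used by the interpreter): `prepare` may mutate the environment,
+// e.g. to allocate output tensors, and may be omitted (it is null-checked
+// before use); `invoke` only reads the environment and runs the computation.
+// Each getter below returns a pointer to a function-local static kernel.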
+
+// Defined in operations/ directory
+OpKernel *getAddNode();
+OpKernel *getConv2DNode();
+OpKernel *getMaxPool2DNode();
+OpKernel *getConcatNode();
+OpKernel *getAvgPool2DNode();
+OpKernel *getFullyConnectedNode();
+OpKernel *getSoftMaxNode();
+OpKernel *getDepthwiseConvNode();
+OpKernel *getReshapeNode();
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_REGISTRATION_H__
diff --git a/runtimes/neurun/core/src/exec/interp/Tensor.cc b/runtimes/neurun/core/src/exec/interp/Tensor.cc
new file mode 100644
index 000000000..becb73786
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/Tensor.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tensor.h"
+
+#define NO_USE(a) (void)(a)
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+size_t ROTensor::calcOffset(const neurun::util::Coordinates &coords) const
+{
+ NO_USE(coords);
+ throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now.");
+}
+
+size_t Tensor::calcOffset(const neurun::util::Coordinates &coords) const
+{
+ NO_USE(coords);
+ throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now.");
+}
+
+model::Layout ROTensor::layout() const
+{
+  // TODO Change to return the frontend layout
+ return model::Layout::NHWC;
+}
+
+model::Layout Tensor::layout() const
+{
+  // TODO Change to return the frontend layout
+ return model::Layout::NHWC;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/Tensor.h b/runtimes/neurun/core/src/exec/interp/Tensor.h
new file mode 100644
index 000000000..c8237de1e
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/Tensor.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Tensor.h
+ * @brief This file contains ITensor interface, ROTensor class, and Tensor class
+ */
+#ifndef __NEURUN_EXEC_INTERP_TENSOR_H__
+#define __NEURUN_EXEC_INTERP_TENSOR_H__
+
+#include "Buffer.h"
+
+#include "model/OperandInfo.h"
+#include "backend/operand/ITensor.h"
+#include "model/Layout.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+/**
+ * @brief Interface to handle Tensor in interpreter
+ */
+class ITensor : public backend::operand::ITensor
+{
+public:
+ virtual ~ITensor() = default;
+
+public:
+ virtual uint8_t *buffer() const = 0;
+ /**
+ * @brief Return shared pointer for buffer
+ * @return Buffer shared pointer
+ */
+ virtual std::shared_ptr<const Buffer> shareBuffer() const = 0;
+ /**
+ * @brief Return read-only buffer pointer
+ * @return Read-only buffer pointer
+ */
+ virtual const uint8_t *bufferRO() const = 0;
+ /**
+ * @brief Return shared pointer for data
+ * @return Data shared pointer
+ */
+ virtual std::shared_ptr<const model::Data> shareData() const = 0;
+ /**
+ * @brief Set internal/external buffer
+ * @param[in] buffer Buffer pointer
+ */
+ virtual void setBuffer(std::shared_ptr<const Buffer> buffer) = 0;
+ /**
+ * @brief Set data reference (including constant, input)
+ * @param[in] data Data pointer
+ */
+ virtual void setData(std::shared_ptr<const model::Data> data) = 0;
+ virtual void releaseData() = 0;
+
+ virtual size_t total_size() const = 0;
+ virtual size_t dimension(size_t index) const = 0;
+ virtual size_t num_dimensions() const = 0;
+ virtual size_t calcOffset(const util::Coordinates &coords) const = 0;
+
+ virtual bool has_padding() const = 0;
+ /**
+ * @brief Return data type of tensor
+ * @return Data type of tensor
+ */
+ virtual model::DataType data_type() const = 0;
+ /**
+ * @brief Return TensorInfo
+ * @return TensorInfo
+ */
+ virtual const model::OperandInfo &tensorInfo() const = 0;
+ /**
+ * @brief Return number of elements
+ * @return Number of elements
+ */
+ virtual uint64_t num_elements() const = 0;
+};
+
+/**
+ * @brief Class to handle tensor in interpreter as read-only
+ */
+class ROTensor final : public ITensor
+{
+public:
+ ROTensor() = delete;
+ ROTensor(const model::OperandInfo &info) : _info(info)
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint8_t *buffer() const override { throw std::runtime_error{"Read only tensor"}; }
+ std::shared_ptr<const Buffer> shareBuffer() const override
+ {
+ throw std::runtime_error{"Read only tensor"};
+ }
+ const uint8_t *bufferRO() const override { return _data->base(); }
+ std::shared_ptr<const model::Data> shareData() const override { return _data; }
+ void setBuffer(std::shared_ptr<const Buffer> buffer) override { _data = buffer; }
+ void setData(std::shared_ptr<const model::Data> data) override { _data = data; }
+ void releaseData() override { _data = nullptr; }
+
+ size_t total_size() const override { return _info.total_size(); }
+ size_t dimension(size_t index) const override { return _info.shape().dim(index); }
+ size_t num_dimensions() const override { return _info.shape().rank(); }
+ size_t calcOffset(const util::Coordinates &coords) const override;
+ model::Layout layout() const override;
+ bool has_padding() const override { return false; }
+ model::DataType data_type() const override { return _info.typeInfo().type(); }
+ const model::OperandInfo &tensorInfo() const override { return _info; }
+  uint64_t num_elements() const override { return _info.shape().num_elements(); }
+
+private:
+ const model::OperandInfo _info;
+ std::shared_ptr<const model::Data> _data{nullptr};
+};
+
+/**
+ * @brief Class to handle tensor in interpreter as writable
+ */
+class Tensor final : public ITensor
+{
+public:
+ Tensor() = delete;
+ Tensor(const model::OperandInfo &info) : _info(info)
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint8_t *buffer() const override { return _buffer->baseWritable(); }
+  std::shared_ptr<const Buffer> shareBuffer() const override { return _buffer; }
+ const uint8_t *bufferRO() const override { return _buffer->base(); }
+ std::shared_ptr<const model::Data> shareData() const override { return _buffer; }
+ void setBuffer(std::shared_ptr<const Buffer> buffer) override { _buffer = buffer; }
+ void setData(std::shared_ptr<const model::Data>) override
+ {
+ throw std::runtime_error{"Passed data may read-only"};
+ }
+ void releaseData() override { _buffer = nullptr; }
+
+ size_t total_size() const override { return _info.total_size(); }
+ size_t dimension(size_t index) const override { return _info.shape().dim(index); }
+ size_t num_dimensions() const override { return _info.shape().rank(); }
+ size_t calcOffset(const util::Coordinates &coords) const override;
+ model::Layout layout() const override;
+ bool has_padding() const override { return false; }
+ model::DataType data_type() const override { return _info.typeInfo().type(); }
+ const model::OperandInfo &tensorInfo() const override { return _info; }
+  uint64_t num_elements() const override { return _info.shape().num_elements(); }
+
+private:
+ const model::OperandInfo _info;
+ std::shared_ptr<const Buffer> _buffer{nullptr};
+};
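+
+// Usage note: ROTensor wraps constant or input data via setData(), while
+// Tensor wraps an Internal/ExternalBuffer via setBuffer(); ExecManager uses
+// exactly this split for model inputs and outputs.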
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_TENSOR_H__
diff --git a/runtimes/neurun/core/src/exec/interp/operations/Add.cc b/runtimes/neurun/core/src/exec/interp/operations/Add.cc
new file mode 100644
index 000000000..666c3cba6
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/Add.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/Add.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/AddNode.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace add
+{
+
+void prepareAdd(ExecEnv *env, const model::Operation &node)
+{
+ const auto &add_node = nnfw::misc::polymorphic_downcast<const model::operation::AddNode &>(node);
+
+ const auto lhs_index = node.getInputs().at(add_node.LHS);
+ const auto rhs_index = node.getInputs().at(add_node.RHS);
+ const auto out_index = node.getOutputs().at(0);
+
+  // Check that the lhs shape is the same as the rhs shape (broadcast not handled yet)
+ const auto lhs_tensor = env->tensorAt(lhs_index);
+ const auto rhs_tensor = env->tensorAt(rhs_index);
+ UNUSED_RELEASE(rhs_tensor);
+
+  // Check that lhs shape and type are the same as rhs
+ // TODO Util function to compare TensorInfo
+ // TODO Handle broadcasting
+ assert(lhs_tensor->data_type() == rhs_tensor->data_type());
+ assert(lhs_tensor->num_dimensions() == rhs_tensor->num_dimensions());
+ for (uint32_t i = 0; i < lhs_tensor->num_dimensions(); i++)
+ {
+ assert(lhs_tensor->dimension(i) == rhs_tensor->dimension(i));
+ }
+
+  // Output shape and type should be the same as the input's (broadcast not considered)
+  auto output_info = lhs_tensor->tensorInfo();
+  // The output may already be allocated (e.g. a model output)
+ env->allocateIfNeeded(out_index, output_info);
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+  // Check that lhs shape and type are the same as the output's
+ // TODO Util function to compare TensorInfo
+ // TODO Handle broadcasting
+ assert(lhs_tensor->data_type() == out_tensor->data_type());
+ assert(lhs_tensor->num_dimensions() == out_tensor->num_dimensions());
+ for (uint32_t i = 0; i < lhs_tensor->num_dimensions(); i++)
+ {
+ assert(lhs_tensor->dimension(i) == out_tensor->dimension(i));
+ }
+}
+
+inline void setActivationParams(float min, float max, nnfw::cker::AddParam *params)
+{
+ params->float_activation_min = min;
+ params->float_activation_max = max;
+}
+
+inline void setActivationParams(int32_t min, int32_t max, nnfw::cker::AddParam *params)
+{
+ params->quantized_activation_min = min;
+ params->quantized_activation_max = max;
+}
+
+template <typename raw_type>
+void invoke(const ITensor *lhs_tensor, const ITensor *rhs_tensor, const ITensor *out_tensor,
+ const model::operation::AddNode::Param &param)
+{
+ const auto lhs_buffer = lhs_tensor->bufferRO();
+ const auto rhs_buffer = rhs_tensor->bufferRO();
+ auto out_buffer = out_tensor->buffer();
+
+ nnfw::cker::AddParam cker_param;
+ raw_type activation_min, activation_max;
+ calculateActivationRange(param.activation, &activation_min, &activation_max);
+ setActivationParams(activation_min, activation_max, &cker_param);
+ const auto lhs_shape = convertShape(lhs_tensor->tensorInfo().shape());
+ const auto rhs_shape = convertShape(rhs_tensor->tensorInfo().shape());
+ const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+ const raw_type *lhs_ptr = reinterpret_cast<const raw_type *>(lhs_buffer);
+ const raw_type *rhs_ptr = reinterpret_cast<const raw_type *>(rhs_buffer);
+ raw_type *out_ptr = reinterpret_cast<raw_type *>(out_buffer);
+
+ // Calculate
+ nnfw::cker::Add(cker_param, lhs_shape, lhs_ptr, rhs_shape, rhs_ptr, out_shape, out_ptr);
+}
+
+void invokeAdd(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &add_node = nnfw::misc::polymorphic_downcast<const model::operation::AddNode &>(node);
+
+ const auto lhs_index = node.getInputs().at(add_node.LHS);
+ const auto rhs_index = node.getInputs().at(add_node.RHS);
+ const auto out_index = node.getOutputs().at(0);
+ const auto lhs_tensor = env->tensorAt(lhs_index);
+ const auto rhs_tensor = env->tensorAt(rhs_index);
+ const auto out_tensor = env->tensorAt(out_index);
+ const auto data_type = lhs_tensor->data_type();
+
+ if (data_type == model::DataType::INT32)
+ {
+ invoke<int32_t>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
+ }
+ else if (data_type == model::DataType::FLOAT32)
+ {
+ invoke<float>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Unsupported data type"};
+ }
+}
+} // namespace add
+
+OpKernel *getAddNode()
+{
+ static OpKernel kernel = {add::prepareAdd, add::invokeAdd};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
new file mode 100644
index 000000000..b6dfba85c
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/AveragePool.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/AvgPool2DNode.h"
+#include "util/Utils.h"
+#include "util/Padding.h"
+#include "util/ShapeInference.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace avgpool2d
+{
+
+void prepareAvgPool2D(ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+ UNUSED_RELEASE(in_tensor);
+
+ assert(in_tensor->num_dimensions() == 4);
+
+ const auto output_info = env->model().operands.at(out_index).info();
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &avgpool_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::AvgPool2DNode &>(node);
+    const auto inferred_output_shapes =
+        shape_inference::inferAvgPoolShape(in_tensor->tensorInfo().shape(), avgpool_node.param());
+    env->allocateIfNeeded(out_index, {inferred_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+ // Handle same ifm & ofm data type only
+ assert(in_tensor->data_type() == out_tensor->data_type());
+ assert(out_tensor->num_dimensions() == 4);
+}
+
+void invoke(const ITensor *in_tensor, const ITensor *out_tensor,
+ const model::operation::AvgPool2DNode::Param &param)
+{
+ // TODO Support NCHW frontend
+ const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape,
+ param.stride, param.kw, param.kh);
+ // Calculate
+ nnfw::cker::AveragePoolParams cker_param;
+ calculateActivationRange(param.activation, &cker_param.float_activation_min,
+ &cker_param.float_activation_max);
+ cker_param.filter_width = param.kw;
+ cker_param.filter_height = param.kh;
+ cker_param.padding_values.width = padding.left;
+ cker_param.padding_values.height = padding.top;
+ cker_param.stride_width = param.stride.horizontal;
+ cker_param.stride_height = param.stride.vertical;
+
+ const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
+ const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+ const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
+ float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
+
+ nnfw::cker::AveragePool(cker_param, in_shape, in_ptr, out_shape, out_ptr);
+}
+
+void invokeAvgPool2D(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &avgpool_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::AvgPool2DNode &>(node);
+
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+  // Fetch the input and output tensors prepared by prepareAvgPool2D
+ const auto in_tensor = env->tensorAt(in_index);
+ const auto out_tensor = env->tensorAt(out_index);
+
+ const auto data_type = in_tensor->data_type();
+ if (data_type == model::DataType::FLOAT32)
+ {
+ invoke(in_tensor, out_tensor, avgpool_node.param());
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Support float only"};
+ }
+}
+} // namespace avgpool2d
+
+OpKernel *getAvgPool2DNode()
+{
+ static OpKernel kernel = {avgpool2d::prepareAvgPool2D, avgpool2d::invokeAvgPool2D};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/Concat.cc b/runtimes/neurun/core/src/exec/interp/operations/Concat.cc
new file mode 100644
index 000000000..09a86c179
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/Concat.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/Concatenation.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/ConcatNode.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace concat
+{
+
+void prepareConcat(ExecEnv *env, const model::Operation &node)
+{
+ const auto &concat_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::ConcatNode &>(node);
+
+ const auto first_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto first_tensor = env->tensorAt(first_index);
+ uint32_t out_axis_dimension = 0;
+ const int32_t axis_raw = concat_node.param().axis;
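+  // A negative axis counts back from the last dimension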
+ const uint32_t axis = (axis_raw < 0) ? (axis_raw + first_tensor->num_dimensions()) : axis_raw;
+
+  // All input shapes should be the same except for the axis dimension
+  // All input types should be the same
+ for (auto input : node.getInputs())
+ {
+ assert(first_tensor->num_dimensions() == env->tensorAt(input)->num_dimensions());
+ assert(first_tensor->data_type() == env->tensorAt(input)->data_type());
+ for (uint32_t i = 0; i < first_tensor->num_dimensions(); i++)
+ {
+ if (i == axis)
+ {
+ out_axis_dimension += env->tensorAt(input)->dimension(i);
+ continue;
+ }
+ assert(first_tensor->dimension(i) == env->tensorAt(input)->dimension(i));
+ }
+ }
+
+  // Make output tensor info from the first input's tensor info and the accumulated axis dimension
+ auto out_shape = first_tensor->tensorInfo().shape();
+ out_shape.dim(axis) = out_axis_dimension;
+ env->allocateIfNeeded(out_index,
+ model::OperandInfo{out_shape, first_tensor->tensorInfo().typeInfo()});
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+  // Output shape should be the same as the input's except for the axis dimension
+  // Output type should be the same as the input's
+ assert(first_tensor->data_type() == out_tensor->data_type());
+ for (uint32_t i = 0; i < first_tensor->num_dimensions(); i++)
+ {
+ if (i == axis)
+ {
+ continue;
+ }
+ assert(first_tensor->dimension(i) == out_tensor->dimension(i));
+ }
+}
+
+void invoke(const std::vector<const ITensor *> in_tensors, const ITensor *out_tensor, uint32_t axis)
+{
+ const uint32_t count = in_tensors.size();
+
+ // Calculate
+ nnfw::cker::ConcatenationParams cker_param;
+ cker_param.axis = (int8_t)axis;
+ cker_param.inputs_count = count;
+
+ const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+
+ std::vector<nnfw::cker::Shape> in_shapes;
+ std::vector<const nnfw::cker::Shape *> in_shape_ptrs;
+ in_shapes.reserve(count);
+ in_shape_ptrs.reserve(count);
+ std::vector<const float *> in_ptrs;
+ for (uint32_t i = 0; i < count; i++)
+ {
+ in_shapes.push_back(convertShape(in_tensors[i]->tensorInfo().shape()));
+ in_shape_ptrs.push_back(&in_shapes[i]);
+ in_ptrs.push_back(reinterpret_cast<const float *>(in_tensors[i]->bufferRO()));
+ }
+
+ auto out_buffer = out_tensor->buffer();
+ float *out_ptr = reinterpret_cast<float *>(out_buffer);
+
+ nnfw::cker::Concatenation<float>(cker_param, in_shape_ptrs.data(), in_ptrs.data(), out_shape,
+ out_ptr);
+}
+
+void invokeConcat(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &concat_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::ConcatNode &>(node);
+ const int32_t axis_raw = concat_node.param().axis;
+
+ std::vector<const ITensor *> in_tensors;
+ for (const auto &e : concat_node.getInputs())
+ {
+ in_tensors.emplace_back(env->tensorAt(e));
+ }
+
+ const auto out_index = node.getOutputs().at(0);
+ const auto out_tensor = env->tensorAt(out_index);
+ const uint32_t axis = (axis_raw < 0) ? (axis_raw + out_tensor->num_dimensions()) : axis_raw;
+
+ const auto data_type = in_tensors[0]->data_type();
+ if (data_type == model::DataType::FLOAT32)
+ {
+ invoke(in_tensors, out_tensor, axis);
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Support float32 only"};
+ }
+}
+} // namespace concat
+
+OpKernel *getConcatNode()
+{
+ static OpKernel kernel = {concat::prepareConcat, concat::invokeConcat};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc b/runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc
new file mode 100644
index 000000000..92f4f6415
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/Conv.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/Conv2DNode.h"
+#include "util/Utils.h"
+#include "util/Padding.h"
+#include "util/ShapeInference.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace conv2d
+{
+
+void prepareConv2D(ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(model::operation::Conv2DNode::INPUT);
+ const auto kernel_index = node.getInputs().at(model::operation::Conv2DNode::KERNEL);
+ const auto bias_index = node.getInputs().at(model::operation::Conv2DNode::BIAS);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+ const auto kernel_tensor = env->tensorAt(kernel_index);
+ const auto bias_tensor = env->tensorAt(bias_index);
+
+ assert(in_tensor->num_dimensions() == 4);
+ assert(kernel_tensor->num_dimensions() == 4);
+ assert(bias_tensor->num_dimensions() == 1);
+
+ UNUSED_RELEASE(in_tensor);
+ UNUSED_RELEASE(kernel_tensor);
+ UNUSED_RELEASE(bias_tensor);
+
+ const auto output_info = env->model().operands.at(out_index).info();
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &conv_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::Conv2DNode &>(node);
+    const auto inferred_output_shapes = shape_inference::inferConv2DShape(
+        in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(), conv_node.param());
+    env->allocateIfNeeded(out_index, {inferred_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+ // Handle same ifm & ofm data type only
+ assert(in_tensor->data_type() == out_tensor->data_type());
+ assert(out_tensor->num_dimensions() == 4);
+}
+
+void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *bias_tensor,
+ const ITensor *ofm_tensor, const model::operation::Conv2DNode::Param &param)
+{
+  // TODO Support NCHW frontend
+ const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
+ const auto &ker_shape = ker_tensor->tensorInfo().shape();
+ const auto ker_height = ker_shape.dim(1);
+ const auto ker_width = ker_shape.dim(2);
+ const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape,
+ param.stride, ker_width, ker_height);
+
+ // Calculate
+ float activation_min, activation_max;
+ calculateActivationRange(param.activation, &activation_min, &activation_max);
+
+ nnfw::cker::ConvParams cker_param;
+ cker_param.padding_values.width = padding.left;
+ cker_param.padding_values.height = padding.top;
+ cker_param.stride_width = param.stride.horizontal;
+ cker_param.stride_height = param.stride.vertical;
+ cker_param.dilation_width_factor = 1;
+ cker_param.dilation_height_factor = 1;
+ cker_param.float_activation_min = activation_min;
+ cker_param.float_activation_max = activation_max;
+
+ const auto cker_ifm_shape = convertShape(ifm_tensor->tensorInfo().shape());
+ const auto cker_ker_shape = convertShape(ker_tensor->tensorInfo().shape());
+ const auto cker_bias_shape = convertShape(bias_tensor->tensorInfo().shape());
+ const auto cker_ofm_shape = convertShape(ofm_tensor->tensorInfo().shape());
+ const float *ifm_ptr = reinterpret_cast<const float *>(ifm_tensor->bufferRO());
+ const float *ker_ptr = reinterpret_cast<const float *>(ker_tensor->bufferRO());
+ const float *bias_ptr = reinterpret_cast<const float *>(bias_tensor->bufferRO());
+ float *ofm_ptr = reinterpret_cast<float *>(ofm_tensor->buffer());
+
+ nnfw::cker::Conv(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr, cker_bias_shape,
+ bias_ptr, cker_ofm_shape, ofm_ptr);
+}
+
+void invokeConv2D(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &conv_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::Conv2DNode &>(node);
+
+ const auto ifm_index = node.getInputs().at(model::operation::Conv2DNode::INPUT);
+ const auto ker_index = node.getInputs().at(model::operation::Conv2DNode::KERNEL);
+ const auto bias_index = node.getInputs().at(model::operation::Conv2DNode::BIAS);
+ const auto ofm_index = node.getOutputs().at(0);
+
+ const auto ifm_tensor = env->tensorAt(ifm_index);
+ const auto ker_tensor = env->tensorAt(ker_index);
+ const auto bias_tensor = env->tensorAt(bias_index);
+ const auto ofm_tensor = env->tensorAt(ofm_index);
+
+ const auto data_type = ifm_tensor->data_type();
+ if (data_type == model::DataType::FLOAT32)
+ {
+ invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Support float32 only"};
+ }
+}
+} // namespace conv2d
+
+OpKernel *getConv2DNode()
+{
+ static OpKernel kernel = {conv2d::prepareConv2D, conv2d::invokeConv2D};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc b/runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc
new file mode 100644
index 000000000..e1e7c0674
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/DepthwiseConv.h>
+#include <misc/polymorphic_downcast.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/DepthwiseConv2DNode.h"
+#include "util/Padding.h"
+#include "util/Utils.h"
+#include "util/ShapeInference.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+namespace
+{
+
+void prepareDepthwiseConv(ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(model::operation::DepthwiseConv2DNode::INPUT);
+ const auto kernel_index = node.getInputs().at(model::operation::DepthwiseConv2DNode::KERNEL);
+ const auto bias_index = node.getInputs().at(model::operation::DepthwiseConv2DNode::BIAS);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+ const auto kernel_tensor = env->tensorAt(kernel_index);
+ const auto bias_tensor = env->tensorAt(bias_index);
+
+ assert(in_tensor->num_dimensions() == 4);
+ assert(kernel_tensor->num_dimensions() == 4);
+ assert(bias_tensor->num_dimensions() == 1);
+
+ UNUSED_RELEASE(in_tensor);
+ UNUSED_RELEASE(kernel_tensor);
+ UNUSED_RELEASE(bias_tensor);
+
+ // TODO handle unspecified output shape:
+ // calculate output shape using ifm shape, kernel shape, padding, stride
+ const auto output_info = env->model().operands.at(out_index).info();
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &depth_conv_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::DepthwiseConv2DNode &>(node);
+ const auto inferred_output_shapes = shape_inference::inferDepthwiseConv2DShape(
+ in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(),
+ depth_conv_node.param());
+ env->allocateIfNeeded(out_index, {inferred_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+ // Handle same ifm & ofm data type only
+ assert(in_tensor->data_type() == out_tensor->data_type());
+ assert(out_tensor->num_dimensions() == 4);
+}
+
+void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *bias_tensor,
+ const ITensor *ofm_tensor, const model::operation::DepthwiseConv2DNode::Param &param)
+{
+ // TODO Support NCHW frontend
+ const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ // Kernel format is [1, kernel_height, kernel_width, depth_out].
+ const auto &ker_shape = ker_tensor->tensorInfo().shape();
+ const auto ker_height = ker_shape.dim(1);
+ const auto ker_width = ker_shape.dim(2);
+ const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape,
+ param.stride, ker_width, ker_height);
+
+ // Calculate the activation range
+ float activation_min, activation_max;
+ calculateActivationRange(param.activation, &activation_min, &activation_max);
+
+ nnfw::cker::DepthwiseConvParams cker_param;
+ cker_param.padding_values.width = padding.left;
+ cker_param.padding_values.height = padding.top;
+ cker_param.depth_multiplier = param.multiplier;
+ cker_param.stride_width = param.stride.horizontal;
+ cker_param.stride_height = param.stride.vertical;
+ cker_param.dilation_width_factor = 1;
+ cker_param.dilation_height_factor = 1;
+ cker_param.float_activation_min = activation_min;
+ cker_param.float_activation_max = activation_max;
+
+ const auto cker_ifm_shape = convertShape(ifm_tensor->tensorInfo().shape());
+ const auto cker_ker_shape = convertShape(ker_tensor->tensorInfo().shape());
+ const auto cker_bias_shape = convertShape(bias_tensor->tensorInfo().shape());
+ const auto cker_ofm_shape = convertShape(ofm_tensor->tensorInfo().shape());
+ const float *ifm_ptr = reinterpret_cast<const float *>(ifm_tensor->bufferRO());
+ const float *ker_ptr = reinterpret_cast<const float *>(ker_tensor->bufferRO());
+ const float *bias_ptr = reinterpret_cast<const float *>(bias_tensor->bufferRO());
+ float *ofm_ptr = reinterpret_cast<float *>(ofm_tensor->buffer());
+
+ nnfw::cker::DepthwiseConv(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr,
+ cker_bias_shape, bias_ptr, cker_ofm_shape, ofm_ptr);
+}
+
+void invokeDepthwiseConv(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &conv_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::DepthwiseConv2DNode &>(node);
+
+ const auto ifm_index = node.getInputs().at(model::operation::DepthwiseConv2DNode::INPUT);
+ const auto ker_index = node.getInputs().at(model::operation::DepthwiseConv2DNode::KERNEL);
+ const auto bias_index = node.getInputs().at(model::operation::DepthwiseConv2DNode::BIAS);
+ const auto ofm_index = node.getOutputs().at(0);
+
+ const auto ifm_tensor = env->tensorAt(ifm_index);
+ const auto ker_tensor = env->tensorAt(ker_index);
+ const auto bias_tensor = env->tensorAt(bias_index);
+ const auto ofm_tensor = env->tensorAt(ofm_index);
+
+ const auto data_type = ifm_tensor->data_type();
+ if (data_type == model::DataType::FLOAT32)
+ {
+ invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Support float32 only"};
+ }
+}
+
+} // namespace
+
+OpKernel *getDepthwiseConvNode()
+{
+ static OpKernel kernel = {prepareDepthwiseConv, invokeDepthwiseConv};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/FullyConnected.cc b/runtimes/neurun/core/src/exec/interp/operations/FullyConnected.cc
new file mode 100644
index 000000000..466c220b1
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/FullyConnected.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/FullyConnected.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/FullyConnectedNode.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace fc
+{
+
+void prepareFC(ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(model::operation::FullyConnectedNode::INPUT);
+ const auto kernel_index = node.getInputs().at(model::operation::FullyConnectedNode::WEIGHT);
+ const auto bias_index = node.getInputs().at(model::operation::FullyConnectedNode::BIAS);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+ const auto kernel_tensor = env->tensorAt(kernel_index);
+ const auto bias_tensor = env->tensorAt(bias_index);
+
+ UNUSED_RELEASE(in_tensor);
+ UNUSED_RELEASE(kernel_tensor);
+ UNUSED_RELEASE(bias_tensor);
+
+ assert(in_tensor->num_dimensions() >= 2);
+ assert(kernel_tensor->num_dimensions() == 2);
+ assert(bias_tensor->num_dimensions() == 1);
+
+ const auto input_size_with_batch = in_tensor->num_elements();
+ const auto num_units = kernel_tensor->dimension(0);
+ const auto input_size = kernel_tensor->dimension(1);
+ const auto batch_size = input_size_with_batch / input_size;
+ assert(input_size_with_batch % input_size == 0);
+ assert(num_units == bias_tensor->dimension(0));
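+ // Worked example: an input of shape [2, 4] (8 elements) with a kernel of
+ // shape [8, 4] yields input_size = 4, num_units = 8 and batch_size = 2,
+ // so the output below is allocated with shape [2, 8].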
+
+ // Make output tensor info
+ model::Shape output_shape(2);
+ output_shape.dim(0) = batch_size;
+ output_shape.dim(1) = num_units;
+ const model::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()};
+ env->allocateIfNeeded(out_index, out_info);
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+ // Handle same ifm & ofm data type only
+ assert(in_tensor->data_type() == out_tensor->data_type());
+ assert(out_tensor->num_dimensions() == 2);
+ assert(out_tensor->dimension(0) == batch_size);
+ assert(out_tensor->dimension(1) == num_units);
+}
+
+void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *bias_tensor,
+ const ITensor *ofm_tensor, const model::operation::FullyConnectedNode::Param &param)
+{
+ const auto ifm_buffer = ifm_tensor->bufferRO();
+ const auto ker_buffer = ker_tensor->bufferRO();
+ const auto bias_buffer = bias_tensor->bufferRO();
+ auto ofm_buffer = ofm_tensor->buffer();
+
+ // Calculate the activation range and set up cker parameters
+ nnfw::cker::FullyConnectedParams cker_param;
+ calculateActivationRange(param.activation, &cker_param.float_activation_min,
+ &cker_param.float_activation_max);
+ const auto cker_ifm_shape = convertExtendShape(ifm_tensor->tensorInfo().shape());
+ const auto cker_ker_shape = convertExtendShape(ker_tensor->tensorInfo().shape());
+ const auto cker_bias_shape = convertExtendShape(bias_tensor->tensorInfo().shape());
+ const auto cker_ofm_shape = convertExtendShape(ofm_tensor->tensorInfo().shape());
+ const float *ifm_ptr = reinterpret_cast<const float *>(ifm_buffer);
+ const float *ker_ptr = reinterpret_cast<const float *>(ker_buffer);
+ const float *bias_ptr = reinterpret_cast<const float *>(bias_buffer);
+ float *ofm_ptr = reinterpret_cast<float *>(ofm_buffer);
+
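+ // A sketch of what the call below is expected to compute, per batch b and
+ // unit u:
+ // ofm[b][u] = clamp(sum_i(ifm[b][i] * ker[u][i]) + bias[u],
+ // float_activation_min, float_activation_max)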
+ nnfw::cker::FullyConnected(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr,
+ cker_bias_shape, bias_ptr, cker_ofm_shape, ofm_ptr);
+}
+
+void invokeFC(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &fc_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::FullyConnectedNode &>(node);
+
+ const auto ifm_index = node.getInputs().at(model::operation::FullyConnectedNode::INPUT);
+ const auto ker_index = node.getInputs().at(model::operation::FullyConnectedNode::WEIGHT);
+ const auto bias_index = node.getInputs().at(model::operation::FullyConnectedNode::BIAS);
+ const auto ofm_index = node.getOutputs().at(0);
+
+ const auto ifm_tensor = env->tensorAt(ifm_index);
+ const auto ker_tensor = env->tensorAt(ker_index);
+ const auto bias_tensor = env->tensorAt(bias_index);
+ const auto ofm_tensor = env->tensorAt(ofm_index);
+
+ const auto data_type = ifm_tensor->data_type();
+ if (data_type == model::DataType::FLOAT32)
+ {
+ invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, fc_node.param());
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Support float only"};
+ }
+}
+} // namespace fc
+
+OpKernel *getFullyConnectedNode()
+{
+ static OpKernel kernel = {fc::prepareFC, fc::invokeFC};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc b/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc
new file mode 100644
index 000000000..e53fa1473
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/MaxPool.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/MaxPool2DNode.h"
+#include "util/Utils.h"
+#include "util/Padding.h"
+#include "util/ShapeInference.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace
+{
+
+void prepareMaxPool2D(ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+
+ assert(in_tensor->num_dimensions() == 4);
+ UNUSED_RELEASE(in_tensor);
+
+ const auto output_info = env->model().operands.at(out_index).info();
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &maxpool_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::MaxPool2DNode &>(node);
+ const auto inferred_output_shapes =
+ shape_inference::inferMaxPoolShape(in_tensor->tensorInfo().shape(), maxpool_node.param());
+ env->allocateIfNeeded(out_index, {inferred_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+ // Handle same ifm & ofm data type only
+ assert(in_tensor->data_type() == out_tensor->data_type());
+ assert(out_tensor->num_dimensions() == 4);
+}
+
+void invoke(const ITensor *in_tensor, const ITensor *out_tensor,
+ const model::operation::MaxPool2DNode::Param &param)
+{
+ // TODO support NCHW frontend
+ const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC);
+ const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape,
+ param.stride, param.kw, param.kh);
+ // Calculate the activation range and set up cker parameters
+ nnfw::cker::MaxPoolParams cker_param;
+ calculateActivationRange(param.activation, &cker_param.float_activation_min,
+ &cker_param.float_activation_max);
+ cker_param.filter_width = param.kw;
+ cker_param.filter_height = param.kh;
+ cker_param.padding_values.width = padding.left;
+ cker_param.padding_values.height = padding.top;
+ cker_param.stride_width = param.stride.horizontal;
+ cker_param.stride_height = param.stride.vertical;
+
+ const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
+ const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+ const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
+ float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
+
+ nnfw::cker::MaxPool(cker_param, in_shape, in_ptr, out_shape, out_ptr);
+}
+
+void invokeMaxPool2D(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &maxpool_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::MaxPool2DNode &>(node);
+
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+ const auto out_tensor = env->tensorAt(out_index);
+
+ const auto data_type = in_tensor->data_type();
+ if (data_type == model::DataType::FLOAT32)
+ {
+ invoke(in_tensor, out_tensor, maxpool_node.param());
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Support float32 only"};
+ }
+}
+} // namespace
+
+OpKernel *getMaxPool2DNode()
+{
+ static OpKernel kernel = {prepareMaxPool2D, invokeMaxPool2D};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h b/runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h
new file mode 100644
index 000000000..4d2b4e1d8
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
+#define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
+
+#include "model/Shape.h"
+#include "model/InternalType.h"
+
+#include <cker/Shape.h>
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+inline nnfw::cker::Shape convertShape(const model::Shape &shape)
+{
+ auto dimensions = std::vector<uint32_t>(shape.dims().begin(), shape.dims().end());
+
+ std::vector<int32_t> raw_shape;
+ raw_shape.resize(4);
+
+ for (uint32_t i = 0; i < 4; ++i)
+ {
+ if (i >= dimensions.size())
+ {
+ raw_shape[i] = 1;
+ }
+ else
+ {
+ raw_shape[i] = dimensions[i];
+ }
+ }
+
+ return nnfw::cker::GetShape(raw_shape);
+}
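+// E.g., a rank-2 model shape {3, 5} becomes the cker shape {3, 5, 1, 1}:
+// missing trailing dimensions are padded with 1.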
+
+inline nnfw::cker::Shape convertExtendShape(const model::Shape &shape)
+{
+ auto dimensions = std::vector<uint32_t>(shape.dims().begin(), shape.dims().end());
+
+ std::vector<int32_t> raw_shape;
+ raw_shape.resize(4);
+ assert(dimensions.size() <= 4); // rank > 4 would underflow 'start'
+ uint32_t start = 4 - dimensions.size();
+
+ for (uint32_t i = 0; i < 4; ++i)
+ {
+ if (i < start)
+ {
+ raw_shape[i] = 1;
+ }
+ else
+ {
+ raw_shape[i] = dimensions[i - start];
+ }
+ }
+
+ return nnfw::cker::GetShape(raw_shape);
+}
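+// In contrast to convertShape above, the rank is extended at the front:
+// a rank-2 model shape {3, 5} becomes the cker shape {1, 1, 3, 5}, which is
+// the form the FullyConnected kernel relies on.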
+
+template <typename T>
+void calculateActivationRange(model::Activation activation, T *activation_min, T *activation_max)
+{
+ if (activation == model::Activation::RELU)
+ {
+ *activation_min = 0;
+ *activation_max = std::numeric_limits<T>::max();
+ }
+ else if (activation == model::Activation::RELU6)
+ {
+ *activation_min = 0;
+ *activation_max = 6;
+ }
+ else if (activation == model::Activation::RELU1)
+ {
+ *activation_min = -1;
+ *activation_max = 1;
+ }
+ else if (activation == model::Activation::NONE)
+ {
+ *activation_min = std::numeric_limits<T>::lowest();
+ *activation_max = std::numeric_limits<T>::max();
+ }
+ else
+ {
+ throw std::runtime_error{"Unsupported activation type"};
+ }
+}
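+// Usage sketch:
+// float lo, hi;
+// calculateActivationRange(model::Activation::RELU6, &lo, &hi);
+// // -> lo == 0.0f, hi == 6.0f; Activation::NONE yields the full range of T.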
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
diff --git a/runtimes/neurun/core/src/exec/interp/operations/Reshape.cc b/runtimes/neurun/core/src/exec/interp/operations/Reshape.cc
new file mode 100644
index 000000000..a45c3b3f2
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/Reshape.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exec/interp/Registration.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace
+{
+
+void prepare(ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ // Unspecified shape is not supported in the operation node spec for now
+ const auto output_info = env->model().operands.at(out_index).info();
+ env->allocateAndShareIfNeeded(out_index, output_info, in_index);
+
+ assert(output_info.total_size() == env->model().operands.at(in_index).info().total_size());
+}
+
+void invoke(const ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ if (env->tensorAt(in_index)->bufferRO() == env->tensorAt(out_index)->bufferRO())
+ {
+ // Same data
+ return;
+ }
+
+ const auto output_info = env->model().operands.at(out_index).info();
+ memcpy(env->tensorAt(out_index)->buffer(), env->tensorAt(in_index)->bufferRO(),
+ output_info.total_size());
+}
+
+} // namespace
+
+OpKernel *getReshapeNode()
+{
+ static OpKernel kernel = {prepare, invoke};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/exec/interp/operations/SoftMax.cc b/runtimes/neurun/core/src/exec/interp/operations/SoftMax.cc
new file mode 100644
index 000000000..07865969b
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/SoftMax.cc
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/SoftMax.h>
+
+#include "OperationUtil.h"
+
+#include "exec/interp/Registration.h"
+#include "model/operation/SoftmaxNode.h"
+#include "misc/polymorphic_downcast.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+namespace
+{
+
+void Softmax2D(const float *in, const int input_size, const int batch_size, const float beta,
+ float *out)
+{
+ assert(input_size > 0);
+
+ // For each batch
+ for (int b = 0; b < batch_size; b++)
+ {
+ // Find the max coeff.
+ float max_coeff = in[0];
+ for (int i = 1; i < input_size; i++)
+ {
+ if (in[i] > max_coeff)
+ max_coeff = in[i];
+ }
+
+ // Compute the normalized sum of exps.
+ float exp_sum = 0.0;
+ for (int i = 0; i < input_size; i++)
+ {
+ out[i] = std::exp((in[i] - max_coeff) * beta);
+ exp_sum += out[i];
+ }
+
+ // Divide by the sum of exps.
+ float reciprocal_sum_exp = 1.f / exp_sum;
+ for (int i = 0; i < input_size; i++)
+ {
+ out[i] *= reciprocal_sum_exp;
+ }
+
+ // Advance in and out pointers for the next batch.
+ in += input_size;
+ out += input_size;
+ }
+}
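+// Numerical note: subtracting max_coeff before exponentiation keeps every
+// exponent non-positive, avoiding overflow without changing the result.
+// E.g., for in = {1.0f, 2.0f} and beta = 1.0f:
+// exp(1 - 2) ~= 0.368, exp(2 - 2) = 1.0, exp_sum ~= 1.368,
+// so out ~= {0.269, 0.731}.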
+
+void prepareSoftMax(ExecEnv *env, const model::Operation &node)
+{
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+ UNUSED_RELEASE(in_tensor);
+
+ assert((in_tensor->num_dimensions() == 4) || (in_tensor->num_dimensions() == 2));
+
+ // Output shape should be the same as the input shape
+ // Output type is predefined in the model
+ const auto output_shape = env->model().operands.at(in_index).info().shape();
+ const auto output_type = env->model().operands.at(out_index).info().typeInfo();
+
+ const model::OperandInfo output_info{output_shape, output_type};
+ env->allocateIfNeeded(out_index, output_info);
+
+ auto out_tensor = env->tensorAt(out_index);
+ UNUSED_RELEASE(out_tensor);
+
+ // Check that the output shape matches the input shape
+ assert(in_tensor->num_dimensions() == out_tensor->num_dimensions());
+ for (uint32_t i = 0; i < in_tensor->num_dimensions(); i++)
+ {
+ assert(in_tensor->dimension(i) == out_tensor->dimension(i));
+ }
+}
+
+void invoke(const ITensor *in_tensor, const ITensor *out_tensor,
+ const model::operation::SoftmaxNode::Param &param)
+{
+ const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
+ float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
+
+ float beta = param.beta;
+
+ if (in_tensor->num_dimensions() == 2)
+ {
+ uint32_t batch_size = in_tensor->dimension(0);
+ uint32_t input_size = in_tensor->dimension(1);
+
+ Softmax2D(in_ptr, input_size, batch_size, beta, out_ptr);
+ }
+ else if (in_tensor->num_dimensions() == 4)
+ {
+ const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
+ const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+
+ nnfw::cker::SoftmaxParams cker_param;
+ cker_param.beta = beta;
+
+ nnfw::cker::Softmax(cker_param, in_shape, in_ptr, out_shape, out_ptr);
+ }
+ else
+ {
+ throw std::runtime_error{"Unsuported input dimension: support 2D or 4D"};
+ }
+}
+
+void invokeSoftMax(const ExecEnv *env, const model::Operation &node)
+{
+ const auto &softmax_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::SoftmaxNode &>(node);
+
+ const auto in_index = node.getInputs().at(0);
+ const auto out_index = node.getOutputs().at(0);
+
+ const auto in_tensor = env->tensorAt(in_index);
+ const auto out_tensor = env->tensorAt(out_index);
+
+ const auto in_data_type = in_tensor->data_type();
+ const auto out_data_type = out_tensor->data_type();
+ if ((in_data_type == model::DataType::FLOAT32) && (out_data_type == model::DataType::FLOAT32))
+ {
+ invoke(in_tensor, out_tensor, softmax_node.param());
+ }
+ else
+ {
+ throw std::runtime_error{"NYI: Support float32 only"};
+ }
+}
+
+} // namespace
+
+OpKernel *getSoftMaxNode()
+{
+ static OpKernel kernel = {prepareSoftMax, invokeSoftMax};
+ return &kernel;
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/Graph.cc b/runtimes/neurun/core/src/graph/Graph.cc
new file mode 100644
index 000000000..4264b1a8a
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/Graph.cc
@@ -0,0 +1,589 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph/Graph.h"
+
+#include <algorithm>
+#include <bitset>
+
+#include "util/logging.h"
+#include "verifier/Verifier.h"
+#include "cpp14/memory.h"
+#include "compiler/Linear.h"
+#include "graph/operation/LowerInfo.h"
+#include "graph/operand/LowerInfo.h"
+#include "graph/operand/PermuteFactor.h"
+#include "operand/Shape4DConvert.h"
+#include "compiler/BackendResolver.h"
+#include "backend/IConfig.h"
+#include "pass/PermutationInsertionPass.h"
+#include "pass/PermutationEliminationPass.h"
+
+namespace neurun
+{
+namespace graph
+{
+
+Graph::Graph(std::unique_ptr<model::Model> &&model) : _model{std::move(model)}
+{
+ // DO NOTHING
+}
+
+Graph::~Graph(void) = default;
+
+model::OperandIndex Graph::addOperand(const model::Shape &shape, const model::TypeInfo &type)
+{
+ return _model->operands.emplace(shape, type);
+}
+
+model::OperationIndex Graph::addOperation(std::unique_ptr<model::Operation> &&node)
+{
+ assert(isBuildingPhase());
+ return _model->operations.push(std::move(node));
+}
+
+void Graph::setOperandValue(const model::OperandIndex &ind, std::unique_ptr<model::Data> &&data)
+{
+ assert(isBuildingPhase());
+ assert(_model->operands.exist(ind));
+ _model->operands.at(ind).data(std::move(data));
+}
+
+void Graph::addInput(const model::OperandIndex &ind)
+{
+ assert(isBuildingPhase());
+ _model->inputs.append(ind);
+}
+
+void Graph::addOutput(const model::OperandIndex &ind)
+{
+ assert(isBuildingPhase());
+ _model->outputs.append(ind);
+}
+
+void Graph::finishBuilding(void)
+{
+ assert(isBuildingPhase());
+ _phase = Phase::MODEL;
+
+ // Initialize operand use-def
+ initializeUseDef();
+
+ // Call graph verifications for the MODEL phase
+ {
+ assert(verifier::DAGChecker().verify(*this));
+ assert(verifier::EdgeConsistencyChecker().verify(*this));
+ }
+}
+
+void Graph::lower(void)
+{
+ assert(_phase == Phase::MODEL);
+
+ _subgraphs = nnfw::cpp14::make_unique<model::Subgraphs>();
+ bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE);
+
+ // Lower
+ {
+ // operand::LowerInfo holder
+ model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operands_lower_info;
+
+ _model->operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) {
+ operands_lower_info[index] =
+ nnfw::cpp14::make_unique<operand::LowerInfo>(graph::operand::asShape4D(object.shape()));
+ });
+
+ _lower_info_map = nnfw::cpp14::make_unique<LowerInfoMap>();
+
+ // Are they mergeable?
+ // 1. Do they have the same backend id and layout?
+ // 2. If so, are the subg and the node connected?
+ auto mergeable = [&](const model::SubgraphIndex &subg_index,
+ const model::OperationIndex &node_index, model::Layout layout) {
+ const auto &subg = _subgraphs->at(subg_index);
+ const auto &node = _model->operations.at(node_index);
+
+ // The same backend id and layout?
+ {
+ const auto subg_backend_layout = getLowerInfo(subg_index)->layout();
+ const auto &subg_backend_id = getLowerInfo(subg_index)->backend()->config()->id();
+ const auto &node_backend_id = _backend_resolver->getBackend(node_index)->config()->id();
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " { " << subg_backend_id << "("
+ << model::to_string(subg_backend_layout) << ") } "
+ << " NODE#" << node_index.value() << " (" << node.getName() << ") { "
+ << node_backend_id << "(" << model::to_string(layout) << ") } " << std::endl;
+ if (subg_backend_id != node_backend_id || subg_backend_layout != layout)
+ return false;
+ }
+
+ // Connected?
+ // Is an input of one node an output of the other, or vice versa?
+ {
+ const auto &node_inputs = node.getInputs();
+ const auto &node_outputs = node.getOutputs();
+
+ // subg's operations are in order, so we only need to check the first and the last
+ std::vector<model::Element> subg_ops{subg.operations()[0]};
+ if (subg.operations().size() > 1)
+ subg_ops.emplace_back(subg.operations()[subg.operations().size() - 1]);
+
+ for (const auto &elem : subg_ops)
+ {
+ const auto &n_index = elem.index;
+ const auto &n = *elem.node;
+
+ // node's output == subg's input?
+ const auto &n_inputs = n.getInputs();
+ for (auto input : n_inputs)
+ {
+ if (node_outputs.contains(input))
+ {
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " 's NODE#" << n_index.value()
+ << "(" << n.getName() << ") is connected to NODE#"
+ << node_index.value() << "(" << node.getName() << ")" << std::endl;
+ return true;
+ }
+ }
+
+ // node's input == subg's output?
+ const auto &n_outputs = n.getOutputs();
+ for (auto output : n_outputs)
+ {
+ if (node_inputs.contains(output))
+ {
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " 's NODE#" << n_index.value()
+ << " (" << n.getName() << ") is connected to NODE#"
+ << node_index.value() << std::endl;
+ return true;
+ }
+ }
+ }
+
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " is not connected to NODE#"
+ << node_index.value() << "(" << node.getName() << ")" << std::endl;
+ }
+
+ return false;
+ };
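+ // Example: two adjacent nodes both lowered to the "cpu" backend in NHWC,
+ // where one node's output feeds the other, satisfy both conditions and are
+ // merged into one subgraph; a node lowered to a different backend or layout
+ // always starts a fresh subgraph.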
+
+ // Create a fresh subgraph with one operation, and append it to subgraphs
+ auto append_fresh_single_op_subgraph = [&](const model::OperationIndex &node_index,
+ const model::Operation &node, model::Layout layout) {
+ // Create a fresh subgraph
+ auto subg = nnfw::cpp14::make_unique<model::Subgraph>(layout);
+
+ // Add an operation
+ subg->appendOperation(node_index, node);
+
+ // Update input/output
+ subg->setOutputs(node.getOutputs());
+ subg->setInputs(node.getInputs());
+
+ return _subgraphs->emplace(std::move(subg));
+ };
+
+ model::Subgraph *subg = nullptr;
+ model::SubgraphIndex subg_index;
+
+ // Make subgraphs while checking whether a node can be merged into a subgraph.
+ // NOTE: The method below appends nodes to the current subgraph, creating a new
+ // subgraph when needed. If there is a better way, we are happy to update this code.
+ Graph::PostDfsConstIterator().iterate(*this, [&](const model::OperationIndex &node_index,
+ const model::Operation &node) {
+ // LowerInfo for in/output operands
+ auto backend = _backend_resolver->getBackend(node_index);
+ // TODO Figure out how to get this node's layout from the IR
+ auto frontend_layout = model::Layout::NHWC;
+ auto backend_layout = frontend_layout;
+ const std::string acl_layout_str = util::getConfigString(util::config::ACL_LAYOUT);
+ if (acl_layout_str == "NHWC")
+ {
+ backend_layout = model::Layout::NHWC;
+ }
+ else if (acl_layout_str == "NCHW")
+ {
+ backend_layout = model::Layout::NCHW;
+ }
+
+ // CPU supports only NHWC now
+ if (backend->config()->id() == "cpu")
+ {
+ backend_layout = model::Layout::NHWC;
+ }
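+ // For example, running with the ACL_LAYOUT configuration set to "NCHW"
+ // (typically supplied as an environment variable of the same name) makes
+ // every backend other than cpu operate on NCHW tensors.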
+
+ for (auto operand : node.getInputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ lower_info->addUsePermuteFactor(operand::PermuteFactor{backend, backend_layout});
+ }
+ for (auto operand : node.getOutputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ lower_info->addDefPermuteFactor(operand::PermuteFactor{backend, backend_layout});
+ }
+ // For profiling, each subgraph must contain just one node
+ // so that we can measure each node separately
+ if (!subg || is_profiling || !mergeable(subg_index, node_index, backend_layout))
+ {
+ auto new_subg_index = append_fresh_single_op_subgraph(node_index, node, frontend_layout);
+
+ // Subgraph LowerInfo
+ setLowerInfo(new_subg_index, nnfw::cpp14::make_unique<graph::operation::LowerInfo>(
+ backend, backend_layout));
+
+ subg_index = new_subg_index;
+ subg = &(_subgraphs->at(new_subg_index));
+
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " is created for "
+ << "NODE#" << node_index.value() << "(" << node.getName() << ")"
+ << std::endl;
+ }
+ else
+ {
+ subg->appendOperation(node_index, node);
+ subg->setInputs(node.getInputs());
+
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " merges "
+ << "NODE#" << node_index.value() << "(" << node.getName() << ")"
+ << std::endl;
+ }
+
+ bool finish = false;
+ {
+ size_t prev_op_cnt = 0;
+ for (auto input : node.getInputs())
+ {
+ // only valid_inputs
+ const auto &operand = _model->operands.at(input);
+ if (operand.isConstant())
+ continue;
+
+ // This operand is input of operation, not weight or bias
+ if (operand.getDef().list().size() > 0)
+ ++prev_op_cnt;
+
+ // Test the node is Concat or BeginningBranch
+ // About (1)isConcat and (2)isBeginningBranch
+ // (1) Current node has multiple inputs as concat?
+ // - Does current node have two or more than previous operation?
+ //
+ // [CONV] [CONV] [CONV] [MAX_POOL]
+ // | | | |
+ // [0] [1] [2] [3]
+ // \ | | /
+ // [ C O N C A T ] # current node
+ //
+ // (2) Current node is on the separated branch at the beginning?
+ // - Does current node's input operand's uses have two or more than?
+ //
+ // [CONV]
+ // |
+ // [0]----.
+ // | |
+ // [CONV] [CONV] # current node
+ // | |
+ // [1] [2]
+ // \ /
+ // [CONCAT]
+ if (prev_op_cnt > 1 || operand.getUses().list().size() > 1)
+ {
+ finish = true;
+ break;
+ }
+ }
+ }
+
+ if (finish)
+ subg = nullptr;
+ });
+
+ _subgraphs->iterate([&](const model::SubgraphIndex &, model::Subgraph &subg) {
+ assert(subg.operations().size() > 0);
+ std::reverse(std::begin(subg.operations()), std::end(subg.operations()));
+ });
+
+ _subgraphs->dump("merged and sorted operations without permutation");
+
+// NOTE This is the desired way to handle model inputs and outputs; however,
+// getDefaultBackend() is cpu-backend dependent, so we cannot use it for now.
+#if 0
+ // Add def backend to model input/output operand as default backend
+ for (auto index : getInputs())
+ {
+ auto &&lower_info = operands_lower_info.at(index);
+ lower_info->addDefBackend(_backend_resolver->getDefaultBackend());
+ }
+
+ for (auto index : getOutputs())
+ {
+ auto &&lower_info = operands_lower_info.at(index);
+ lower_info->addUseBackend(_backend_resolver->getDefaultBackend());
+ }
+#endif
+
+ // Add DefFactor constants same as UseFactor
+ // NOTE This assumes a constant operand is used by only one operation
+ _model->operations.iterate([&](const model::OperationIndex &, model::Operation &node) {
+ // LowerInfo for input operands
+ for (auto operand : node.getInputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ if (lower_info->def_factors().empty())
+ {
+ // NOTE Handling model inputs here is not ideal. See above NOTE comment.
+ // If it is a model input, not a constant
+ if (_model->inputs.contains(operand))
+ {
+ // If there are one or more elements, any PermuteFactor is OK, so pick the first one
+ if (!lower_info->use_factors().empty())
+ {
+ lower_info->addDefPermuteFactor(*lower_info->use_factors().begin());
+ }
+ }
+ // If it is a constant
+ else
+ {
+ lower_info->addDefPermuteFactor(lower_info->use_factors().getOnlyElement());
+ }
+ }
+ }
+ });
+
+ // Set LowerInfo for each operand from the operand::LowerInfo holder
+ _model->operands.iterate([&](const model::OperandIndex &index, model::Operand &object) {
+ setLowerInfo(index, std::move(operands_lower_info[index]));
+
+ // Dump operand LowerInfo
+ // TODO Extract this dumping procedure to be reusable
+ if (!getLowerInfo(index)->def_factors().empty() ||
+ !getLowerInfo(index)->use_factors().empty())
+ {
+ auto factors_to_string = [](const operand::PermuteFactorSet &factors) {
+ std::string str;
+ for (auto factor : factors)
+ {
+ str += factor.backend()->config()->id();
+ str += "(" + model::to_string(factor.layout()) + ")";
+ str += " ";
+ }
+ return "{ " + str + "}";
+ };
+
+ auto operation_index_to_string = [](const model::OperationIndexList &operations) {
+ std::string str;
+ for (auto op : operations.list())
+ {
+ str += std::to_string(op.value());
+ str += " ";
+ }
+ return "{ " + str + "}";
+ };
+
+ const auto lower_info = getLowerInfo(index);
+ const auto &shape = object.shape();
+ const auto &lower_shape = lower_info->shape();
+ std::string def_ops = operation_index_to_string(object.getDef());
+ std::string use_ops = operation_index_to_string(object.getUses());
+ std::string def_layouts = factors_to_string(lower_info->def_factors());
+ std::string use_layouts = factors_to_string(lower_info->use_factors());
+ VERBOSE(Lower) << "* Operand #" << index.value() << " LowerInfo" << std::endl;
+ VERBOSE(Lower) << " - Shape : { " << shape.dim(0) << " "
+ << (shape.rank() > 1 ? shape.dim(1) : 0) << " "
+ << (shape.rank() > 2 ? shape.dim(2) : 0) << " "
+ << (shape.rank() > 3 ? shape.dim(3) : 0) << " "
+ << "}" << std::endl;
+ VERBOSE(Lower) << " - Def Operations : " << def_ops << std::endl;
+ VERBOSE(Lower) << " - Use Operations : " << use_ops << std::endl;
+ VERBOSE(Lower) << " - Lower Info" << std::endl;
+ VERBOSE(Lower) << " - 4D Shape (NHWC) : { " << lower_shape.n() << " " << lower_shape.h()
+ << " " << lower_shape.w() << " " << lower_shape.c() << " "
+ << "}" << std::endl;
+ VERBOSE(Lower) << " - Def Backends : " << def_layouts << std::endl;
+ VERBOSE(Lower) << " - Use Backends : " << use_layouts << std::endl;
+ }
+ });
+ }
+
+ // Run PermutationInsertionPass
+ {
+ pass::PermutationInsertionPass pi_pass(*this);
+ pi_pass.run();
+ // The implementation below no longer works, so it is disabled.
+ // pass::PermutationEliminationPass pe_pass(*this);
+ // pe_pass.run();
+
+ // TODO merge perm subgraphs if possible
+ _subgraphs->dump("merged and sorted operations with permutation");
+ }
+
+ // Graph verifications for the LOWERED phase
+ {
+ assert(verifier::DAGChecker().verify(*this));
+ assert(verifier::EdgeConsistencyChecker().verify(*this));
+ }
+}
+
+std::unique_ptr<compiler::Linear> Graph::linearize(void)
+{
+ assert(_phase == Phase::MODEL);
+
+ auto linear = nnfw::cpp14::make_unique<compiler::Linear>(
+ shareModel(), releaseSubgraphs(), releaseLowerInfo(), releaseBackendResolver());
+
+ // TODO Move the operations and operands to linear object
+ return linear;
+}
+
+void Graph::initializeUseDef()
+{
+ operations().iterate(
+ [&](const model::OperationIndex &index, const model::Operation &node) -> void {
+ auto outputs = node.getOutputs();
+ for (auto output : outputs)
+ {
+ operands().at(output).appendDef(index);
+ }
+
+ auto inputs = node.getInputs();
+ for (auto input : inputs)
+ {
+ operands().at(input).appendUse(index);
+ }
+ });
+}
+
+const operation::LowerInfo *Graph::getLowerInfo(const model::SubgraphIndex &subg_index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operation.find(subg_index);
+ if (itr == _lower_info_map->operation.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+void Graph::setLowerInfo(const model::SubgraphIndex &subg_index,
+ std::unique_ptr<operation::LowerInfo> &&lower_info)
+{
+ assert(_lower_info_map);
+ _lower_info_map->operation.insert(std::make_pair(subg_index, std::move(lower_info)));
+}
+
+const operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index)
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+void Graph::setLowerInfo(const model::OperandIndex &index,
+ std::unique_ptr<operand::LowerInfo> &&lower_info)
+{
+ assert(_lower_info_map);
+ _lower_info_map->operand.insert(std::make_pair(index, std::move(lower_info)));
+}
+
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+
+// Explicit instantiations to have implementation in the source file.
+
+template class Graph::DefaultIterator<true>;
+template class Graph::DefaultIterator<false>;
+
+template class Graph::PostDfsIterator<true>;
+template class Graph::PostDfsIterator<false>;
+
+//
+// Graph::DefaultIterator
+//
+
+template <bool is_const>
+void Graph::DefaultIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ graph.operations().iterate(
+ [&](const model::OperationIndex &index, NodeRef node) -> void { fn(index, node); });
+}
+
+//
+// Graph::PostDfsIterator
+//
+
+template <bool is_const>
+void Graph::PostDfsIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ assert(!graph.isBuildingPhase()); // Restrict iteration condition
+
+ model::OperationIndexMap<bool> visited;
+ graph.operations().iterate(
+ [&](const model::OperationIndex &index, NodeRef) { visited[index] = false; });
+
+ std::function<void(const model::OperationIndex &, NodeRef)> dfs_recursive =
+ [&](const model::OperationIndex &index, NodeRef node) -> void {
+ if (visited[index])
+ return;
+ visited[index] = true;
+
+ for (auto output : node.getOutputs())
+ {
+ const auto &operand = graph.operands().at(output);
+ for (const auto &use : operand.getUses().list())
+ {
+ dfs_recursive(use, graph.operations().at(use));
+ }
+ }
+
+ fn(index, node);
+ };
+
+ graph.operations().iterate(dfs_recursive);
+
+ // All of the operations (nodes) must have been visited.
+ assert(
+ std::all_of(visited.begin(), visited.end(),
+ [](const std::pair<const model::OperationIndex, bool> &v) { return v.second; }));
+}
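+// Note: dfs_recursive descends into the uses (consumers) of each node's
+// outputs before invoking fn, so callbacks fire in reverse topological order.
+// E.g., for a chain A -> B -> C, fn sees C, then B, then A; this is why
+// Graph::lower() reverses each subgraph's operation list afterwards.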
+
+void Graph::setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br)
+{
+ _backend_resolver = std::move(br);
+}
+
+std::unique_ptr<compiler::BackendResolver> Graph::releaseBackendResolver()
+{
+ return std::move(_backend_resolver);
+}
+
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/dumper/Dumper.cc b/runtimes/neurun/core/src/graph/dumper/Dumper.cc
new file mode 100644
index 000000000..315e2cebf
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/dumper/Dumper.cc
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dumper.h"
+
+#include <string>
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace dumper
+{
+
+using namespace neurun::model::operation;
+
+void Dumper::visit(const AbsNode &node)
+{
+ VERBOSE(LIR) << "* Abs" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(AbsNode::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const AddNode &node)
+{
+ VERBOSE(LIR) << "* Add" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(AddNode::Input::LHS).value() << ", "
+ << node.getInputs().at(AddNode::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ArgMaxNode &node)
+{
+ VERBOSE(LIR) << "* ArgMax" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ArgMaxNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const AvgPool2DNode &node)
+{
+ VERBOSE(LIR) << "* AvgPool2D(Implicit)" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(AvgPool2DNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const CastNode &node)
+{
+ VERBOSE(LIR) << "* Cast" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(CastNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ComparisonNode &node)
+{
+ VERBOSE(LIR) << "* Comparison" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(ComparisonNode::Input::INPUT0).value() << ", "
+ << node.getInputs().at(ComparisonNode::Input::INPUT1).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ConcatNode &node)
+{
+ VERBOSE(LIR) << "* Concat" << std::endl;
+ std::string inputs;
+ for (auto i : node.getInputs())
+ {
+ inputs += std::to_string(i.value()) + ",";
+ }
+ VERBOSE(LIR) << " - Inputs : IFM(" << inputs << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Conv2DNode &node)
+{
+ std::string padding_type =
+ node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* Conv2D(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(Conv2DNode::Input::INPUT).value()
+ << ") Kernel(" << node.getInputs().at(Conv2DNode::Input::KERNEL).value() << ") Bias("
+ << node.getInputs().at(Conv2DNode::Input::BIAS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const DepthToSpaceNode &node)
+{
+ VERBOSE(LIR) << "* DepthToSpace" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(DepthToSpaceNode::Input::INPUT).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const DepthwiseConv2DNode &node)
+{
+ std::string padding_type =
+ node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* DepthwiseConv2D(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM("
+ << node.getInputs().at(DepthwiseConv2DNode::Input::INPUT).value() << ") Kernel("
+ << node.getInputs().at(DepthwiseConv2DNode::Input::KERNEL).value() << ") Bias("
+ << node.getInputs().at(DepthwiseConv2DNode::Input::BIAS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const DequantizeNode &node)
+{
+ VERBOSE(LIR) << "* Dequantize" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(DequantizeNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const DivNode &node)
+{
+ VERBOSE(LIR) << "* Div" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(DivNode::Input::LHS).value() << ", "
+ << node.getInputs().at(DivNode::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const EmbeddingLookupNode &node)
+{
+ VERBOSE(LIR) << "* EmbeddingLookup" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Lookups("
+ << node.getInputs().at(EmbeddingLookupNode::Input::LOOKUPS).value() << ") VALUES("
+ << node.getInputs().at(EmbeddingLookupNode::Input::VALUES).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ExpNode &node)
+{
+ VERBOSE(LIR) << "* Exp" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ExpNode::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const FloorNode &node)
+{
+ VERBOSE(LIR) << "* Floor" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(FloorNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const FullyConnectedNode &node)
+{
+ VERBOSE(LIR) << "* FullyConnected" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM("
+ << node.getInputs().at(FullyConnectedNode::Input::INPUT).value() << ") Weight("
+ << node.getInputs().at(FullyConnectedNode::Input::WEIGHT).value() << ") Bias("
+ << node.getInputs().at(FullyConnectedNode::Input::BIAS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const GatherNode &node)
+{
+ VERBOSE(LIR) << "* Gather" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(GatherNode::Input::INPUT).value()
+ << ") Indices(" << node.getInputs().at(GatherNode::Input::INDICES).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const HashtableLookupNode &node)
+{
+ VERBOSE(LIR) << "* HashTableLookup" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Lookups("
+ << node.getInputs().at(HashtableLookupNode::Input::LOOKUPS).value() << ") Keys("
+ << node.getInputs().at(HashtableLookupNode::Input::KEYS).value() << ") Values("
+ << node.getInputs().at(HashtableLookupNode::Input::VALUES).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Outputs : Output("
+ << node.getInputs().at(HashtableLookupNode::Output::OUTPUT).value() << ") Hits("
+ << node.getInputs().at(HashtableLookupNode::Output::HITS).value() << ")"
+ << std::endl;
+}
+
+void Dumper::visit(const L2NormalizationNode &node)
+{
+ VERBOSE(LIR) << "* L2Normalization" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(L2NormalizationNode::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const L2Pool2DNode &node)
+{
+ VERBOSE(LIR) << "* L2Pool2D" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(L2Pool2DNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LocalResponseNormalizationNode &node)
+{
+ VERBOSE(LIR) << "* LocalResponseNormalization" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(LocalResponseNormalizationNode::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LSTMNode &node)
+{
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LSTMNode::Input::INPUT).value()
+ << ") Input To Input Weights("
+ << node.getInputs().at(LSTMNode::Input::INPUT_TO_INPUT_WEIGHTS).value()
+ << ") Input To Forget Weights("
+ << node.getInputs().at(LSTMNode::Input::INPUT_TO_FORGET_WEIGHTS).value()
+ << ") Input To Cell Weights("
+ << node.getInputs().at(LSTMNode::Input::INPUT_TO_CELL_WEIGHTS).value()
+ << ") Input To Output Weights("
+ << node.getInputs().at(LSTMNode::Input::INPUT_TO_OUTPUT_WEIGHTS).value()
+ << ") Recurrent To Input Weights("
+ << node.getInputs().at(LSTMNode::Input::RECURRENT_TO_INPUT_WEIGHTS).value()
+ << ") Recurrent To Forget Weights("
+ << node.getInputs().at(LSTMNode::Input::RECURRENT_TO_FORGET_WEIGHTS).value()
+ << ") Recurrent To Cell Weights("
+ << node.getInputs().at(LSTMNode::Input::RECURRENT_TO_CELL_WEIGHTS).value()
+ << ") Recurrent To Output Weights("
+ << node.getInputs().at(LSTMNode::Input::RECURRENT_TO_OUTPUT_WEIGHTS).value()
+ << ") Cell To Input Weights("
+ << node.getInputs().at(LSTMNode::Input::CELL_TO_INPUT_WEIGHTS).value()
+ << ") Cell To Forget Weights("
+ << node.getInputs().at(LSTMNode::Input::CELL_TO_FORGET_WEIGHTS).value()
+ << ") Cell To OUTPUT Weights("
+ << node.getInputs().at(LSTMNode::Input::CELL_TO_OUTPUT_WEIGHTS).value()
+ << ") Input Gate Bias("
+ << node.getInputs().at(LSTMNode::Input::INPUT_GATE_BIAS).value()
+ << ") Forget Gate Bias("
+ << node.getInputs().at(LSTMNode::Input::FORGET_GATE_BIAS).value() << ") Cell Bias("
+ << node.getInputs().at(LSTMNode::Input::CELL_BIAS).value() << ") Output Gate Bias("
+ << node.getInputs().at(LSTMNode::Input::OUTPUT_GATE_BIAS).value()
+ << ") Projection Weights("
+ << node.getInputs().at(LSTMNode::Input::PROJECTION_WEIGHTS).value()
+ << ") Projection Bias("
+ << node.getInputs().at(LSTMNode::Input::PROJECTION_BIAS).value()
+ << ") Output State In("
+ << node.getInputs().at(LSTMNode::Input::OUTPUT_STATE_IN).value()
+ << ") Cell State In(" << node.getInputs().at(LSTMNode::Input::CELL_STATE_IN).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Scratch Buffer("
+ << node.getOutputs().at(LSTMNode::Output::SCRATCH_BUFFER).value()
+ << ") Output State Out("
+ << node.getInputs().at(LSTMNode::Output::OUTPUT_STATE_OUT).value()
+ << ") Cell State Out("
+ << node.getInputs().at(LSTMNode::Output::CELL_STATE_OUT).value() << ") Output("
+ << node.getInputs().at(LSTMNode::Output::OUTPUT).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LogicalAndNode &node)
+{
+ VERBOSE(LIR) << "* LogicalAnd" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(LogicalAndNode::Input::INPUT0).value() << ", "
+ << node.getInputs().at(LogicalAndNode::Input::INPUT1).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LogicalNotNode &node)
+{
+ VERBOSE(LIR) << "* LogicalNot" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LogicalNotNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LogicalOrNode &node)
+{
+ VERBOSE(LIR) << "* LogicalOr" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LogicalOrNode::Input::INPUT0).value()
+ << ", " << node.getInputs().at(LogicalOrNode::Input::INPUT1).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LogisticNode &node)
+{
+ VERBOSE(LIR) << "* Logistic" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LogisticNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const MaxPool2DNode &node)
+{
+ std::string padding_type =
+ node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* MaxPool2D(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(MaxPool2DNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const MeanNode &node)
+{
+ VERBOSE(LIR) << "* Mean" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(MeanNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const MulNode &node)
+{
+ VERBOSE(LIR) << "* Mul" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(MulNode::Input::LHS).value() << ", "
+ << node.getInputs().at(MulNode::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const NegNode &node)
+{
+ VERBOSE(LIR) << "* Neg" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(NegNode::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const PermuteNode &node)
+{
+ std::string permute_type = "Unknown";
+ switch (node.getPermuteType())
+ {
+ case PermuteNode::Type::COPY:
+ permute_type = "Copy";
+ break;
+ case PermuteNode::Type::NHWC_TO_NCHW:
+ permute_type = "NHWC to NCHW";
+ break;
+ case PermuteNode::Type::NCHW_TO_NHWC:
+ permute_type = "NCHW to NHWC";
+ break;
+ }
+
+ VERBOSE(LIR) << "* Permute(" + permute_type + ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(0).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const PReLUNode &node)
+{
+ VERBOSE(LIR) << "* PReLU" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(PReLUNode::Input::INPUT).value()
+ << ") Alpha(" << node.getInputs().at(PReLUNode::Input::ALPHA).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReduceMaxNode &node)
+{
+ VERBOSE(LIR) << "* ReduceMax" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReduceMaxNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReduceMinNode &node)
+{
+ VERBOSE(LIR) << "* ReduceMin" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReduceMinNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReduceSumNode &node)
+{
+ VERBOSE(LIR) << "* ReduceSum" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReduceSumNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReLUNode &node)
+{
+ VERBOSE(LIR) << "* ReLU" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReLUNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReLU1Node &node)
+{
+ VERBOSE(LIR) << "* ReLU1" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReLU1Node::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReLU6Node &node)
+{
+ VERBOSE(LIR) << "* ReLU6" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReLU6Node::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReshapeNode &node)
+{
+ VERBOSE(LIR) << "* Reshape" << std::endl;
+  // TODO The shape index should be "node.getInputs().at(1).value()" but it is not valid for now
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReshapeNode::Input::INPUT).value()
+ << ") Shape("
+ << "?"
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ResizeBilinearNode &node)
+{
+ VERBOSE(LIR) << "* ResizeBilinear" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(ResizeBilinearNode::Input::INPUT).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const RNNNode &node)
+{
+ VERBOSE(LIR) << "* RNN" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(RNNNode::Input::INPUT).value()
+ << ") Weights" << node.getInputs().at(RNNNode::Input::WEIGHTS).value()
+ << ") Recurrent Weights"
+ << node.getInputs().at(RNNNode::Input::RECURRENT_WEIGHTS).value() << ") Bias"
+ << node.getInputs().at(RNNNode::Input::BIAS).value() << ") Hidden State"
+ << node.getInputs().at(RNNNode::Input::HIDDEN_STATE_IN).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(RNNNode::Output::OUTPUT).value()
+ << ") Hidden State" << node.getInputs().at(RNNNode::Output::HIDDEN_STATE_OUT).value()
+ << ")" << std::endl;
+}
+
+void Dumper::visit(const RSQRTNode &node)
+{
+ VERBOSE(LIR) << "* RSQRT" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(RSQRTNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SoftmaxNode &node)
+{
+ VERBOSE(LIR) << "* Softmax" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(SoftmaxNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SpaceToDepthNode &node)
+{
+ VERBOSE(LIR) << "* SpaceToDepth" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(SpaceToDepthNode::Input::INPUT).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SplitNode &node)
+{
+ VERBOSE(LIR) << "* Split" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(SplitNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SQRTNode &node)
+{
+ VERBOSE(LIR) << "* SQRT" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(SQRTNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SquaredDifferenceNode &node)
+{
+ VERBOSE(LIR) << "* SquaredDifference" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(SquaredDifferenceNode::Input::LHS).value() << ", "
+ << node.getInputs().at(SquaredDifferenceNode::Input::RHS).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SqueezeNode &node)
+{
+ VERBOSE(LIR) << "* Squeeze" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(SqueezeNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const StridedSliceNode &node)
+{
+ VERBOSE(LIR) << "* StridedSlice" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(StridedSliceNode::Input::INPUT).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SubNode &node)
+{
+ VERBOSE(LIR) << "* Sub" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(SubNode::Input::LHS).value() << ", "
+ << node.getInputs().at(SubNode::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const TanhNode &node)
+{
+ VERBOSE(LIR) << "* TanH" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(TanhNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const TopKV2Node &node)
+{
+ VERBOSE(LIR) << "* TopKV2" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(TopKV2Node::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Outputs : Values("
+ << node.getOutputs().at(TopKV2Node::Output::OUTPUT_VALUES).value() << ") Indices("
+ << node.getOutputs().at(TopKV2Node::Output::OUTPUT_INDICES).value() << ")"
+ << std::endl;
+}
+
+void Dumper::visit(const TransposeConvNode &node)
+{
+ std::string padding_type =
+ node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* TransposeConv(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Output Shape("
+               << node.getInputs().at(TransposeConvNode::Input::OUTPUT_SHAPE).value() << ") Kernel("
+ << node.getInputs().at(TransposeConvNode::Input::KERNEL).value() << ") IFM("
+ << node.getInputs().at(TransposeConvNode::Input::INPUT).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const TransposeNode &node)
+{
+ VERBOSE(LIR) << "* Transpose" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(TransposeNode::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const model::operation::UnpackNode &node)
+{
+ VERBOSE(LIR) << "* Unpack" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(UnpackNode::Input::INPUT).value()
+ << ")" << std::endl;
+ std::string outputs;
+ const auto &output_indices = node.getOutputs();
+ for (auto it = std::begin(output_indices); it != std::end(output_indices); ++it)
+ {
+ outputs += std::to_string(it->value());
+ if (std::next(it) != std::end(output_indices))
+ outputs += ", ";
+ }
+ VERBOSE(LIR) << " - Outputs : Outputs(" << outputs << ")" << std::endl;
+}
+
+} // namespace dumper
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/dumper/Dumper.h b/runtimes/neurun/core/src/graph/dumper/Dumper.h
new file mode 100644
index 000000000..882108ad7
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/dumper/Dumper.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_DUMPER_H__
+#define __NEURUN_GRAPH_DUMPER_H__
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace dumper
+{
+
+class Dumper : public model::OperationVisitor
+{
+public:
+ Dumper() = default;
+
+public:
+ void visit(const model::operation::AbsNode &) override;
+ void visit(const model::operation::AddNode &node) override;
+ void visit(const model::operation::ArgMaxNode &) override;
+ void visit(const model::operation::AvgPool2DNode &node) override;
+ void visit(const model::operation::CastNode &) override;
+ void visit(const model::operation::ComparisonNode &) override;
+ void visit(const model::operation::ConcatNode &node) override;
+ void visit(const model::operation::Conv2DNode &node) override;
+ void visit(const model::operation::DepthToSpaceNode &) override;
+ void visit(const model::operation::DepthwiseConv2DNode &node) override;
+ void visit(const model::operation::DequantizeNode &) override;
+ void visit(const model::operation::DivNode &) override;
+ void visit(const model::operation::EmbeddingLookupNode &) override;
+ void visit(const model::operation::ExpNode &) override;
+ void visit(const model::operation::FloorNode &) override;
+ void visit(const model::operation::FullyConnectedNode &node) override;
+ void visit(const model::operation::GatherNode &) override;
+ void visit(const model::operation::HashtableLookupNode &) override;
+ void visit(const model::operation::L2NormalizationNode &) override;
+ void visit(const model::operation::L2Pool2DNode &) override;
+ void visit(const model::operation::LocalResponseNormalizationNode &) override;
+ void visit(const model::operation::LogicalAndNode &) override;
+ void visit(const model::operation::LogicalNotNode &) override;
+ void visit(const model::operation::LogicalOrNode &) override;
+ void visit(const model::operation::LogisticNode &) override;
+ void visit(const model::operation::LSTMNode &) override;
+ void visit(const model::operation::MaxPool2DNode &node) override;
+ void visit(const model::operation::MeanNode &) override;
+ void visit(const model::operation::MulNode &) override;
+ void visit(const model::operation::NegNode &) override;
+ void visit(const model::operation::PermuteNode &node) override;
+ void visit(const model::operation::PReLUNode &) override;
+ void visit(const model::operation::ReduceMaxNode &) override;
+ void visit(const model::operation::ReduceMinNode &) override;
+ void visit(const model::operation::ReduceSumNode &) override;
+ void visit(const model::operation::ReLUNode &) override;
+ void visit(const model::operation::ReLU1Node &) override;
+ void visit(const model::operation::ReLU6Node &) override;
+ void visit(const model::operation::ReshapeNode &node) override;
+ void visit(const model::operation::ResizeBilinearNode &) override;
+ void visit(const model::operation::RNNNode &) override;
+ void visit(const model::operation::RSQRTNode &) override;
+ void visit(const model::operation::SoftmaxNode &node) override;
+ void visit(const model::operation::SpaceToDepthNode &) override;
+ void visit(const model::operation::SplitNode &) override;
+ void visit(const model::operation::SQRTNode &) override;
+ void visit(const model::operation::SquaredDifferenceNode &) override;
+ void visit(const model::operation::SqueezeNode &) override;
+ void visit(const model::operation::StridedSliceNode &) override;
+ void visit(const model::operation::SubNode &) override;
+ void visit(const model::operation::TanhNode &) override;
+ void visit(const model::operation::TopKV2Node &) override;
+ void visit(const model::operation::TransposeConvNode &) override;
+ void visit(const model::operation::TransposeNode &) override;
+ void visit(const model::operation::UnpackNode &) override;
+};
+
+} // namespace dumper
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_DUMPER_H__
diff --git a/runtimes/neurun/core/src/graph/operand/LowerInfo.cc b/runtimes/neurun/core/src/graph/operand/LowerInfo.cc
new file mode 100644
index 000000000..e8a4fe553
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/operand/LowerInfo.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph/operand/LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+// NO IMPLEMENTATION YET
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/operand/Shape4DConvert.h b/runtimes/neurun/core/src/graph/operand/Shape4DConvert.h
new file mode 100644
index 000000000..9b8d44e1f
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/operand/Shape4DConvert.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
+#define __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
+
+#include "graph/operand/LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
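+// Convert a shape of rank 0..4 into a 4D shape by right-aligning its
+// dimensions and padding the leading dimensions with 1.
+// For example, a rank-2 shape {H, W} becomes Shape4D(1, 1, H, W).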
+inline LowerInfo::Shape4D asShape4D(const model::Shape &shape)
+{
+ switch (shape.rank())
+ {
+ case 0u:
+ return LowerInfo::Shape4D(1, 1, 1, 1);
+
+ case 1u:
+ return LowerInfo::Shape4D(1, 1, 1, shape.dim(0));
+
+ case 2u:
+ return LowerInfo::Shape4D(1, 1, shape.dim(0), shape.dim(1));
+
+ case 3u:
+ return LowerInfo::Shape4D(1, shape.dim(0), shape.dim(1), shape.dim(2));
+
+ case 4u:
+ return LowerInfo::Shape4D(shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3));
+
+ default:
+ throw "Unsupported rank > 4";
+ }
+}
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
diff --git a/runtimes/neurun/core/src/graph/operation/LowerInfo.cc b/runtimes/neurun/core/src/graph/operation/LowerInfo.cc
new file mode 100644
index 000000000..507dcc7d0
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/operation/LowerInfo.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph/operation/LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+LowerInfo::LowerInfo(const backend::Backend *backend, model::Layout layout)
+ : _permute_factor{backend, layout}
+{
+ // DO NOTHING
+}
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/pass/OperandPass.cc b/runtimes/neurun/core/src/graph/pass/OperandPass.cc
new file mode 100644
index 000000000..237833cf4
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/OperandPass.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperandPass.h"
+
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
+void OperandPass::run()
+{
+ _graph.operands().iterate(
+ [&](const model::OperandIndex &index, model::Operand &object) { callback(index, object); });
+}
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/pass/OperandPass.h b/runtimes/neurun/core/src/graph/pass/OperandPass.h
new file mode 100644
index 000000000..4b25929c5
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/OperandPass.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_OPERAND_PASS_H__
+#define __NEURUN_GRAPH_PASS_OPERAND_PASS_H__
+
+#include "Pass.h"
+#include "model/Index.h"
+
+namespace neurun
+{
+namespace model
+{
+class Operand;
+} // namespace model
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
+class OperandPass : public Pass
+{
+public:
+ using Pass::Pass;
+
+public:
+ std::string id() override = 0;
+ void run() override final;
+ virtual void callback(const model::OperandIndex &i, model::Operand &o) = 0;
+};
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_OPERAND_PASS_H__
diff --git a/runtimes/neurun/core/src/graph/pass/OperationPass.cc b/runtimes/neurun/core/src/graph/pass/OperationPass.cc
new file mode 100644
index 000000000..8e6709873
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/OperationPass.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationPass.h"
+
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
+void OperationPass::run()
+{
+ _graph.operations().iterate(
+ [&](const model::OperationIndex &index, model::Operation &node) { callback(index, node); });
+}
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/pass/OperationPass.h b/runtimes/neurun/core/src/graph/pass/OperationPass.h
new file mode 100644
index 000000000..ac6a85345
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/OperationPass.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file OperationPass.h
+ * @brief This file contains OperationPass class
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_OPERATION_PASS_H__
+#define __NEURUN_GRAPH_PASS_OPERATION_PASS_H__
+
+#include "Pass.h"
+
+#include "model/Index.h"
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
+/**
+ * @brief Class to iterate over operations and calls callback() method
+ */
+class OperationPass : public Pass
+{
+public:
+ using Pass::Pass;
+
+public:
+ /**
+   * @brief Returns string id for this pass. Same as the class name.
+ *
+ * @return string id
+ */
+ std::string id() override = 0;
+
+ /**
+ * @brief Run the pass
+ */
+ void run() override final;
+
+ /**
+   * @brief The function that will be executed for each operation
+   *
+   * @param[in] i Index of the operation node
+   * @param[in] n The operation node
+ */
+ virtual void callback(const model::OperationIndex &i, model::Operation &n) = 0;
+};
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_OPERATION_PASS_H__
diff --git a/runtimes/neurun/core/src/graph/pass/Pass.cc b/runtimes/neurun/core/src/graph/pass/Pass.cc
new file mode 100644
index 000000000..4c3436961
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/Pass.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pass.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/pass/Pass.h b/runtimes/neurun/core/src/graph/pass/Pass.h
new file mode 100644
index 000000000..4200936d1
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/Pass.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_PASS_H__
+#define __NEURUN_GRAPH_PASS_PASS_H__
+
+#include <string>
+
+namespace neurun
+{
+namespace graph
+{
+class Graph;
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
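+// Base class of all graph passes. A concrete pass supplies an id and a run()
+// body that transforms _graph; a minimal sketch (hypothetical subclass):
+//
+//   class MyPass : public Pass
+//   {
+//   public:
+//     using Pass::Pass;
+//     std::string id() override { return "MyPass"; }
+//     void run() override { /* transform _graph here */ }
+//   };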
+class Pass
+{
+public:
+ Pass(Graph &graph) : _graph{graph} {}
+ virtual ~Pass() = default;
+
+public:
+ virtual std::string id() = 0;
+ virtual void run() = 0;
+
+protected:
+ Graph &_graph;
+};
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_PASS_H__
diff --git a/runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.cc b/runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.cc
new file mode 100644
index 000000000..1fc9b69cf
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PermutationEliminationPass.h"
+
+#include "model/Operand.h"
+#include "graph/operand/LowerInfo.h"
+#include "graph/Graph.h"
+#include "backend/IConfig.h"
+#include "util/logging.h"
+#include "compiler/BackendResolver.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
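+// Only the model's own inputs and outputs are elimination candidates; Permute
+// operations between internal operands are left untouched.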
+void PermutationEliminationPass::callback(const model::OperandIndex &inp_index,
+ model::Operand &object)
+{
+ if (_graph.getInputs().contains(inp_index))
+ {
+ eliminateInput(inp_index, object);
+ }
+ else if (_graph.getOutputs().contains(inp_index))
+ {
+ eliminateOutput(inp_index, object);
+ }
+}
+
+void PermutationEliminationPass::eliminateInput(const model::OperandIndex &inp_index,
+ model::Operand &object)
+{
+ auto &model_inputs = _graph.getInputs();
+
+ // get uses of the model's given input
+ auto uses = object.getUses();
+
+  // the input must be used by exactly one operation: the permutation
+ if (uses.size() != 1)
+ {
+ return;
+ }
+
+ for (auto input_use : uses.list())
+ {
+ auto &perm_operation = _graph.operations().at(input_use);
+ auto perm_inputs = perm_operation.getInputs();
+
+ auto perm_outputs = perm_operation.getOutputs();
+
+ if (!isPermuteLayerToEliminate(perm_inputs, perm_outputs, true))
+ {
+ return;
+ }
+
+ assert(perm_inputs.at(0) == inp_index);
+
+ VERBOSE(PermutationEliminationPass::EliminateInput) << "remove NHWC_TO_NCHW permutation\n";
+
+ // set model's new input, which was output of permutation
+ model_inputs.replace(inp_index, perm_outputs.at(0));
+
+ // remove model's input, which is also input of permutation
+ _graph.removeOperand(inp_index);
+
+ // remove permutation operation
+ assert(_graph.subgraphs().containsOperation(input_use));
+ auto subg_idx = _graph.subgraphs().getOperation(input_use);
+ _graph.subgraphs().remove(subg_idx);
+ _graph.operations().remove(input_use);
+
+ VERBOSE(PermutationEliminationPass::EliminateInput)
+ << inp_index.value() << " is model's input and is removed. New input is "
+ << perm_outputs.at(0).value() << "\n"
+ << input_use.value() << " is removed permutation operation\n";
+ }
+}
+
+void PermutationEliminationPass::eliminateOutput(const model::OperandIndex &out_index,
+ model::Operand &object)
+{
+ auto &model_outputs = _graph.getOutputs();
+
+ // get defs of the model's given output
+ auto defs = object.getDef();
+
+  // the output must be defined by exactly one operation: the permutation
+ if (defs.size() != 1)
+ {
+ return;
+ }
+
+ for (auto output_def : defs.list())
+ {
+ auto &perm_operation = _graph.operations().at(output_def);
+ auto perm_outputs = perm_operation.getOutputs();
+
+ auto perm_inputs = perm_operation.getInputs();
+ if (!isPermuteLayerToEliminate(perm_inputs, perm_outputs, false))
+ {
+ return;
+ }
+
+ assert(perm_outputs.at(0) == out_index);
+
+ VERBOSE(PermutationEliminationPass::EliminateOutput) << "remove NCHW_TO_NHWC permutation\n";
+
+ // Update operations' output that is used by permute operand
+ for (auto perm_input_index : perm_inputs)
+ {
+ auto &perm_input_operand = _graph.operands().at(perm_input_index);
+ perm_input_operand.removeUse(output_def);
+ }
+
+ // set model's new output, which was input of permutation
+ model_outputs.replace(out_index, perm_inputs.at(0));
+
+ // remove model's output, which is also output of permutation
+ _graph.removeOperand(out_index);
+
+ // remove permutation operation
+ assert(_graph.subgraphs().containsOperation(output_def));
+ auto subg_idx = _graph.subgraphs().getOperation(output_def);
+ _graph.subgraphs().remove(subg_idx);
+ _graph.operations().remove(output_def);
+
+ VERBOSE(PermutationEliminationPass::EliminateOutput)
+ << out_index.value() << " is model's output and is removed. New output is "
+ << perm_inputs.at(0).value() << "\n"
+ << output_def.value() << " is removed permutation operation\n";
+ }
+}
+
+bool PermutationEliminationPass::isPermuteLayerToEliminate(
+ const model::OperandIndexSequence &inp_indexes, const model::OperandIndexSequence &out_indexes,
+ bool is_for_model_input)
+{
+  auto input_def_factors = _graph.getLowerInfo(inp_indexes.at(0))->def_factors();
+  auto output_def_factors = _graph.getLowerInfo(out_indexes.at(0))->def_factors();
+
+  // Check the factor counts first: getOnlyElement() is only valid for
+  // single-element sets
+  if (input_def_factors.size() != 1 || output_def_factors.size() != 1)
+  {
+    return false;
+  }
+
+  auto input_layout = input_def_factors.getOnlyElement().layout();
+  auto output_layout = output_def_factors.getOnlyElement().layout();
+
+  // all input operands' factors must be the same
+ for (auto index : inp_indexes)
+ {
+ auto op_factor_set = _graph.getLowerInfo(index)->def_factors();
+ if (op_factor_set.size() != 1 ||
+ input_layout != _graph.getLowerInfo(index)->def_factors().getOnlyElement().layout())
+ {
+ return false;
+ }
+ }
+  // all output operands' factors must be the same
+ for (auto index : out_indexes)
+ {
+ auto op_factor_set = _graph.getLowerInfo(index)->def_factors();
+ if (op_factor_set.size() != 1 ||
+ output_layout != _graph.getLowerInfo(index)->def_factors().getOnlyElement().layout())
+ {
+ return false;
+ }
+ }
+
+ if (is_for_model_input)
+ {
+ // check if this is NHWC_TO_NCHW permutation: must have single input, which is model's input
+ return (inp_indexes.size() == 1 && input_layout == model::Layout::NHWC &&
+ output_layout == model::Layout::NCHW);
+ }
+
+ // check if this is NCHW_TO_NHWC permutation: must have single output, which is model's output
+ return (out_indexes.size() == 1 && input_layout == model::Layout::NCHW &&
+ output_layout == model::Layout::NHWC);
+}
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.h b/runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.h
new file mode 100644
index 000000000..332eeb6f4
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/PermutationEliminationPass.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__
+#define __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__
+
+#include "OperandPass.h"
+#include "model/Operand.h"
+#include "model/OperandIndexSequence.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
+class PermutationEliminationPass : public OperandPass
+{
+public:
+ using OperandPass::OperandPass;
+
+public:
+ std::string id() override { return "PermutationEliminationPass"; }
+
+ void callback(const model::OperandIndex &index, model::Operand &object) override;
+
+private:
+ /**
+   * @brief Remove a Permute operation that permutes a model input
+   *
+   * Note: This function also removes the model's input and
+   * sets the output of the permutation as the model's new input
+   *
+   * @param inp_index is the target operand index for the elimination
+   * @param object is the target operand object for the elimination
+ */
+ void eliminateInput(const model::OperandIndex &inp_index, model::Operand &object);
+
+ /**
+   * @brief Remove a Permute operation that permutes a model output
+   *
+   * Note: This function also removes the model's output and
+   * sets the input of the permutation as the model's new output
+   *
+   * @param out_index is the target operand index for the elimination
+   * @param object is the target operand object for the elimination
+ */
+ void eliminateOutput(const model::OperandIndex &out_index, model::Operand &object);
+
+ /**
+   * @brief Determine whether the passed operands are the input and output of a
+   *        Permute operation that must be eliminated
+   *
+   * @param inp_indexes indexes of the operation's input operands
+   * @param out_indexes indexes of the operation's output operands
+   * @param is_for_model_input true when checking a model input, false when checking a model output
+   *
+   * @return true if this is a Permute operation that should be eliminated
+ */
+ bool isPermuteLayerToEliminate(const model::OperandIndexSequence &inp_indexes,
+ const model::OperandIndexSequence &out_indexes,
+ bool is_for_model_input);
+};
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__
diff --git a/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc b/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc
new file mode 100644
index 000000000..0f07b47fe
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PermutationInsertionPass.h"
+
+#include <cassert>
+#include <utility>
+#include <unordered_map>
+
+#include "model/Operand.h"
+#include "graph/operation/LowerInfo.h"
+#include "graph/Graph.h"
+#include "backend/IConfig.h"
+#include "util/logging.h"
+#include "cpp14/memory.h"
+#include "model/operation/PermuteNode.h"
+#include "graph/operand/Shape4DConvert.h"
+#include "compiler/BackendResolver.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
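+// For each operand, compare the permute factors (backend + layout) required by
+// its users against the factor produced by its definition; for every missing
+// factor, insert a Permute operation and rewire those users to its output.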
+void PermutationInsertionPass::callback(const model::OperandIndex &index, model::Operand &object)
+{
+ auto &&operand_li = _graph.getLowerInfo(index);
+ assert(operand_li);
+
+ // NOTE Later, constants also will have Def
+ // Ignore constants
+ if (operand_li->def_factors().size() == 0)
+ {
+ return;
+ }
+
+ std::list<model::OperationIndex> permute_indexes;
+
+  // Build a map from each permute factor required for this operand to an operand index that provides it
+ std::unordered_map<operand::PermuteFactor, model::OperandIndex> factor_to_index;
+ {
+ assert(operand_li->def_factors().size() == 1);
+ for (auto factor : operand_li->def_factors())
+ {
+ factor_to_index.emplace(factor, index);
+ }
+
+ auto insert_set = operand_li->use_factors() - operand_li->def_factors();
+ for (auto factor : insert_set)
+ {
+ const auto permute_operation_index = insertPermute(index, factor);
+ permute_indexes.push_back(permute_operation_index);
+ VERBOSE(PermutationInsertionPass) << "Insert 'Permute' operation for operand "
+ << index.value() << std::endl;
+ const auto &permute_operation = _graph.operations().at(permute_operation_index);
+ const auto permuted_operand_index = permute_operation.getOutputs().at(0);
+ factor_to_index.emplace(factor, permuted_operand_index);
+ }
+ }
+
+ // Update operations' input that uses this operand
+ {
+ std::list<model::OperationIndex> remove_list;
+
+ auto uses = object.getUses();
+ for (auto use : uses.list())
+ {
+ // If permute operation, ignore it
+ if (std::find(permute_indexes.begin(), permute_indexes.end(), use) != permute_indexes.end())
+ continue;
+
+ auto &operation = _graph.operations().at(use);
+ assert(_graph.subgraphs().containsOperation(use));
+ auto subg_index = _graph.subgraphs().getOperation(use);
+ auto subg_li = _graph.getLowerInfo(subg_index);
+ assert(subg_li);
+ const auto subg_layout = subg_li->layout();
+ const backend::Backend *backend = subg_li->backend();
+ assert(backend);
+ auto use_node_inputs = operation.getInputs();
+ assert(use_node_inputs.contains(index));
+
+ auto new_index = factor_to_index.at({backend, subg_layout});
+ if (index != new_index)
+ {
+ // Update from subgraph
+ _graph.subgraphs().at(subg_index).replaceInput(index, new_index);
+
+ // Update from operation
+ operation.replaceInput(index, new_index);
+
+ // Update from operand
+        // Removal must be deferred since we are iterating over the use list
+        remove_list.push_back(use);
+ _graph.operands().at(new_index).appendUse(use);
+ }
+ }
+
+ for (auto &operation : remove_list)
+ {
+ object.removeUse(operation);
+ }
+ }
+}
+
+model::OperationIndex
+PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index,
+ const operand::PermuteFactor &factor)
+{
+ assert(!_graph.isBuildingPhase());
+
+ auto &operand = _graph.operands().at(operand_index);
+
+ // Generate output operand and permute operation
+ auto out_operand_index = _graph.addOperand(operand.shape(), operand.typeInfo());
+ // change model output if operand_index is model output index
+ auto &model_outputs = _graph.getOutputs();
+ if (model_outputs.contains(operand_index))
+ {
+ model_outputs.replace(operand_index, out_operand_index);
+ }
+
+ // Find PermuteNode information
+ auto input_backend = _graph.getLowerInfo(operand_index)->def_factors().getOnlyElement().backend();
+ auto output_backend = factor.backend();
+  // NOTE PermuteNode may not have a specific layout because the layouts of its
+  // input and output may differ.
+ const auto permute_node_layout = model::Layout::UNKNOWN;
+ const auto permute_node_backend = backend::BackendManager::instance().getDefault();
+ const operand::PermuteFactor permute_node_factor{permute_node_backend, permute_node_layout};
+
+ // Update LowerInfo of input operand
+ auto operand_lower_info = _graph.getLowerInfo(operand_index);
+ operand_lower_info->removeUsePermuteFactor(factor);
+ operand_lower_info->addUsePermuteFactor(permute_node_factor);
+
+ // Update LowerInfo of output operand
+ auto out_operand_li =
+ nnfw::cpp14::make_unique<operand::LowerInfo>(operand::asShape4D(operand.shape()));
+
+  // The input and output factors of all nodes will be the same except for
+  // PermuteNode, so the tensor allocators allocate memory using only the
+  // information of the def permutation factor for now.
+ // TODO Change param to permute_node_factor
+ out_operand_li->addDefPermuteFactor(factor);
+ out_operand_li->addUsePermuteFactor(factor);
+ _graph.setLowerInfo(out_operand_index, std::move(out_operand_li));
+
+ auto input_backend_ctx = _graph.backend_resolver()->getBackendContext(input_backend);
+ auto output_backend_ctx = _graph.backend_resolver()->getBackendContext(output_backend);
+
+ // Insert permute operation to the graph
+ const auto input_layout =
+ _graph.getLowerInfo(operand_index)->def_factors().getOnlyElement().layout();
+ const auto output_layout = factor.layout();
+ using PermuteNode = model::operation::PermuteNode;
+ const auto permute_type = [&]() {
+ if (input_layout == model::Layout::NHWC && output_layout == model::Layout::NCHW)
+ {
+ return PermuteNode::Type::NHWC_TO_NCHW;
+ }
+ else if (input_layout == model::Layout::NCHW && output_layout == model::Layout::NHWC)
+ {
+ return PermuteNode::Type::NCHW_TO_NHWC;
+ }
+ else
+ {
+ return PermuteNode::Type::COPY;
+ }
+ }();
+ auto insert_node = nnfw::cpp14::make_unique<PermuteNode>(
+ operand_index, out_operand_index, input_backend_ctx, output_backend_ctx, permute_type);
+
+ auto node_index = _graph.operations().push(std::move(insert_node));
+ const auto &node = _graph.operations().at(node_index);
+
+ // Subgraph
+ {
+ auto subg_index = _graph.subgraphs().emplace(node_index, node, permute_node_layout);
+ auto &subg = _graph.subgraphs().at(subg_index);
+ subg.setInputs(node.getInputs());
+ subg.setOutputs(node.getOutputs());
+ _graph.setLowerInfo(subg_index, nnfw::cpp14::make_unique<graph::operation::LowerInfo>(
+ permute_node_backend, permute_node_layout));
+ }
+
+ // Update Use/Def info
+ {
+ _graph.operands().at(operand_index).appendUse(node_index);
+ _graph.operands().at(out_operand_index).appendDef(node_index);
+ }
+ return node_index;
+}
+} // namespace pass
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.h b/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.h
new file mode 100644
index 000000000..b430be8b3
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__
+#define __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__
+
+#include "OperandPass.h"
+#include "model/Operand.h" //for model::OperationIndex
+#include "backend/BackendManager.h"
+#include "graph/operand/PermuteFactor.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace pass
+{
+
+class PermutationInsertionPass : public OperandPass
+{
+public:
+ using OperandPass::OperandPass;
+
+public:
+ std::string id() override { return "PermutationInsertionPass"; }
+ void callback(const model::OperandIndex &index, model::Operand &object) override;
+
+ /**
+ * @brief Insert Permute operation that has given operand as input
+ *
+ * @param operand_index is the target operand index for the insertion
+ * @param factor is the output operand's backend type and layout
+ *
+ * @return model::OperationIndex
+ */
+ model::OperationIndex insertPermute(const model::OperandIndex &operand_index,
+ const operand::PermuteFactor &factor);
+
+};
+
+} // namespace pass
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__
diff --git a/runtimes/neurun/core/src/graph/verifier/Verifier.cc b/runtimes/neurun/core/src/graph/verifier/Verifier.cc
new file mode 100644
index 000000000..46e2ead55
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/verifier/Verifier.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Verifier.h"
+
+#include "graph/Graph.h"
+#include "model/OperationIndexMap.h"
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace verifier
+{
+
+//
+// DAGChecker
+//
+
+bool DAGChecker::verify(const Graph &graph) const
+{
+ auto &operations = graph.operations();
+ bool cyclic = false;
+
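+  // Standard DFS cycle detection: 'visited' marks nodes whose subtree has been
+  // fully explored, 'on_stack' marks nodes on the current DFS path. Reaching a
+  // node that is still on the stack means there is a back edge, i.e. a cycle.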
+ model::OperationIndexMap<bool> visited;
+ operations.iterate([&](const model::OperationIndex &index, const model::Operation &) {
+ visited[index] = false;
+ });
+ model::OperationIndexMap<bool> on_stack = visited; // Copy from visited
+
+ std::function<void(const model::OperationIndex &index, const model::Operation &)> dfs_recursive =
+ [&](const model::OperationIndex &index, const model::Operation &node) -> void {
+ if (on_stack[index])
+ cyclic = true;
+ if (visited[index])
+ return;
+ visited[index] = true;
+ on_stack[index] = true;
+
+ for (auto output : node.getOutputs())
+ {
+ const auto &operand = graph.operands().at(output);
+ for (const auto &use : operand.getUses().list())
+ {
+ dfs_recursive(use, graph.operations().at(use));
+ }
+ }
+
+ on_stack[index] = false;
+ };
+
+ operations.iterate(dfs_recursive);
+
+ return !cyclic;
+}
+
+//
+// EdgeConsistencyChecker
+//
+
+bool EdgeConsistencyChecker::verify(const Graph &graph) const
+{
+ auto &operations = graph.operations();
+ uint32_t mismatches = 0;
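+  // Use/Def bookkeeping must mirror the operation's input/output lists: every
+  // input operand must record this operation as a use, and every output
+  // operand must record it as its def.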
+ operations.iterate([&](const model::OperationIndex &index, const model::Operation &node) {
+ for (auto operand_index : node.getInputs())
+ {
+ auto &operand = graph.operands().at(operand_index);
+ mismatches += (operand.getUses().contains(index) ? 0 : 1);
+ }
+ for (auto operand_index : node.getOutputs())
+ {
+ auto &operand = graph.operands().at(operand_index);
+ mismatches += (operand.getDef().contains(index) ? 0 : 1);
+ }
+ });
+ return mismatches == 0;
+}
+
+} // namespace verifier
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/graph/verifier/Verifier.h b/runtimes/neurun/core/src/graph/verifier/Verifier.h
new file mode 100644
index 000000000..ebd908832
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/verifier/Verifier.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_VERIFIER_VERIFIER_H__
+#define __NEURUN_GRAPH_VERIFIER_VERIFIER_H__
+
+namespace neurun
+{
+namespace graph
+{
+class Graph;
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace verifier
+{
+
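+// Structural checks over a built graph. A typical call site (sketch) runs a
+// checker and asserts the result:
+//
+//   assert(verifier::DAGChecker().verify(graph));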
+struct IVerifier
+{
+ virtual ~IVerifier() = default;
+ virtual bool verify(const Graph &graph) const = 0;
+};
+
+} // namespace verifier
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace verifier
+{
+
+class DAGChecker : public IVerifier
+{
+public:
+ bool verify(const Graph &graph) const override;
+};
+
+class EdgeConsistencyChecker : public IVerifier
+{
+public:
+ bool verify(const Graph &graph) const override;
+};
+
+} // namespace verifier
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_VERIFIER_VERIFIER_H__
diff --git a/runtimes/neurun/core/src/library_info.cc b/runtimes/neurun/core/src/library_info.cc
new file mode 100644
index 000000000..601d09185
--- /dev/null
+++ b/runtimes/neurun/core/src/library_info.cc
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+volatile const char info[] = "library information : runtime=neurun";
diff --git a/runtimes/neurun/core/src/model/LayoutSet.cc b/runtimes/neurun/core/src/model/LayoutSet.cc
new file mode 100644
index 000000000..fec6138a7
--- /dev/null
+++ b/runtimes/neurun/core/src/model/LayoutSet.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LayoutSet.h"
+
+namespace neurun
+{
+namespace model
+{
+
+LayoutSet::LayoutSet(std::initializer_list<Layout> layouts)
+{
+ for (auto layout : layouts)
+ {
+ _set.insert(layout);
+ }
+}
+
+LayoutSet LayoutSet::operator|(const LayoutSet &other) const
+{
+ auto ret = *this;
+ for (auto layout : other)
+ {
+ ret.add(layout);
+ }
+ return ret;
+}
+
+LayoutSet LayoutSet::operator&(const LayoutSet &other) const
+{
+ LayoutSet ret;
+ for (auto layout : other)
+ {
+ if (contains(layout))
+ {
+ ret.add(layout);
+ }
+ }
+ return ret;
+}
+
+LayoutSet LayoutSet::operator-(const LayoutSet &other) const
+{
+ auto ret = *this;
+ for (auto layout : other)
+ {
+ ret.remove(layout);
+ }
+ return ret;
+}
+
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/LayoutSet.h b/runtimes/neurun/core/src/model/LayoutSet.h
new file mode 100644
index 000000000..be75c8ee5
--- /dev/null
+++ b/runtimes/neurun/core/src/model/LayoutSet.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_LAYOUT_SET_H__
+#define __NEURUN_MODEL_LAYOUT_SET_H__
+
+#include <initializer_list>
+#include <unordered_set>
+
+#include "model/Layout.h"
+
+namespace neurun
+{
+namespace model
+{
+
+class LayoutSet
+{
+public:
+ LayoutSet() = default;
+ LayoutSet(std::initializer_list<Layout> layouts);
+
+public:
+ void add(const Layout &layout) { _set.insert(layout); }
+ void remove(const Layout &layout) { _set.erase(layout); }
+ uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
+ bool contains(const Layout &layout) const { return _set.find(layout) != _set.end(); }
+
+public:
+ LayoutSet operator|(const LayoutSet &other) const; // Union
+ LayoutSet operator&(const LayoutSet &other) const; // Intersect
+ LayoutSet operator-(const LayoutSet &other) const; // Minus
+
+public:
+ std::unordered_set<Layout>::const_iterator begin() const { return _set.begin(); }
+ std::unordered_set<Layout>::const_iterator end() const { return _set.end(); }
+
+private:
+ std::unordered_set<Layout> _set;
+};
+
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_LAYOUT_SET_H__
diff --git a/runtimes/neurun/core/src/model/Operand.cc b/runtimes/neurun/core/src/model/Operand.cc
new file mode 100644
index 000000000..4d72fac8c
--- /dev/null
+++ b/runtimes/neurun/core/src/model/Operand.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/Operand.h"
+
+namespace neurun
+{
+namespace model
+{
+
+size_t Operand::operandSize(void) const
+{
+  const uint32_t rank = shape().rank();
+  int32_t elements = 1;
+
+  for (uint32_t axis = 0; axis < rank; axis++)
+  {
+    elements *= shape().dim(axis);
+  }
+
+  // The value of type() corresponds to the OperandCode enum in NeuralNetworks.h
+  const DataType type = typeInfo().type();
+  const size_t element_size = sizeOfDataType(type);
+
+  return element_size * elements;
+}
+
+void Operand::appendUse(const ::neurun::model::OperationIndex &idx)
+{
+ assert(!_uses.contains(idx));
+
+ _uses.append(idx);
+}
+
+void Operand::removeUse(const ::neurun::model::OperationIndex &idx)
+{
+ assert(_uses.contains(idx));
+
+ _uses.remove(idx);
+}
+
+void Operand::appendDef(const ::neurun::model::OperationIndex &idx)
+{
+ assert(!isConstant());
+ assert(_def.size() == 0);
+
+ _def.append(idx);
+}
+
+void Operand::removeDef(const ::neurun::model::OperationIndex &idx)
+{
+ assert(_def.contains(idx));
+
+ _def.remove(idx);
+}
+
+void Operand::parent_info(std::unique_ptr<graph::operand::ParentInfo> &&parent_info)
+{
+ _parent_info = std::move(parent_info);
+}
+
+const graph::operand::ParentInfo *Operand::parent_info() const { return _parent_info.get(); }
+
+graph::operand::ParentInfo *Operand::parent_info() { return _parent_info.get(); }
+
+} // namespace model
+} // namespace neurun
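Note: operandSize() above is just element count times element size. As a worked example (assuming DataType::FLOAT32 is 4 bytes per sizeOfDataType), an operand of shape {1, 224, 224, 3} has 1 * 224 * 224 * 3 = 150528 elements, so operandSize() returns 602112. The use/def helpers maintain an SSA-like invariant: appendDef() asserts the operand is not constant and has no prior definition, so every non-constant operand is defined by at most one operation.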
diff --git a/runtimes/neurun/core/src/model/OperandConstraint.cc b/runtimes/neurun/core/src/model/OperandConstraint.cc
new file mode 100644
index 000000000..2730f712a
--- /dev/null
+++ b/runtimes/neurun/core/src/model/OperandConstraint.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/OperandConstraint.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/OperandIndexSequence.cc b/runtimes/neurun/core/src/model/OperandIndexSequence.cc
new file mode 100644
index 000000000..a9454df24
--- /dev/null
+++ b/runtimes/neurun/core/src/model/OperandIndexSequence.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/OperandIndexSequence.h"
+
+#include <algorithm>
+
+namespace neurun
+{
+namespace model
+{
+
+OperandIndexSequence::OperandIndexSequence(std::initializer_list<OperandIndex> list) : _set(list)
+{
+ // DO NOTHING
+}
+
+OperandIndexSequence::OperandIndexSequence(std::initializer_list<int32_t> list)
+{
+ for (auto val : list)
+ {
+ _set.emplace_back(static_cast<uint32_t>(val));
+ }
+}
+
+OperandIndexSequence::OperandIndexSequence(std::initializer_list<uint32_t> list)
+{
+ for (auto val : list)
+ {
+ _set.emplace_back(val);
+ }
+}
+
+bool OperandIndexSequence::contains(const OperandIndex &index) const
+{
+ return std::find(_set.begin(), _set.end(), index) != _set.end();
+}
+
+void OperandIndexSequence::replace(const OperandIndex &from, const OperandIndex &to)
+{
+ std::replace(_set.begin(), _set.end(), from, to);
+}
+
+} // namespace model
+} // namespace neurun
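Note: a short sketch of contains()/replace() (not part of the patch; assumes OperandIndex is constructible from uint32_t, as the constructors above rely on):

    neurun::model::OperandIndexSequence seq{1, 2, 3};
    assert(seq.contains(neurun::model::OperandIndex{2u}));

    seq.replace(neurun::model::OperandIndex{2u}, neurun::model::OperandIndex{7u});
    assert(!seq.contains(neurun::model::OperandIndex{2u}));
    assert(seq.contains(neurun::model::OperandIndex{7u})); // every 2 became 7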
diff --git a/runtimes/neurun/core/src/model/Operation.cc b/runtimes/neurun/core/src/model/Operation.cc
new file mode 100644
index 000000000..fc1bd599e
--- /dev/null
+++ b/runtimes/neurun/core/src/model/Operation.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/Operation.h"
+
+#include <cassert>
+
+#include "graph/operation/LowerInfo.h"
+
+namespace neurun
+{
+namespace model
+{
+
+Operation::Operation(OperandConstraint input_constr, const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : _input_constr{input_constr}, _inputs{inputs}, _outputs{outputs}
+{
+}
+
+Operation::Operation(OperandConstraint input_constr) : _input_constr{input_constr} {}
+
+Operation::~Operation() = default;
+
+void Operation::setInputs(const OperandIndexSequence &indexes)
+{
+ assert(_input_constr.check(indexes.size()));
+ _inputs = indexes;
+}
+
+void Operation::setOutputs(const OperandIndexSequence &indexes) { _outputs = indexes; }
+
+void Operation::replaceInput(const OperandIndex &from, const OperandIndex &to)
+{
+ _inputs.replace(from, to);
+}
+
+void Operation::replaceOutput(const OperandIndex &from, const OperandIndex &to)
+{
+ _outputs.replace(from, to);
+}
+
+} // namespace model
+} // namespace neurun
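Note: setInputs() above enforces the arity chosen at construction time, while setOutputs() is unconstrained. A sketch of the effect (not part of the patch; hypothetical node built like the operation classes that follow):

    // For a node constructed with OperandConstraint::createExact(2u) (e.g. AddNode):
    //   node.setInputs({0, 1});     // OK: two inputs satisfy createExact(2u)
    //   node.setInputs({0, 1, 2});  // fires assert(_input_constr.check(indexes.size()))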
diff --git a/runtimes/neurun/core/src/model/OperationIndexList.cc b/runtimes/neurun/core/src/model/OperationIndexList.cc
new file mode 100644
index 000000000..e2c077ed4
--- /dev/null
+++ b/runtimes/neurun/core/src/model/OperationIndexList.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/OperationIndexList.h"
+
+#include <algorithm>
+
+namespace neurun
+{
+namespace model
+{
+
+OperationIndexList::OperationIndexList(std::initializer_list<OperationIndex> list) : _list(list)
+{
+ // DO NOTHING
+}
+
+bool OperationIndexList::contains(const ::neurun::model::OperationIndex &index) const
+{
+ return std::find(_list.begin(), _list.end(), index) != _list.end();
+}
+
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/Shape.cc b/runtimes/neurun/core/src/model/Shape.cc
new file mode 100644
index 000000000..b7f7bff68
--- /dev/null
+++ b/runtimes/neurun/core/src/model/Shape.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/Shape.h"
+#include "util/Utils.h"
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <numeric>
+
+namespace neurun
+{
+namespace model
+{
+
+FeatureShape Shape::asFeature(Layout layout) const
+{
+ assert(rank() == 4);
+ assert(layout == Layout::NHWC || layout == Layout::NCHW);
+
+ if (layout == Layout::NHWC)
+ {
+ // Feature Map in NHWC layout
+ // - Dimension(0) -> Batch
+ // - Dimension(1) -> Height
+ // - Dimension(2) -> Width
+ // - Dimension(3) -> Depth
+ const auto batch = dim(0);
+ const auto depth = dim(3);
+ const auto height = dim(1);
+ const auto width = dim(2);
+
+ return {batch, depth, height, width};
+ }
+ else if (layout == Layout::NCHW)
+ {
+    // Feature Map in NCHW layout
+ // - Dimension(0) -> Batch
+ // - Dimension(1) -> Depth
+ // - Dimension(2) -> Height
+ // - Dimension(3) -> Width
+ const auto batch = dim(0);
+ const auto depth = dim(1);
+ const auto height = dim(2);
+ const auto width = dim(3);
+
+ return {batch, depth, height, width};
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+}
+
+// Newly added leading dimensions are filled with 1.
+void Shape::extendRank(int to_rank)
+{
+ assert(to_rank - rank() >= 0);
+ _dimensions.insert(_dimensions.cbegin(), to_rank - rank(), 1);
+}
+
+uint64_t Shape::num_elements() const
+{
+  // All dimensions must be non-negative
+ assert(std::all_of(_dimensions.begin(), _dimensions.end(),
+ [](const int32_t &v) { return (v >= 0); }));
+
+ return std::accumulate(_dimensions.cbegin(), _dimensions.cend(), UINT64_C(1),
+ std::multiplies<uint64_t>());
+}
+
+} // namespace model
+} // namespace neurun
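Note: a sketch making the mappings above concrete (not part of the patch; assumes FeatureShape aggregates {batch, depth, height, width} in that order, as the brace-returns above imply, and that Shape is constructible from a dimension list):

    neurun::model::Shape shape{2, 3, 4, 5};
    auto nhwc = shape.asFeature(neurun::model::Layout::NHWC); // N=2, C=5, H=3, W=4
    auto nchw = shape.asFeature(neurun::model::Layout::NCHW); // N=2, C=3, H=4, W=5

    shape.extendRank(6); // prepends 1s: {1, 1, 2, 3, 4, 5}
    assert(shape.num_elements() == 120);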
diff --git a/runtimes/neurun/core/src/model/Subgraph.cc b/runtimes/neurun/core/src/model/Subgraph.cc
new file mode 100644
index 000000000..4b8402720
--- /dev/null
+++ b/runtimes/neurun/core/src/model/Subgraph.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/Subgraph.h"
+#include "model/OperationVisitor.h"
+#include <sstream>
+
+namespace neurun
+{
+namespace model
+{
+
+Subgraph::Subgraph(Layout layout) : Operation{OperandConstraint::createAny()}, _layout{layout}
+{
+ // DO NOTHING
+}
+
+void Subgraph::accept(OperationVisitor &v) const { v.visit(*this); }
+
+// TODO: Implement a Dumper instead of this method
+std::string Subgraph::getStr() const
+{
+  // " subgraph IN( xx xx xx ) -> { op0 op1 op2 } -> OUT( yy yy yy )"
+ std::stringstream ss;
+ ss << " subgraph IN(";
+ for (const auto &index : getInputs())
+ {
+ ss << " " << index.value();
+ }
+ ss << " ) -> {";
+ for (const auto &elem : _operations)
+ {
+ ss << " " << elem.index.value() << "(" << elem.node->getName() << ")";
+ }
+ ss << " } -> OUT(";
+ for (const auto &index : getOutputs())
+ {
+ ss << " " << index.value();
+ }
+ ss << " )";
+ return ss.str();
+}
+
+} // namespace model
+} // namespace neurun
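Note: as a worked example of getStr() above, a subgraph with inputs {1, 2}, a single operation at index 0 named Conv2D, and outputs {3} prints as " subgraph IN( 1 2 ) -> { 0(Conv2D) } -> OUT( 3 )".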
diff --git a/runtimes/neurun/core/src/model/Subgraphs.cc b/runtimes/neurun/core/src/model/Subgraphs.cc
new file mode 100644
index 000000000..64d806dfa
--- /dev/null
+++ b/runtimes/neurun/core/src/model/Subgraphs.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/Subgraphs.h"
+#include "util/logging.h"
+#include "cpp14/memory.h"
+
+#include <cassert>
+#include <string>
+
+namespace neurun
+{
+namespace model
+{
+
+SubgraphIndex Subgraphs::emplace(const OperationIndex &index, const Operation &node, Layout layout)
+{
+  auto subg = nnfw::cpp14::make_unique<Subgraph>(layout);
+ subg->appendOperation(index, node);
+ return push(std::move(subg));
+}
+
+SubgraphIndex Subgraphs::emplace(std::unique_ptr<Subgraph> &&subg) { return push(std::move(subg)); }
+
+bool Subgraphs::containsOperation(const OperationIndex &operation_index) const
+{
+ return findOperation(operation_index).valid();
+}
+
+SubgraphIndex Subgraphs::getOperation(const OperationIndex &operation_index) const
+{
+ SubgraphIndex ret = findOperation(operation_index);
+ assert(ret.valid());
+ return ret;
+}
+
+// TODO: Extract this into an external helper function
+void Subgraphs::dump(const std::string &msg) const
+{
+ VERBOSE(Subgraphs) << "Subgraphs(" << msg << ")" << std::endl;
+ iterate([&](const SubgraphIndex &idx, const model::Subgraph &subg) {
+ VERBOSE(Subgraphs) << idx.value() << "] " << subg.getStr() << std::endl;
+ });
+}
+
+SubgraphIndex Subgraphs::findOperation(const OperationIndex &operation_index) const
+{
+ SubgraphIndex ret;
+ iterate([&](const SubgraphIndex &index, const Subgraph &object) {
+ for (const auto &elem : object.operations())
+ {
+ if (elem.index == operation_index)
+ ret = index;
+ }
+ });
+ return ret;
+}
+
+} // namespace model
+} // namespace neurun
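Note: findOperation() above relies on a default-constructed SubgraphIndex being invalid: if no subgraph holds the operation, ret is never assigned, so containsOperation() sees valid() == false. A usage sketch (hypothetical indices, not part of the patch):

    auto subg_index = subgraphs.emplace(OperationIndex{0u}, node, Layout::NHWC);
    assert(subgraphs.containsOperation(OperationIndex{0u}));
    assert(subgraphs.getOperation(OperationIndex{0u}) == subg_index);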
diff --git a/runtimes/neurun/core/src/model/TypeInfo.cc b/runtimes/neurun/core/src/model/TypeInfo.cc
new file mode 100644
index 000000000..46ac2d4de
--- /dev/null
+++ b/runtimes/neurun/core/src/model/TypeInfo.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/TypeInfo.h"
+
+namespace neurun
+{
+namespace model
+{
+
+bool operator==(const TypeInfo &lhs, const TypeInfo &rhs)
+{
+ if (lhs.type() != rhs.type())
+ {
+ return false;
+ }
+
+ if (lhs.offset() != rhs.offset())
+ {
+ return false;
+ }
+
+ if (lhs.scale() != rhs.scale())
+ {
+ return false;
+ }
+
+ return true;
+}
+
+bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs) { return !(lhs == rhs); }
+
+} // namespace model
+} // namespace neurun
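Note: comparing scale and offset matters for quantized operands: two descriptors with the same element type but different quantization parameters describe distinct types. A sketch (not part of the patch; assumes a (type, scale, offset) constructor on TypeInfo and a DataType::QUANT8_ASYMM enumerator):

    neurun::model::TypeInfo a{neurun::model::DataType::QUANT8_ASYMM, 0.5f, 0};
    neurun::model::TypeInfo b{neurun::model::DataType::QUANT8_ASYMM, 0.25f, 0};
    assert(a != b); // same element type, different scale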
diff --git a/runtimes/neurun/core/src/model/operation/AbsNode.cc b/runtimes/neurun/core/src/model/operation/AbsNode.cc
new file mode 100644
index 000000000..dd9566da9
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/AbsNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/AbsNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void AbsNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+AbsNode::AbsNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
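Note: AbsNode and every node file that follows share one pattern: the constructor fixes the input arity via OperandConstraint, and accept() double-dispatches to the visitor overload for the concrete node type. A hypothetical visitor sketch (not part of the patch; assumes OperationVisitor provides overridable per-node visit() overloads, as the accept() implementations imply):

    #include <cstdint>
    #include "model/OperationVisitor.h"

    // Counts Abs operations; other node types fall through to the base visitor.
    class AbsCounter : public neurun::model::OperationVisitor
    {
    public:
      void visit(const neurun::model::operation::AbsNode &) override { ++_count; }
      uint32_t count() const { return _count; }

    private:
      uint32_t _count = 0;
    };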
diff --git a/runtimes/neurun/core/src/model/operation/AddNode.cc b/runtimes/neurun/core/src/model/operation/AddNode.cc
new file mode 100644
index 000000000..43ad7241f
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/AddNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/AddNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void AddNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+AddNode::AddNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ArgMaxNode.cc b/runtimes/neurun/core/src/model/operation/ArgMaxNode.cc
new file mode 100644
index 000000000..2486f54b0
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ArgMaxNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ArgMaxNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ArgMaxNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ArgMaxNode::ArgMaxNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/AvgPool2DNode.cc b/runtimes/neurun/core/src/model/operation/AvgPool2DNode.cc
new file mode 100644
index 000000000..4c625f973
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/AvgPool2DNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/AvgPool2DNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void AvgPool2DNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+AvgPool2DNode::AvgPool2DNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/CastNode.cc b/runtimes/neurun/core/src/model/operation/CastNode.cc
new file mode 100644
index 000000000..85d11e3d1
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/CastNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/CastNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void CastNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+CastNode::CastNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ComparisonNode.cc b/runtimes/neurun/core/src/model/operation/ComparisonNode.cc
new file mode 100644
index 000000000..598e61969
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ComparisonNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ComparisonNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ComparisonNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ComparisonNode::ComparisonNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ConcatNode.cc b/runtimes/neurun/core/src/model/operation/ConcatNode.cc
new file mode 100644
index 000000000..195952637
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ConcatNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ConcatNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ConcatNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ConcatNode::ConcatNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createAtLeast(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/Conv2DNode.cc b/runtimes/neurun/core/src/model/operation/Conv2DNode.cc
new file mode 100644
index 000000000..218c5d193
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/Conv2DNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/Conv2DNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void Conv2DNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Conv2DNode::Conv2DNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/CustomNode.cc b/runtimes/neurun/core/src/model/operation/CustomNode.cc
new file mode 100644
index 000000000..059786218
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/CustomNode.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/CustomNode.h"
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void CustomNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+CustomNode::CustomNode(OperandConstraint input_constr, const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, std::string id,
+ const Userdata &userdata)
+ : model::Operation{input_constr, inputs, outputs}, _id(std::move(id)), _userdata(userdata)
+{
+}
+
+const std::string &CustomNode::id() const { return _id; }
+
+const CustomNode::Userdata &CustomNode::userdata() const { return _userdata; }
+
+CustomNode::~CustomNode() { delete[] _userdata.data; }
+
+std::string CustomNode::getName() const { return id(); }
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
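Note: ~CustomNode() above releases _userdata.data with delete[], so the node takes ownership of the buffer it is given. Callers must therefore allocate the userdata payload with new[] and must not free it themselves; a stack buffer or a second free here would be a bug. A sketch (hypothetical payload_size; the element type of data is assumed to be char):

    CustomNode::Userdata userdata;
    userdata.data = new char[payload_size]; // ownership passes to the node
    // ... fill the buffer, then hand userdata to the CustomNode constructor ...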
diff --git a/runtimes/neurun/core/src/model/operation/DepthToSpaceNode.cc b/runtimes/neurun/core/src/model/operation/DepthToSpaceNode.cc
new file mode 100644
index 000000000..ec3e5433e
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/DepthToSpaceNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/DepthToSpaceNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void DepthToSpaceNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+DepthToSpaceNode::DepthToSpaceNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/DepthwiseConv2DNode.cc b/runtimes/neurun/core/src/model/operation/DepthwiseConv2DNode.cc
new file mode 100644
index 000000000..70d107aa7
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/DepthwiseConv2DNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/DepthwiseConv2DNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void DepthwiseConv2DNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+DepthwiseConv2DNode::DepthwiseConv2DNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/DequantizeNode.cc b/runtimes/neurun/core/src/model/operation/DequantizeNode.cc
new file mode 100644
index 000000000..634d36b26
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/DequantizeNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/DequantizeNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void DequantizeNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+DequantizeNode::DequantizeNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/DivNode.cc b/runtimes/neurun/core/src/model/operation/DivNode.cc
new file mode 100644
index 000000000..814491aa7
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/DivNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/DivNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void DivNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+DivNode::DivNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/EmbeddingLookupNode.cc b/runtimes/neurun/core/src/model/operation/EmbeddingLookupNode.cc
new file mode 100644
index 000000000..d49ca19b1
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/EmbeddingLookupNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/EmbeddingLookupNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void EmbeddingLookupNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+EmbeddingLookupNode::EmbeddingLookupNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ExpNode.cc b/runtimes/neurun/core/src/model/operation/ExpNode.cc
new file mode 100644
index 000000000..3f420f8b5
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ExpNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ExpNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ExpNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ExpNode::ExpNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/FloorNode.cc b/runtimes/neurun/core/src/model/operation/FloorNode.cc
new file mode 100644
index 000000000..47b56fbaa
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/FloorNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/FloorNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void FloorNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+FloorNode::FloorNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/FullyConnectedNode.cc b/runtimes/neurun/core/src/model/operation/FullyConnectedNode.cc
new file mode 100644
index 000000000..42f18c72d
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/FullyConnectedNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/FullyConnectedNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void FullyConnectedNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+FullyConnectedNode::FullyConnectedNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/GatherNode.cc b/runtimes/neurun/core/src/model/operation/GatherNode.cc
new file mode 100644
index 000000000..1ecb6f8ec
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/GatherNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/GatherNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void GatherNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+GatherNode::GatherNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/HashtableLookupNode.cc b/runtimes/neurun/core/src/model/operation/HashtableLookupNode.cc
new file mode 100644
index 000000000..d2c144e71
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/HashtableLookupNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/HashtableLookupNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void HashtableLookupNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+HashtableLookupNode::HashtableLookupNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(3u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/L2NormalizationNode.cc b/runtimes/neurun/core/src/model/operation/L2NormalizationNode.cc
new file mode 100644
index 000000000..1169785c6
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/L2NormalizationNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/L2NormalizationNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void L2NormalizationNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+L2NormalizationNode::L2NormalizationNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/L2Pool2DNode.cc b/runtimes/neurun/core/src/model/operation/L2Pool2DNode.cc
new file mode 100644
index 000000000..fb53f52c6
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/L2Pool2DNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/L2Pool2DNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void L2Pool2DNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+L2Pool2DNode::L2Pool2DNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/LSTMNode.cc b/runtimes/neurun/core/src/model/operation/LSTMNode.cc
new file mode 100644
index 000000000..31443e8ae
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/LSTMNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LSTMNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void LSTMNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LSTMNode::LSTMNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(23u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/LocalResponseNormalizationNode.cc b/runtimes/neurun/core/src/model/operation/LocalResponseNormalizationNode.cc
new file mode 100644
index 000000000..4b1dded76
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/LocalResponseNormalizationNode.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LocalResponseNormalizationNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void LocalResponseNormalizationNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LocalResponseNormalizationNode::LocalResponseNormalizationNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/LogicalAndNode.cc b/runtimes/neurun/core/src/model/operation/LogicalAndNode.cc
new file mode 100644
index 000000000..9e9a3dbbf
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/LogicalAndNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LogicalAndNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void LogicalAndNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LogicalAndNode::LogicalAndNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/LogicalNotNode.cc b/runtimes/neurun/core/src/model/operation/LogicalNotNode.cc
new file mode 100644
index 000000000..1a3c324a5
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/LogicalNotNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LogicalNotNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void LogicalNotNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LogicalNotNode::LogicalNotNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/LogicalOrNode.cc b/runtimes/neurun/core/src/model/operation/LogicalOrNode.cc
new file mode 100644
index 000000000..53fa305ae
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/LogicalOrNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LogicalOrNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void LogicalOrNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LogicalOrNode::LogicalOrNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/LogisticNode.cc b/runtimes/neurun/core/src/model/operation/LogisticNode.cc
new file mode 100644
index 000000000..358ce6acd
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/LogisticNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LogisticNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void LogisticNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LogisticNode::LogisticNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/MaxPool2DNode.cc b/runtimes/neurun/core/src/model/operation/MaxPool2DNode.cc
new file mode 100644
index 000000000..596aa2df9
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/MaxPool2DNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/MaxPool2DNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void MaxPool2DNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+MaxPool2DNode::MaxPool2DNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/MeanNode.cc b/runtimes/neurun/core/src/model/operation/MeanNode.cc
new file mode 100644
index 000000000..22b23b27e
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/MeanNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/MeanNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void MeanNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+MeanNode::MeanNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/MulNode.cc b/runtimes/neurun/core/src/model/operation/MulNode.cc
new file mode 100644
index 000000000..23a66848a
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/MulNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/MulNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void MulNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+MulNode::MulNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/NegNode.cc b/runtimes/neurun/core/src/model/operation/NegNode.cc
new file mode 100644
index 000000000..6f3cf5a0a
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/NegNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/NegNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void NegNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+NegNode::NegNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/PReLUNode.cc b/runtimes/neurun/core/src/model/operation/PReLUNode.cc
new file mode 100644
index 000000000..aa8aecdd5
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/PReLUNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/PReLUNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void PReLUNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+PReLUNode::PReLUNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/PadNode.cc b/runtimes/neurun/core/src/model/operation/PadNode.cc
new file mode 100644
index 000000000..9947c9e71
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/PadNode.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/PadNode.h"
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void PadNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+PadNode::PadNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/PermuteNode.cc b/runtimes/neurun/core/src/model/operation/PermuteNode.cc
new file mode 100644
index 000000000..8affca184
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/PermuteNode.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/PermuteNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void PermuteNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+PermuteNode::PermuteNode(const OperandIndex &input, const OperandIndex &output,
+ const backend::BackendContext *input_backend_ctx,
+ const backend::BackendContext *output_backend_ctx, Type type,
+ model::DataType data_type)
+ : model::Operation{OperandConstraint::createExact(1u)},
+ _param{input_backend_ctx, output_backend_ctx}, _type{type}, _dataType{data_type}
+{
+ setInputs({input});
+ setOutputs({output});
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/RNNNode.cc b/runtimes/neurun/core/src/model/operation/RNNNode.cc
new file mode 100644
index 000000000..fa32059d0
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/RNNNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/RNNNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void RNNNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+RNNNode::RNNNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(5u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/RSQRTNode.cc b/runtimes/neurun/core/src/model/operation/RSQRTNode.cc
new file mode 100644
index 000000000..faed11663
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/RSQRTNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/RSQRTNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void RSQRTNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+RSQRTNode::RSQRTNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ReLU1Node.cc b/runtimes/neurun/core/src/model/operation/ReLU1Node.cc
new file mode 100644
index 000000000..b1fe14e09
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ReLU1Node.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ReLU1Node.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReLU1Node::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReLU1Node::ReLU1Node(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ReLU6Node.cc b/runtimes/neurun/core/src/model/operation/ReLU6Node.cc
new file mode 100644
index 000000000..de7c35e1a
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ReLU6Node.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ReLU6Node.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReLU6Node::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReLU6Node::ReLU6Node(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ReLUNode.cc b/runtimes/neurun/core/src/model/operation/ReLUNode.cc
new file mode 100644
index 000000000..d79819d7f
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ReLUNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ReLUNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReLUNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReLUNode::ReLUNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ReduceMaxNode.cc b/runtimes/neurun/core/src/model/operation/ReduceMaxNode.cc
new file mode 100644
index 000000000..486646ac9
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ReduceMaxNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ReduceMaxNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReduceMaxNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReduceMaxNode::ReduceMaxNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ReduceMinNode.cc b/runtimes/neurun/core/src/model/operation/ReduceMinNode.cc
new file mode 100644
index 000000000..9f55251c4
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ReduceMinNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ReduceMinNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReduceMinNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReduceMinNode::ReduceMinNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ReduceSumNode.cc b/runtimes/neurun/core/src/model/operation/ReduceSumNode.cc
new file mode 100644
index 000000000..5a06ef81b
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ReduceSumNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ReduceSumNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReduceSumNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReduceSumNode::ReduceSumNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ReshapeNode.cc b/runtimes/neurun/core/src/model/operation/ReshapeNode.cc
new file mode 100644
index 000000000..cfd987ffb
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ReshapeNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ReshapeNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReshapeNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReshapeNode::ReshapeNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/ResizeBilinearNode.cc b/runtimes/neurun/core/src/model/operation/ResizeBilinearNode.cc
new file mode 100644
index 000000000..263668a3c
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/ResizeBilinearNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/ResizeBilinearNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ResizeBilinearNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ResizeBilinearNode::ResizeBilinearNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/SQRTNode.cc b/runtimes/neurun/core/src/model/operation/SQRTNode.cc
new file mode 100644
index 000000000..835aa3f97
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/SQRTNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/SQRTNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void SQRTNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SQRTNode::SQRTNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/SoftmaxNode.cc b/runtimes/neurun/core/src/model/operation/SoftmaxNode.cc
new file mode 100644
index 000000000..39e6d2bd8
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/SoftmaxNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/SoftmaxNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void SoftmaxNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SoftmaxNode::SoftmaxNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/SpaceToDepthNode.cc b/runtimes/neurun/core/src/model/operation/SpaceToDepthNode.cc
new file mode 100644
index 000000000..2622881f4
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/SpaceToDepthNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/SpaceToDepthNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void SpaceToDepthNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SpaceToDepthNode::SpaceToDepthNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/SplitNode.cc b/runtimes/neurun/core/src/model/operation/SplitNode.cc
new file mode 100644
index 000000000..9a542f418
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/SplitNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "model/operation/SplitNode.h"
+#include <cassert>
+#include "model/OperationVisitor.h"
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+void SplitNode::accept(OperationVisitor &v) const { v.visit(*this); }
+SplitNode::SplitNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/SquaredDifferenceNode.cc b/runtimes/neurun/core/src/model/operation/SquaredDifferenceNode.cc
new file mode 100644
index 000000000..6672e08c1
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/SquaredDifferenceNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/SquaredDifferenceNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void SquaredDifferenceNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SquaredDifferenceNode::SquaredDifferenceNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/SqueezeNode.cc b/runtimes/neurun/core/src/model/operation/SqueezeNode.cc
new file mode 100644
index 000000000..1a82d65b3
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/SqueezeNode.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/SqueezeNode.h"
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void SqueezeNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SqueezeNode::SqueezeNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+    : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/StridedSliceNode.cc b/runtimes/neurun/core/src/model/operation/StridedSliceNode.cc
new file mode 100644
index 000000000..9d60645a8
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/StridedSliceNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/StridedSliceNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void StridedSliceNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+StridedSliceNode::StridedSliceNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/SubNode.cc b/runtimes/neurun/core/src/model/operation/SubNode.cc
new file mode 100644
index 000000000..6a64c4b76
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/SubNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/SubNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void SubNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SubNode::SubNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/TanhNode.cc b/runtimes/neurun/core/src/model/operation/TanhNode.cc
new file mode 100644
index 000000000..6372b4c73
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/TanhNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/TanhNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void TanhNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+TanhNode::TanhNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/TopKV2Node.cc b/runtimes/neurun/core/src/model/operation/TopKV2Node.cc
new file mode 100644
index 000000000..6ebcd50b3
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/TopKV2Node.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/TopKV2Node.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void TopKV2Node::accept(OperationVisitor &v) const { v.visit(*this); }
+
+TopKV2Node::TopKV2Node(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/TransposeConvNode.cc b/runtimes/neurun/core/src/model/operation/TransposeConvNode.cc
new file mode 100644
index 000000000..7ad2d1dca
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/TransposeConvNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/TransposeConvNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void TransposeConvNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+TransposeConvNode::TransposeConvNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/TransposeNode.cc b/runtimes/neurun/core/src/model/operation/TransposeNode.cc
new file mode 100644
index 000000000..73542a04d
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/TransposeNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/TransposeNode.h"
+
+#include <cassert>
+
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void TransposeNode::accept(OperationVisitor &v) const { v.visit(*this); }
+
+TransposeNode::TransposeNode(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : model::Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/model/operation/UnpackNode.cc b/runtimes/neurun/core/src/model/operation/UnpackNode.cc
new file mode 100644
index 000000000..7717a017a
--- /dev/null
+++ b/runtimes/neurun/core/src/model/operation/UnpackNode.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "model/operation/UnpackNode.h"
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+void UnpackNode::accept(OperationVisitor &v) const { v.visit(*this); }
+UnpackNode::UnpackNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+} // namespace operation
+} // namespace model
+} // namespace neurun
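Every operation-node source above follows the same two-part pattern: the constructor pins the operand arity through OperandConstraint::createExact, and accept() double-dispatches the node to an OperationVisitor. A minimal sketch of a visitor over these nodes follows; it assumes OperationVisitor provides default no-op implementations for unhandled node types (otherwise every overload must be supplied), and ArityLogger is a hypothetical illustration, not part of this patch.

#include <iostream>

#include "model/OperationVisitor.h"

// Hypothetical visitor: calling node.accept(logger) selects the matching
// overload below through double dispatch.
class ArityLogger : public neurun::model::OperationVisitor
{
public:
  void visit(const neurun::model::operation::TanhNode &) override
  {
    std::cout << "Tanh: exactly 1 input\n"; // createExact(1u) in TanhNode's constructor
  }
  void visit(const neurun::model::operation::SubNode &) override
  {
    std::cout << "Sub: exactly 2 inputs\n"; // createExact(2u) in SubNode's constructor
  }
  // ...one overload per operation listed in Operations.lst
};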
diff --git a/runtimes/neurun/core/src/util/ConfigSource.cc b/runtimes/neurun/core/src/util/ConfigSource.cc
new file mode 100644
index 000000000..f84e95566
--- /dev/null
+++ b/runtimes/neurun/core/src/util/ConfigSource.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util/ConfigSource.h"
+#include "util/GeneralConfigSource.h"
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <unordered_map>
+
+#include "cpp14/memory.h"
+#include "EnvConfigSource.h"
+
+namespace neurun
+{
+namespace util
+{
+
+static std::unique_ptr<IConfigSource> _source;
+
+void config_source(std::unique_ptr<IConfigSource> &&source) { _source = std::move(source); }
+
+static IConfigSource *config_source()
+{
+ if (!_source)
+ {
+#ifdef ENVVAR_FOR_DEFAULT_CONFIG
+ // Default ConfigSource is EnvConfigSource
+ _source = nnfw::cpp14::make_unique<EnvConfigSource>();
+#else
+ _source = nnfw::cpp14::make_unique<GeneralConfigSource>();
+#endif // ENVVAR_FOR_DEFAULT_CONFIG
+ }
+ return _source.get();
+}
+
+static std::string getConfigOrDefault(const std::string &key)
+{
+ static std::unordered_map<std::string, std::string> defaults;
+ if (defaults.empty())
+ {
+#define CONFIG(Name, Type, Default) \
+ { \
+ auto name = std::string{#Name}; \
+ defaults.emplace(name, std::string{Default}); \
+ }
+
+#include "util/Config.lst"
+
+#undef CONFIG
+ }
+
+  // Treat an empty string and the absence of a value as the same
+ auto ret = config_source()->get(key);
+ if (ret.empty())
+ {
+ auto itr = defaults.find(key);
+ if (itr != defaults.end())
+ {
+      // Return the default value if one exists
+ ret = itr->second;
+ }
+ }
+
+ return ret;
+}
+
+bool getConfigBool(const std::string &key)
+{
+ auto raw = getConfigOrDefault(key);
+ static const std::array<std::string, 5> false_list{"0", "OFF", "FALSE", "N", "NO"};
+ auto false_found = std::find(false_list.begin(), false_list.end(), raw);
+
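+  // Any value not in the false list (including the empty string) is treated as true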
+ return (false_found == false_list.end());
+}
+
+int getConfigInt(const std::string &key)
+{
+ auto raw = getConfigOrDefault(key);
+ return std::stoi(raw);
+}
+
+std::string getConfigString(const std::string &key) { return getConfigOrDefault(key); }
+
+} // namespace util
+} // namespace neurun
+
+namespace neurun
+{
+namespace util
+{
+namespace config
+{
+
+#define CONFIG(Name, Type, Default) const char *Name = #Name;
+
+#include "util/Config.lst"
+
+#undef CONFIG
+
+} // namespace config
+} // namespace util
+} // namespace neurun
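To make the lookup order above concrete, here is a small usage sketch of the new config API. The key name "EXECUTOR" and its value are illustrative only; the sketch assumes a Config.lst entry exists for any key that should have a default.

#include <string>
#include <utility>

#include "util/ConfigSource.h"
#include "util/GeneralConfigSource.h"
#include "cpp14/memory.h"

void configure_runtime()
{
  // Inject a programmatic source. Until this call, the first lookup lazily
  // creates the default source (EnvConfigSource or GeneralConfigSource,
  // depending on ENVVAR_FOR_DEFAULT_CONFIG).
  auto source = nnfw::cpp14::make_unique<neurun::util::GeneralConfigSource>();
  source->set("EXECUTOR", "Dataflow"); // hypothetical key/value
  neurun::util::config_source(std::move(source));

  // Falls back to the Config.lst default when the source returns an empty string.
  const std::string executor = neurun::util::getConfigString("EXECUTOR");
}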
diff --git a/runtimes/neurun/core/src/util/EnvConfigSource.cc b/runtimes/neurun/core/src/util/EnvConfigSource.cc
new file mode 100644
index 000000000..be8239b9f
--- /dev/null
+++ b/runtimes/neurun/core/src/util/EnvConfigSource.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "EnvConfigSource.h"
+
+#include <cstdlib>
+
+namespace neurun
+{
+namespace util
+{
+
+std::string EnvConfigSource::get(const std::string &key) const
+{
+ const char *value = std::getenv(key.c_str());
+ if (value != nullptr)
+ {
+ return value;
+ }
+ else
+ {
+ return "";
+ }
+}
+
+} // namespace util
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/util/EnvConfigSource.h b/runtimes/neurun/core/src/util/EnvConfigSource.h
new file mode 100644
index 000000000..b187ec772
--- /dev/null
+++ b/runtimes/neurun/core/src/util/EnvConfigSource.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_ENV_CONFIG_SOURCE_H__
+#define __NEURUN_UTIL_ENV_CONFIG_SOURCE_H__
+
+#include <unordered_map>
+
+#include "util/IConfigSource.h"
+
+namespace neurun
+{
+namespace util
+{
+
+class EnvConfigSource final : public IConfigSource
+{
+public:
+ std::string get(const std::string &key) const override;
+
+private:
+ std::unordered_map<std::string, std::string> _default_attributes;
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_ENV_CONFIG_SOURCE_H__
diff --git a/runtimes/neurun/core/src/util/GeneralConfigSource.cc b/runtimes/neurun/core/src/util/GeneralConfigSource.cc
new file mode 100644
index 000000000..084e4c109
--- /dev/null
+++ b/runtimes/neurun/core/src/util/GeneralConfigSource.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util/GeneralConfigSource.h"
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace util
+{
+
+std::string GeneralConfigSource::get(const std::string &key) const
+{
+ auto itr = _map.find(key);
+ if (itr == _map.end())
+ {
+ return "";
+ }
+ else
+ {
+ return itr->second;
+ }
+}
+
+void GeneralConfigSource::set(const std::string &key, const std::string &val)
+{
+ VERBOSE(GeneralConfigSource) << key << " : " << val << std::endl;
+ _map[key] = val;
+}
+
+} // namespace util
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/util/Padding.cc b/runtimes/neurun/core/src/util/Padding.cc
new file mode 100644
index 000000000..dd5a3b502
--- /dev/null
+++ b/runtimes/neurun/core/src/util/Padding.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util/Padding.h"
+#include "util/Utils.h"
+
+#include <algorithm>
+#include <cassert>
+#include <stdexcept>
+
+namespace neurun
+{
+namespace util
+{
+
+model::ExplicitPadding validPadding(void)
+{
+ //
+ // ANEURALNETWORKS_PADDING_VALID
+ //
+ // VALID padding. No padding.
+ //
+ // When the input size is not evenly divisible by the filter size,
+ // the input at the end that could not fill the whole filter tile
+ // will simply be ignored.
+ //
+ model::ExplicitPadding padding;
+
+ padding.top = 0;
+ padding.bottom = 0;
+ padding.left = 0;
+ padding.right = 0;
+
+ return padding;
+}
+
+model::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape,
+ const model::Stride &stride, uint32_t kw, uint32_t kh)
+{
+ model::ExplicitPadding padding;
+
+ // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
+ //
+ // SAME padding. Padding on both ends are the "same":
+ //
+ // padding_to_beginning = total_padding / 2
+ // padding_to_end = (total_padding + 1)/2.
+ //
+ const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
+ const int32_t horizontal_expected_output =
+ (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
+
+ const int32_t vertical_needed_input = (vertical_expected_output - 1) * stride.vertical + kh;
+ const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
+
+ const int32_t horizontal_needed_input = (horizontal_expected_output - 1) * stride.horizontal + kw;
+ const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
+
+ padding.top = vertical_total_padding / 2;
+ padding.bottom = (vertical_total_padding + 1) / 2;
+ padding.left = horizontal_total_padding / 2;
+ padding.right = (horizontal_total_padding + 1) / 2;
+
+ return padding;
+}
+
+model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape,
+ const model::Stride &stride, uint32_t kw, uint32_t kh)
+{
+ const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
+ const int32_t horizontal_expected_output =
+ (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
+ assert(vertical_expected_output == ofm_shape.H);
+ assert(horizontal_expected_output == ofm_shape.W);
+
+ UNUSED_RELEASE(ofm_shape);
+ UNUSED_RELEASE(vertical_expected_output);
+ UNUSED_RELEASE(horizontal_expected_output);
+
+ return samePaddingUsingIFM(ifm_shape, stride, kw, kh);
+}
+
+model::ExplicitPadding calculatePadding(const model::Padding &padding,
+ const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape,
+ const model::Stride &stride, uint32_t kw, uint32_t kh)
+{
+ if (padding.type == model::PaddingType::EXPLICIT)
+ {
+ return padding.param;
+ }
+ else if (padding.type == model::PaddingType::SAME)
+ {
+ return samePadding(ifm_shape, ofm_shape, stride, kw, kh);
+ }
+ else if (padding.type == model::PaddingType::VALID)
+ {
+ return validPadding();
+ }
+ else
+ {
+ throw std::runtime_error{"Cannot handle padding type"};
+ }
+}
+
+} // namespace util
+} // namespace neurun
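A worked instance of the SAME-padding arithmetic above may help. The numbers are illustrative, and the sketch assumes FeatureShape and Stride are plain structs whose members can be assigned directly.

#include <cassert>

#include "util/Padding.h"

// Worked example of SAME padding for H = W = 5, kernel 3x3, stride 2:
//   expected_output = (5 + 2 - 1) / 2 = 3
//   needed_input    = (3 - 1) * 2 + 3 = 7
//   total_padding   = max(0, 7 - 5)   = 2  ->  top = 2 / 2 = 1, bottom = (2 + 1) / 2 = 1
void same_padding_example()
{
  neurun::model::FeatureShape ifm;
  ifm.N = 1; ifm.H = 5; ifm.W = 5; ifm.C = 8;

  neurun::model::Stride stride;
  stride.vertical = 2; stride.horizontal = 2;

  const auto pad = neurun::util::samePaddingUsingIFM(ifm, stride, /*kw=*/3, /*kh=*/3);
  assert(pad.top == 1 && pad.bottom == 1 && pad.left == 1 && pad.right == 1);
}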
diff --git a/runtimes/neurun/core/src/util/ShapeInference.cc b/runtimes/neurun/core/src/util/ShapeInference.cc
new file mode 100644
index 000000000..5a7bfde41
--- /dev/null
+++ b/runtimes/neurun/core/src/util/ShapeInference.cc
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util/Utils.h"
+#include "model/InternalType.h"
+#include "model/Shape.h"
+#include "model/operation/AvgPool2DNode.h"
+#include "model/operation/MaxPool2DNode.h"
+#include "util/ShapeInference.h"
+
+namespace neurun
+{
+namespace shape_inference
+{
+
+//
+// Helper functions
+//
+
+namespace
+{
+
+template <typename T, typename U>
+typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value,
+ typename std::common_type<T, U>::type>::type
+ceil_div(T dividend, U divisor)
+{
+  assert(dividend > 0 && divisor > 0 && "this implementation is for positive numbers only");
+ return (dividend + divisor - 1) / divisor;
+}
+
+// Calculate the result of broadcast of two shapes
+model::Shape broadcastShapes(const model::Shape &lhs_shape, const model::Shape &rhs_shape)
+{
+ model::Shape out_shape;
+ auto max_rank = std::max(lhs_shape.rank(), rhs_shape.rank());
+
+ for (int idx = 0; idx < max_rank; ++idx)
+ {
+    // Go over the operands' dimensions from right to left
+ int lhs_idx = lhs_shape.rank() - idx - 1;
+ int rhs_idx = rhs_shape.rank() - idx - 1;
+
+ int32_t lhs_dim = lhs_idx >= 0 ? lhs_shape.dim(lhs_idx) : 1;
+ int32_t rhs_dim = rhs_idx >= 0 ? rhs_shape.dim(rhs_idx) : 1;
+
+ if (lhs_dim != 1 && rhs_dim != 1 && lhs_dim != rhs_dim)
+ throw std::runtime_error("Incompatible shapes for broadcast");
+
+ out_shape.prepend(std::max(lhs_dim, rhs_dim));
+ }
+
+ return out_shape;
+}
+
+// Calculate output height and width of convolution-like operation
+std::pair<int, int> calcConvLikeHeightAndWidth(const int in_h, const int in_w, const int ker_h,
+ const int ker_w, const model::Padding pad,
+ const model::Stride stride)
+{
+ int32_t out_h = 0, out_w = 0;
+
+ switch (pad.type)
+ {
+ case model::PaddingType::SAME:
+ out_h = ceil_div(in_h, stride.vertical);
+ out_w = ceil_div(in_w, stride.horizontal);
+ break;
+ case model::PaddingType::VALID:
+ out_h = ceil_div(in_h - ker_h + 1, stride.vertical);
+ out_w = ceil_div(in_w - ker_w + 1, stride.horizontal);
+ break;
+ case model::PaddingType::EXPLICIT:
+ out_h = (in_h + pad.param.top + pad.param.bottom - ker_h) / stride.vertical + 1;
+ out_w = (in_w + pad.param.left + pad.param.right - ker_w) / stride.horizontal + 1;
+ break;
+ default:
+ assert(false);
+ }
+
+ return {out_h, out_w};
+}
+
+} // namespace
+
+//
+// Shape inference
+//
+
+Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape)
+{
+ return {broadcastShapes(lhs_shape, rhs_shape)};
+}
+
+Shapes inferAvgPoolShape(const model::Shape &in_shape,
+ const model::operation::AvgPool2DNode::Param &param,
+ const model::Layout layout)
+{
+ assert(layout == model::Layout::NHWC);
+ auto ifm_shape = in_shape.asFeature(layout);
+ const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw,
+ param.padding, param.stride);
+ // Pooling does not change the number of channels or the batch size
+ return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}};
+}
+
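+// Concatenation keeps every dimension except the concat axis, where the input
+// sizes are summed, e.g. concatenating [2, 3, 4] and [2, 5, 4] along axis 1
+// yields [2, 8, 4].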
+Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::ConcatNode::Param &param)
+{
+ const int32_t concat_axis = param.axis;
+ const auto &first_in_shape = in_shapes[0];
+
+ // Check that all shapes are equal except for concat axis dimension
+ for (const auto &in_shape : in_shapes)
+ {
+ assert(in_shape.rank() == first_in_shape.rank());
+ for (int64_t dim_idx = 0; dim_idx < in_shape.rank(); ++dim_idx)
+ assert(dim_idx == concat_axis || in_shape.dim(dim_idx) == first_in_shape.dim(dim_idx));
+ }
+
+ // Calculate output shape
+ model::Shape out_shape(first_in_shape);
+ out_shape.dim(concat_axis) = 0;
+ for (const auto &in_shape : in_shapes)
+ out_shape.dim(concat_axis) += in_shape.dim(concat_axis);
+ return {out_shape};
+}
+
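+// Max pooling shares the output-size arithmetic of average pooling above.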
+Shapes inferMaxPoolShape(const model::Shape &in_shape,
+ const model::operation::MaxPool2DNode::Param &param,
+ const model::Layout layout)
+{
+ assert(layout == model::Layout::NHWC);
+ auto ifm_shape = in_shape.asFeature(layout);
+ const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw,
+ param.padding, param.stride);
+ // Pooling does not change the number of channels or the batch size
+ return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}};
+}
+
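+// The output feature map is [batch, out_h, out_w, depth_out], where depth_out
+// is the kernel's first dimension.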
+Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape,
+ const model::operation::Conv2DNode::Param &param, model::Layout layout)
+{
+ assert(layout == model::Layout::NHWC);
+ auto ifm_shape = in_shape.asFeature(layout);
+
+ // Kernel format is [depth_out, kernel_height, kernel_width, depth_in]
+ auto kf_shape = ker_shape.asFeature(layout);
+ assert(ifm_shape.C == kf_shape.C);
+
+ const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W,
+ param.padding, param.stride);
+
+ return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.N}};
+}
+
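+// Depthwise convolution scales the channel count: depth_out equals
+// depth_in * multiplier and is taken from the kernel's last dimension.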
+Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape,
+ const model::operation::DepthwiseConv2DNode::Param &param,
+ model::Layout layout)
+{
+ assert(layout == model::Layout::NHWC);
+ auto ifm_shape = in_shape.asFeature(layout);
+
+ // Kernel format is [1, kernel_height, kernel_width, depth_out]
+ auto kf_shape = ker_shape.asFeature(layout);
+ assert(kf_shape.C == static_cast<int32_t>(ifm_shape.C * param.multiplier));
+ assert(kf_shape.N == 1);
+
+ const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W,
+ param.padding, param.stride);
+
+ return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.C}};
+}
+
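+// The input is flattened to [batch, input_size] and mapped to
+// [batch, num_units], e.g. a [3, 8] input with a [10, 8] weight shape yields
+// [3, 10].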
+Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape)
+{
+ assert(in_shape.rank() >= 2);
+ assert(ker_shape.rank() == 2);
+
+ const auto input_size_with_batch = in_shape.num_elements();
+ const auto num_units = ker_shape.dim(0);
+ const auto input_size = ker_shape.dim(1);
+ assert(input_size_with_batch % input_size == 0);
+ const auto batch_size = input_size_with_batch / input_size;
+
+ return {model::Shape({static_cast<int32_t>(batch_size), num_units})};
+}
+
+} // namespace shape_inference
+} // namespace neurun
diff --git a/runtimes/neurun/core/src/util/Utils.cc b/runtimes/neurun/core/src/util/Utils.cc
new file mode 100644
index 000000000..cd912a810
--- /dev/null
+++ b/runtimes/neurun/core/src/util/Utils.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util/Utils.h"
+
+#include <cassert>
+
+namespace neurun
+{
+namespace util
+{
+
+const char *to_string(const model::PaddingType &type)
+{
+ assert((type == model::PaddingType::EXPLICIT) || (type == model::PaddingType::SAME) ||
+ (type == model::PaddingType::VALID));
+
+ switch (type)
+ {
+ case model::PaddingType::EXPLICIT:
+ return "Padding::EXPLICIT";
+ case model::PaddingType::SAME:
+ return "Padding::SAME";
+ case model::PaddingType::VALID:
+ return "Padding::VALID";
+ }
+
+ return nullptr;
+}
+
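+// Reorders a 4-D coordinate between layouts, e.g. an NHWC coordinate
+// (n, h, w, c) becomes (n, c, h, w) in NCHW; identical layouts pass through
+// unchanged.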
+Coordinates convertCoordinates(const Coordinates &from_coordinates, model::Layout from_layout,
+ model::Layout to_layout)
+{
+ assert(from_coordinates.size() == 4);
+ Coordinates to{from_coordinates};
+ if (from_layout == model::Layout::NHWC && to_layout == model::Layout::NCHW)
+ {
+ to.set(0, from_coordinates[0]);
+ to.set(1, from_coordinates[3]);
+ to.set(2, from_coordinates[1]);
+ to.set(3, from_coordinates[2]);
+ }
+ else if (from_layout == model::Layout::NCHW && to_layout == model::Layout::NHWC)
+ {
+ to.set(0, from_coordinates[0]);
+ to.set(1, from_coordinates[2]);
+ to.set(2, from_coordinates[3]);
+ to.set(3, from_coordinates[1]);
+ }
+
+ return to;
+}
+
+} // namespace util
+} // namespace neurun