summaryrefslogtreecommitdiff
path: root/compiler
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-07-30 11:32:26 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-07-30 11:32:26 +0900
commit05e0ec30a632339a8533082476f27bda31ccde16 (patch)
tree5f220ac83084fe133ffb08a6a17e99f9bb36ec1c /compiler
parente2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (diff)
downloadnnfw-05e0ec30a632339a8533082476f27bda31ccde16.tar.gz
nnfw-05e0ec30a632339a8533082476f27bda31ccde16.tar.bz2
nnfw-05e0ec30a632339a8533082476f27bda31ccde16.zip
Imported Upstream version 1.7.0upstream/1.7.0
Diffstat (limited to 'compiler')
-rw-r--r--compiler/adtidas/README.md1
-rw-r--r--compiler/angkor/src/ADT/feature/Buffer.test.cpp10
-rw-r--r--compiler/angkor/src/ADT/feature/CHWLayout.test.cpp6
-rw-r--r--compiler/angkor/src/ADT/feature/HWCLayout.test.cpp6
-rw-r--r--compiler/angkor/src/ADT/feature/Layout.test.cpp10
-rw-r--r--compiler/angkor/src/ADT/feature/Overlay.test.cpp14
-rw-r--r--compiler/angkor/src/ADT/feature/Shape.test.cpp8
-rw-r--r--compiler/angkor/src/ADT/kernel/Buffer.test.cpp12
-rw-r--r--compiler/angkor/src/ADT/kernel/Layout.test.cpp10
-rw-r--r--compiler/angkor/src/ADT/kernel/NCHWLayout.test.cpp8
-rw-r--r--compiler/angkor/src/ADT/kernel/NHWCLayout.test.cpp8
-rw-r--r--compiler/angkor/src/ADT/kernel/Overlay.test.cpp16
-rw-r--r--compiler/angkor/src/ADT/kernel/Shape.test.cpp10
-rw-r--r--compiler/angkor/src/ADT/tensor/Buffer.test.cpp6
-rw-r--r--compiler/angkor/src/ADT/tensor/Index.test.cpp34
-rw-r--r--compiler/angkor/src/ADT/tensor/IndexEnumerator.test.cpp2
-rw-r--r--compiler/angkor/src/ADT/tensor/Layout.test.cpp10
-rw-r--r--compiler/angkor/src/ADT/tensor/LexicalLayout.test.cpp6
-rw-r--r--compiler/angkor/src/ADT/tensor/Overlay.test.cpp10
-rw-r--r--compiler/angkor/src/ADT/tensor/Shape.test.cpp56
-rw-r--r--compiler/angkor/src/TensorIndex.test.cpp26
-rw-r--r--compiler/angkor/src/TensorShape.test.cpp20
-rw-r--r--compiler/ann-api/README.md1
-rw-r--r--compiler/arser/CMakeLists.txt15
-rw-r--r--compiler/arser/README.md3
-rw-r--r--compiler/arser/include/arser/arser.h507
-rw-r--r--compiler/arser/tests/arser.test.cpp344
-rw-r--r--compiler/caffe2circle/requires.cmake2
-rw-r--r--compiler/circle-inspect/CMakeLists.txt3
-rw-r--r--compiler/circle-inspect/driver/Driver.cpp85
-rw-r--r--compiler/circle-inspect/requires.cmake2
-rw-r--r--compiler/circle-inspect/src/Dump.cpp112
-rw-r--r--compiler/circle-inspect/src/Dump.h9
-rw-r--r--compiler/circle-inspect/src/Model.cpp143
-rw-r--r--compiler/circle-inspect/src/Model.h43
-rw-r--r--compiler/circle-inspect/src/Reader.cpp5
-rw-r--r--compiler/circle-quantizer/CMakeLists.txt17
-rw-r--r--compiler/circle-quantizer/README.md3
-rw-r--r--compiler/circle-quantizer/include/CircleExpContract.h49
-rw-r--r--compiler/circle-quantizer/requires.cmake7
-rw-r--r--compiler/circle-quantizer/src/CircleExpContract.cpp33
-rw-r--r--compiler/circle-quantizer/src/CircleQuantizer.cpp155
-rw-r--r--compiler/circle-tensordump/CMakeLists.txt23
-rw-r--r--compiler/circle-tensordump/README.md73
-rw-r--r--compiler/circle-tensordump/driver/Driver.cpp79
-rw-r--r--compiler/circle-tensordump/requires.cmake4
-rw-r--r--compiler/circle-tensordump/src/Dump.cpp325
-rw-r--r--compiler/circle-tensordump/src/Dump.h57
-rw-r--r--compiler/circle-tensordump/src/Reader.cpp169
-rw-r--r--compiler/circle-tensordump/src/Reader.h91
-rw-r--r--compiler/circle-verify/CMakeLists.txt3
-rw-r--r--compiler/circle-verify/requires.cmake3
-rw-r--r--compiler/circle-verify/src/Driver.cpp24
-rw-r--r--compiler/circle-verify/src/Model.cpp90
-rw-r--r--compiler/circle-verify/src/Model.h38
-rw-r--r--compiler/circle-verify/src/VerifyFlatBuffers.cpp10
-rw-r--r--compiler/circle2circle-dredd-recipe-test/CMakeLists.txt185
-rw-r--r--compiler/circle2circle-dredd-recipe-test/README.md21
-rw-r--r--compiler/circle2circle-dredd-recipe-test/requires.cmake7
-rw-r--r--compiler/circle2circle-dredd-recipe-test/test.lst20
-rwxr-xr-xcompiler/circle2circle-dredd-recipe-test/testall.sh98
-rw-r--r--compiler/circle2circle/CMakeLists.txt8
-rw-r--r--compiler/circle2circle/README.md2
-rw-r--r--compiler/circle2circle/requires.cmake3
-rw-r--r--compiler/circle2circle/src/Circle2Circle.cpp167
-rw-r--r--compiler/circle2circle/src/Circle2Circle.test.cpp2
-rw-r--r--compiler/circle2circle/src/Model.cpp78
-rw-r--r--compiler/circlechef/CMakeLists.txt21
-rw-r--r--compiler/circlechef/README.md8
-rw-r--r--compiler/circlechef/circle/CMakeLists.txt9
-rw-r--r--compiler/circlechef/circle/include/circlechef/RecipeChef.h41
-rw-r--r--compiler/circlechef/circle/src/CircleImport.cpp145
-rw-r--r--compiler/circlechef/circle/src/CircleImport.h140
-rw-r--r--compiler/circlechef/circle/src/CircleOpChef.h44
-rw-r--r--compiler/circlechef/circle/src/CircleOpChefs.h26
-rw-r--r--compiler/circlechef/circle/src/CircleOpRegistry.h71
-rw-r--r--compiler/circlechef/circle/src/Convert.cpp78
-rw-r--r--compiler/circlechef/circle/src/Convert.h58
-rw-r--r--compiler/circlechef/circle/src/Op/BCQFullyConnected.cpp64
-rw-r--r--compiler/circlechef/circle/src/Op/BCQFullyConnected.h39
-rw-r--r--compiler/circlechef/circle/src/Op/BCQGather.cpp68
-rw-r--r--compiler/circlechef/circle/src/Op/BCQGather.h39
-rw-r--r--compiler/circlechef/circle/src/Op/BatchMatMul.cpp48
-rw-r--r--compiler/circlechef/circle/src/Op/BatchMatMul.h39
-rw-r--r--compiler/circlechef/circle/src/Op/InstanceNorm.cpp53
-rw-r--r--compiler/circlechef/circle/src/Op/InstanceNorm.h39
-rw-r--r--compiler/circlechef/circle/src/RecipeChef.cpp248
-rw-r--r--compiler/circlechef/core/CMakeLists.txt9
-rw-r--r--compiler/circlechef/core/include/circlechef/ModelChef.h56
-rw-r--r--compiler/circlechef/core/src/Arguments.h34
-rw-r--r--compiler/circlechef/core/src/Convert.cpp72
-rw-r--r--compiler/circlechef/core/src/Convert.h31
-rw-r--r--compiler/circlechef/core/src/ModelChef.cpp631
-rw-r--r--compiler/circlechef/core/src/Op/BCQFullyConnected.cpp40
-rw-r--r--compiler/circlechef/core/src/Op/BCQFullyConnected.h52
-rw-r--r--compiler/circlechef/core/src/Op/BCQGather.cpp36
-rw-r--r--compiler/circlechef/core/src/Op/BCQGather.h49
-rw-r--r--compiler/circlechef/core/src/Op/BatchMatMul.cpp37
-rw-r--r--compiler/circlechef/core/src/Op/BatchMatMul.h49
-rw-r--r--compiler/circlechef/core/src/Op/InstanceNorm.cpp39
-rw-r--r--compiler/circlechef/core/src/Op/InstanceNorm.h52
-rw-r--r--compiler/circlechef/core/src/OpChef.def10
-rw-r--r--compiler/circlechef/core/src/OpChef.h48
-rw-r--r--compiler/circlechef/core/src/OpChefs.h25
-rw-r--r--compiler/circlechef/log/CMakeLists.txt7
-rw-r--r--compiler/circlechef/log/include/Log.h75
-rw-r--r--compiler/circlechef/log/include/LoggingContext.h35
-rw-r--r--compiler/circlechef/log/src/Log.cpp87
-rw-r--r--compiler/circlechef/log/src/LoggingContext.cpp41
-rw-r--r--compiler/circlechef/proto/CMakeLists.txt5
-rw-r--r--compiler/circlechef/proto/circlechef.proto110
-rw-r--r--compiler/circlechef/requires.cmake9
-rw-r--r--compiler/circlechef/tests/CMakeLists.txt70
-rwxr-xr-xcompiler/circlechef/tests/runvalidate.sh56
-rw-r--r--compiler/circlechef/tools/CMakeLists.txt6
-rw-r--r--compiler/circlechef/tools/console/CMakeLists.txt3
-rw-r--r--compiler/circlechef/tools/console/Driver.cpp58
-rw-r--r--compiler/circlechef/tools/file/CMakeLists.txt4
-rw-r--r--compiler/circlechef/tools/file/Driver.cpp85
-rw-r--r--compiler/circlechef/tools/reverse/CMakeLists.txt5
-rw-r--r--compiler/circlechef/tools/reverse/Driver.cpp72
-rw-r--r--compiler/circledump/CMakeLists.txt2
-rw-r--r--compiler/circledump/driver/Driver.cpp26
-rw-r--r--compiler/circledump/requires.cmake2
-rw-r--r--compiler/circledump/src/Dump.cpp22
-rw-r--r--compiler/circledump/src/OpPrinter.cpp456
-rw-r--r--compiler/common-artifacts/CMakeLists.txt258
-rw-r--r--compiler/common-artifacts/README.md37
-rw-r--r--compiler/common-artifacts/exclude.lst191
-rw-r--r--compiler/common-artifacts/requires.cmake8
-rw-r--r--compiler/common-artifacts/src/TestDataGenerator.cpp234
-rw-r--r--compiler/cwrap/include/cwrap/Fildes.h2
-rw-r--r--compiler/cwrap/src/Fildes.test.cpp35
-rwxr-xr-xcompiler/dredd-rule-lib/rule-lib.sh17
-rw-r--r--compiler/enco-intf/README.md1
-rw-r--r--compiler/exo/src/Circle/CircleTensorExporter.cpp4
-rw-r--r--compiler/exo/src/Conversion/DepthwiseConv2DConverter.cpp2
-rw-r--r--compiler/exo/src/Conversion/TensorBroadcastConverter.cpp2
-rw-r--r--compiler/exo/src/Dialect/IR/CircleDialect.test.cpp2
-rw-r--r--compiler/exo/src/Dialect/IR/CircleNodeImpl.h1
-rw-r--r--compiler/exo/src/Dialect/IR/CircleNodeVisitor.h1
-rw-r--r--compiler/exo/src/Dialect/IR/CircleNodes.test.cpp14
-rw-r--r--compiler/exo/src/Dialect/IR/NodeMixins.h2
-rw-r--r--compiler/exo/src/Dialect/IR/TFLDialect.test.cpp2
-rw-r--r--compiler/exo/src/Dialect/IR/TFLNodeImpl.h1
-rw-r--r--compiler/exo/src/Dialect/IR/TFLNodeVisitor.h1
-rw-r--r--compiler/exo/src/Dialect/IR/TFLNodes.h4
-rw-r--r--compiler/exo/src/Dialect/IR/TFLNodes.test.cpp98
-rw-r--r--compiler/exo/src/Dialect/Service/CircleTypeInferenceRule.cpp2
-rw-r--r--compiler/exo/src/Dialect/Service/TFLShapeInferenceRule.test.cpp52
-rw-r--r--compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.cpp2
-rw-r--r--compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.test.cpp2
-rw-r--r--compiler/exo/src/Pass/FuseBiasAddPass.cpp4
-rw-r--r--compiler/exo/src/Pass/FuseBiasAddPass.test.cpp4
-rw-r--r--compiler/exo/src/Pass/FuseReluPass.cpp2
-rw-r--r--compiler/exo/src/TFLite/TFLExporterImpl.test.cpp42
-rw-r--r--compiler/exo/src/TFLite/TFLExporterUtils.test.cpp4
-rw-r--r--compiler/exo/src/TFLite/TFLTensorExporter.cpp15
-rw-r--r--compiler/exo/src/TFLite/TFLTypeInference.test.cpp6
-rw-r--r--compiler/fipe/README.md1
-rw-r--r--compiler/foder/CMakeLists.txt2
-rw-r--r--compiler/foder/README.md13
-rw-r--r--compiler/foder/include/foder/FileLoader.h69
-rw-r--r--compiler/gen-core/CMakeLists.txt2
-rw-r--r--compiler/i5diff/CMakeLists.txt2
-rw-r--r--compiler/imgdata2hdf5/CMakeLists.txt13
-rw-r--r--compiler/imgdata2hdf5/README.md24
-rwxr-xr-xcompiler/imgdata2hdf5/imgdata2hdf5.py60
-rw-r--r--compiler/loco/include/loco/IR/CastHelpers.h42
-rw-r--r--compiler/loco/include/loco/IR/DataTypeTraits.h33
-rw-r--r--compiler/loco/include/loco/IR/Dimension.h12
-rw-r--r--compiler/loco/include/loco/IR/FeatureCodec.h25
-rw-r--r--compiler/loco/include/loco/IR/FilterCodec.h17
-rw-r--r--compiler/loco/include/loco/IR/Node.h8
-rw-r--r--compiler/loco/include/loco/IR/Nodes.h2
-rw-r--r--compiler/loco/include/loco/IR/TensorShape.h5
-rw-r--r--compiler/loco/src/ADT/AnnotatedItem.test.cpp6
-rw-r--r--compiler/loco/src/IR/Algorithm.test.cpp16
-rw-r--r--compiler/loco/src/IR/CanonicalDialect.test.cpp2
-rw-r--r--compiler/loco/src/IR/CanonicalNode.test.cpp8
-rw-r--r--compiler/loco/src/IR/DataTypeTraits.test.cpp2
-rw-r--r--compiler/loco/src/IR/DepthwiseFilterIndex.test.cpp40
-rw-r--r--compiler/loco/src/IR/DepthwiseFilterShape.test.cpp20
-rw-r--r--compiler/loco/src/IR/Dialect.test.cpp2
-rw-r--r--compiler/loco/src/IR/Dimension.test.cpp4
-rw-r--r--compiler/loco/src/IR/FeatureIndex.test.cpp40
-rw-r--r--compiler/loco/src/IR/FeatureShape.test.cpp20
-rw-r--r--compiler/loco/src/IR/FilterIndex.test.cpp40
-rw-r--r--compiler/loco/src/IR/FilterShape.test.cpp20
-rw-r--r--compiler/loco/src/IR/Graph.cpp2
-rw-r--r--compiler/loco/src/IR/Graph.test.cpp42
-rw-r--r--compiler/loco/src/IR/MockupNode.h17
-rw-r--r--compiler/loco/src/IR/Node.test.cpp34
-rw-r--r--compiler/loco/src/IR/NodeShape.test.cpp52
-rw-r--r--compiler/loco/src/IR/Nodes.test.cpp368
-rw-r--r--compiler/loco/src/IR/Padding2D.test.cpp8
-rw-r--r--compiler/loco/src/IR/PaddingND.test.cpp6
-rw-r--r--compiler/loco/src/IR/PermutingCodec.test.cpp128
-rw-r--r--compiler/loco/src/IR/Stride.test.cpp12
-rw-r--r--compiler/loco/src/IR/TensorShape.cpp17
-rw-r--r--compiler/loco/src/IR/TensorShape.test.cpp51
-rw-r--r--compiler/loco/src/IR/Use.test.cpp8
-rw-r--r--compiler/loco/src/IR/Verifier.test.cpp6
-rw-r--r--compiler/loco/src/IR/Window.test.cpp12
-rw-r--r--compiler/loco/src/Service/CanonicalShapeInferenceRule.cpp2
-rw-r--r--compiler/loco/src/Service/CanonicalShapeInferenceRule.test.cpp132
-rw-r--r--compiler/loco/src/Service/GraphBuilder.test.cpp6
-rw-r--r--compiler/loco/src/Service/GraphTestcase.h2
-rw-r--r--compiler/loco/src/Service/MultiDialectShapeInferenceRule.test.cpp16
-rw-r--r--compiler/loco/src/Service/ShapeInference.test.cpp12
-rw-r--r--compiler/loco/src/Service/TypeInference.cpp2
-rw-r--r--compiler/loco/src/Service/TypeInference.test.cpp18
-rw-r--r--compiler/loco/src/loco.test.cpp12
-rw-r--r--compiler/locoex-customop/src/Service/COpShapeInferenceRule.cpp2
-rw-r--r--compiler/locomotiv/src/Node/AvgPool2D.cpp16
-rw-r--r--compiler/locomotiv/src/Node/AvgPool2D.test.cpp4
-rw-r--r--compiler/locomotiv/src/Node/BiasAdd.cpp4
-rw-r--r--compiler/locomotiv/src/Node/BiasAdd.test.cpp16
-rw-r--r--compiler/locomotiv/src/Node/BiasEncode.test.cpp8
-rw-r--r--compiler/locomotiv/src/Node/ConstGen.test.cpp40
-rw-r--r--compiler/locomotiv/src/Node/Conv2D.test.cpp4
-rw-r--r--compiler/locomotiv/src/Node/DepthwiseConv2D.test.cpp4
-rw-r--r--compiler/locomotiv/src/Node/DepthwiseFilterEncode.test.cpp8
-rw-r--r--compiler/locomotiv/src/Node/EltwiseAdd.test.cpp8
-rw-r--r--compiler/locomotiv/src/Node/EltwiseDiv.test.cpp8
-rw-r--r--compiler/locomotiv/src/Node/EltwiseMax.test.cpp8
-rw-r--r--compiler/locomotiv/src/Node/EltwiseMul.test.cpp8
-rw-r--r--compiler/locomotiv/src/Node/EltwiseSqrt.test.cpp12
-rw-r--r--compiler/locomotiv/src/Node/EltwiseSub.test.cpp8
-rw-r--r--compiler/locomotiv/src/Node/FeatureCodec.test.cpp32
-rw-r--r--compiler/locomotiv/src/Node/FilterEncode.test.cpp16
-rw-r--r--compiler/locomotiv/src/Node/Forward.test.cpp16
-rw-r--r--compiler/locomotiv/src/Node/MatMul.test.cpp6
-rw-r--r--compiler/locomotiv/src/Node/MatrixCodec.test.cpp32
-rw-r--r--compiler/locomotiv/src/Node/MaxPool2D.test.cpp4
-rw-r--r--compiler/locomotiv/src/Node/Push.test.cpp16
-rw-r--r--compiler/locomotiv/src/Node/ReLU.test.cpp10
-rw-r--r--compiler/locomotiv/src/Node/ReLU6.test.cpp14
-rw-r--r--compiler/locomotiv/src/Node/Reshape.test.cpp14
-rw-r--r--compiler/locomotiv/src/Node/Softmax.test.cpp14
-rw-r--r--compiler/locomotiv/src/Node/Tanh.test.cpp12
-rw-r--r--compiler/locomotiv/src/Node/TensorBroadcast.test.cpp10
-rw-r--r--compiler/locomotiv/src/Node/TensorConcat.cpp2
-rw-r--r--compiler/locomotiv/src/Node/TensorConcat.test.cpp40
-rw-r--r--compiler/locomotiv/src/Node/TensorConstantPad.cpp2
-rw-r--r--compiler/locomotiv/src/Node/TensorConstantPad.test.cpp64
-rw-r--r--compiler/locomotiv/src/Node/TensorReduce.cpp3
-rw-r--r--compiler/locomotiv/src/Node/TensorReduce.test.cpp18
-rw-r--r--compiler/locomotiv/src/Node/TransposedConv2D.test.cpp4
-rw-r--r--compiler/locomotiv/src/NodeData.test.cpp12
-rw-r--r--compiler/locomotiv/src/NodeDataImpl.test.cpp10
-rw-r--r--compiler/locomotiv/src/NodeDomain.test.cpp6
-rw-r--r--compiler/locomotiv/src/NodeExecution.cpp2
-rw-r--r--compiler/locomotiv/src/Session.test.cpp74
-rw-r--r--compiler/locop/requires.cmake1
-rw-r--r--compiler/locop/src/CanonicalNodeSummaryBuilder.cpp7
-rw-r--r--compiler/locop/src/FormattedGraph.cpp3
-rw-r--r--compiler/logo/include/logo/DeadNodeQueryService.h35
-rw-r--r--compiler/logo/include/logo/RemoveDeadNodeWithQueryPass.h34
-rw-r--r--compiler/logo/src/Passes/ConstantFoldingPass.test.cpp4
-rw-r--r--compiler/logo/src/Passes/RemoveDeadNodeWithQueryPass.cpp70
-rw-r--r--compiler/logo/src/Passes/RemoveForwardNodePass.cpp2
-rw-r--r--compiler/logo/src/Passes/ReorderDecodePass.cpp96
-rw-r--r--compiler/logo/src/Passes/ResolveDuplicateReshapePass.cpp4
-rw-r--r--compiler/logo/src/Passes/SimplifyDomainConversionPass.cpp2
-rw-r--r--compiler/logo/src/Passes/SimplifyDomainConversionPass.test.cpp2
-rw-r--r--compiler/luci-interpreter/CMakeLists.txt4
-rw-r--r--compiler/luci-interpreter/include/luci_interpreter/Interpreter.h78
-rw-r--r--compiler/luci-interpreter/include/luci_interpreter/core/DataType.h36
-rw-r--r--compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h133
-rw-r--r--compiler/luci-interpreter/requires.cmake1
-rw-r--r--compiler/luci-interpreter/src/CMakeLists.txt35
-rw-r--r--compiler/luci-interpreter/src/Interpreter.cpp126
-rw-r--r--compiler/luci-interpreter/src/core/CMakeLists.txt17
-rw-r--r--compiler/luci-interpreter/src/core/EventNotifier.h36
-rw-r--r--compiler/luci-interpreter/src/core/Kernel.h75
-rw-r--r--compiler/luci-interpreter/src/core/KernelParams.h151
-rw-r--r--compiler/luci-interpreter/src/core/RuntimeGraph.cpp93
-rw-r--r--compiler/luci-interpreter/src/core/RuntimeGraph.h60
-rw-r--r--compiler/luci-interpreter/src/core/RuntimeModule.h59
-rw-r--r--compiler/luci-interpreter/src/core/Tensor.cpp68
-rw-r--r--compiler/luci-interpreter/src/kernels/Add.cpp141
-rw-r--r--compiler/luci-interpreter/src/kernels/Add.h48
-rw-r--r--compiler/luci-interpreter/src/kernels/Add.test.cpp174
-rw-r--r--compiler/luci-interpreter/src/kernels/ArgMax.cpp140
-rw-r--r--compiler/luci-interpreter/src/kernels/ArgMax.h44
-rw-r--r--compiler/luci-interpreter/src/kernels/ArgMax.test.cpp98
-rw-r--r--compiler/luci-interpreter/src/kernels/AveragePool2D.cpp115
-rw-r--r--compiler/luci-interpreter/src/kernels/AveragePool2D.h51
-rw-r--r--compiler/luci-interpreter/src/kernels/AveragePool2D.test.cpp127
-rw-r--r--compiler/luci-interpreter/src/kernels/CMakeLists.txt106
-rw-r--r--compiler/luci-interpreter/src/kernels/Concatenation.cpp136
-rw-r--r--compiler/luci-interpreter/src/kernels/Concatenation.h48
-rw-r--r--compiler/luci-interpreter/src/kernels/Concatenation.test.cpp83
-rw-r--r--compiler/luci-interpreter/src/kernels/Conv2D.cpp194
-rw-r--r--compiler/luci-interpreter/src/kernels/Conv2D.h57
-rw-r--r--compiler/luci-interpreter/src/kernels/Conv2D.test.cpp185
-rw-r--r--compiler/luci-interpreter/src/kernels/DepthwiseConv2D.cpp175
-rw-r--r--compiler/luci-interpreter/src/kernels/DepthwiseConv2D.h54
-rw-r--r--compiler/luci-interpreter/src/kernels/DepthwiseConv2D.test.cpp135
-rw-r--r--compiler/luci-interpreter/src/kernels/Elu.cpp52
-rw-r--r--compiler/luci-interpreter/src/kernels/Elu.h43
-rw-r--r--compiler/luci-interpreter/src/kernels/Elu.test.cpp64
-rw-r--r--compiler/luci-interpreter/src/kernels/FullyConnected.cpp79
-rw-r--r--compiler/luci-interpreter/src/kernels/FullyConnected.h49
-rw-r--r--compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp67
-rw-r--r--compiler/luci-interpreter/src/kernels/If.cpp89
-rw-r--r--compiler/luci-interpreter/src/kernels/If.h49
-rw-r--r--compiler/luci-interpreter/src/kernels/If.test.cpp111
-rw-r--r--compiler/luci-interpreter/src/kernels/L2Normalize.cpp74
-rw-r--r--compiler/luci-interpreter/src/kernels/L2Normalize.h46
-rw-r--r--compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp57
-rw-r--r--compiler/luci-interpreter/src/kernels/L2Pool2D.cpp88
-rw-r--r--compiler/luci-interpreter/src/kernels/L2Pool2D.h49
-rw-r--r--compiler/luci-interpreter/src/kernels/L2Pool2D.test.cpp228
-rw-r--r--compiler/luci-interpreter/src/kernels/LeakyRelu.cpp92
-rw-r--r--compiler/luci-interpreter/src/kernels/LeakyRelu.h52
-rw-r--r--compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp75
-rw-r--r--compiler/luci-interpreter/src/kernels/LocalResponseNormalization.cpp65
-rw-r--r--compiler/luci-interpreter/src/kernels/LocalResponseNormalization.h44
-rw-r--r--compiler/luci-interpreter/src/kernels/LocalResponseNormalization.test.cpp113
-rw-r--r--compiler/luci-interpreter/src/kernels/Logistic.cpp94
-rw-r--r--compiler/luci-interpreter/src/kernels/Logistic.h52
-rw-r--r--compiler/luci-interpreter/src/kernels/Logistic.test.cpp59
-rw-r--r--compiler/luci-interpreter/src/kernels/MaxPool2D.cpp120
-rw-r--r--compiler/luci-interpreter/src/kernels/MaxPool2D.h51
-rw-r--r--compiler/luci-interpreter/src/kernels/MaxPool2D.test.cpp97
-rw-r--r--compiler/luci-interpreter/src/kernels/Mean.cpp249
-rw-r--r--compiler/luci-interpreter/src/kernels/Mean.h55
-rw-r--r--compiler/luci-interpreter/src/kernels/Mean.test.cpp165
-rw-r--r--compiler/luci-interpreter/src/kernels/Mul.cpp82
-rw-r--r--compiler/luci-interpreter/src/kernels/Mul.h50
-rw-r--r--compiler/luci-interpreter/src/kernels/Mul.test.cpp85
-rw-r--r--compiler/luci-interpreter/src/kernels/Pad.cpp102
-rw-r--r--compiler/luci-interpreter/src/kernels/Pad.h43
-rw-r--r--compiler/luci-interpreter/src/kernels/Pad.test.cpp79
-rw-r--r--compiler/luci-interpreter/src/kernels/Reshape.cpp90
-rw-r--r--compiler/luci-interpreter/src/kernels/Reshape.h43
-rw-r--r--compiler/luci-interpreter/src/kernels/Reshape.test.cpp69
-rw-r--r--compiler/luci-interpreter/src/kernels/Softmax.cpp64
-rw-r--r--compiler/luci-interpreter/src/kernels/Softmax.h46
-rw-r--r--compiler/luci-interpreter/src/kernels/Softmax.test.cpp60
-rw-r--r--compiler/luci-interpreter/src/kernels/SpaceToDepth.cpp79
-rw-r--r--compiler/luci-interpreter/src/kernels/SpaceToDepth.h45
-rw-r--r--compiler/luci-interpreter/src/kernels/SpaceToDepth.test.cpp60
-rw-r--r--compiler/luci-interpreter/src/kernels/Split.cpp81
-rw-r--r--compiler/luci-interpreter/src/kernels/Split.h47
-rw-r--r--compiler/luci-interpreter/src/kernels/Split.test.cpp126
-rw-r--r--compiler/luci-interpreter/src/kernels/Squeeze.cpp86
-rw-r--r--compiler/luci-interpreter/src/kernels/Squeeze.h44
-rw-r--r--compiler/luci-interpreter/src/kernels/Squeeze.test.cpp72
-rw-r--r--compiler/luci-interpreter/src/kernels/StridedSlice.cpp145
-rw-r--r--compiler/luci-interpreter/src/kernels/StridedSlice.h47
-rw-r--r--compiler/luci-interpreter/src/kernels/StridedSlice.test.cpp113
-rw-r--r--compiler/luci-interpreter/src/kernels/TestUtils.cpp61
-rw-r--r--compiler/luci-interpreter/src/kernels/TestUtils.h183
-rw-r--r--compiler/luci-interpreter/src/kernels/Transpose.cpp84
-rw-r--r--compiler/luci-interpreter/src/kernels/Transpose.h44
-rw-r--r--compiler/luci-interpreter/src/kernels/Transpose.test.cpp117
-rw-r--r--compiler/luci-interpreter/src/kernels/TransposeConv.cpp153
-rw-r--r--compiler/luci-interpreter/src/kernels/TransposeConv.h58
-rw-r--r--compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp102
-rw-r--r--compiler/luci-interpreter/src/kernels/Unpack.cpp84
-rw-r--r--compiler/luci-interpreter/src/kernels/Unpack.h46
-rw-r--r--compiler/luci-interpreter/src/kernels/Unpack.test.cpp141
-rw-r--r--compiler/luci-interpreter/src/kernels/Utils.cpp182
-rw-r--r--compiler/luci-interpreter/src/kernels/Utils.h194
-rw-r--r--compiler/luci-interpreter/src/loader/CMakeLists.txt15
-rw-r--r--compiler/luci-interpreter/src/loader/GraphLoader.cpp205
-rw-r--r--compiler/luci-interpreter/src/loader/GraphLoader.h58
-rw-r--r--compiler/luci-interpreter/src/loader/KernelBuilder.cpp529
-rw-r--r--compiler/luci-interpreter/src/loader/KernelBuilder.h91
-rw-r--r--compiler/luci-interpreter/src/loader/ModuleLoader.cpp49
-rw-r--r--compiler/luci-interpreter/src/loader/ModuleLoader.h54
-rw-r--r--compiler/luci-interpreter/src/loader/RuntimeToIR.h38
-rw-r--r--compiler/luci-value-test/CMakeLists.txt25
-rw-r--r--compiler/luci-value-test/README.md15
-rwxr-xr-xcompiler/luci-value-test/evalverify.sh61
-rwxr-xr-xcompiler/luci-value-test/luci_eval_verifier.py82
-rw-r--r--compiler/luci-value-test/requires.cmake6
-rw-r--r--compiler/luci-value-test/test.lst81
-rw-r--r--compiler/luci-value-test/tester/CMakeLists.txt15
-rw-r--r--compiler/luci-value-test/tester/src/CircleExpContract.cpp33
-rw-r--r--compiler/luci-value-test/tester/src/CircleExpContract.h49
-rw-r--r--compiler/luci-value-test/tester/src/EvalTester.cpp164
-rw-r--r--compiler/luci/CMakeLists.txt1
-rw-r--r--compiler/luci/env/CMakeLists.txt18
-rw-r--r--compiler/luci/env/README.md3
-rw-r--r--compiler/luci/env/include/luci/UserSettings.h45
-rw-r--r--compiler/luci/env/src/UserSettings.cpp77
-rw-r--r--compiler/luci/env/src/UserSettings.test.cpp68
-rw-r--r--compiler/luci/export/CMakeLists.txt1
-rw-r--r--compiler/luci/export/src/CircleExporterImpl.cpp59
-rw-r--r--compiler/luci/export/src/CircleExporterUtils.cpp35
-rw-r--r--compiler/luci/export/src/CircleExporterUtils.h1
-rw-r--r--compiler/luci/export/src/CircleOperationExporter.cpp1205
-rw-r--r--compiler/luci/export/src/CircleTensorExporter.cpp179
-rw-r--r--compiler/luci/export/src/ProgressReporter.cpp28
-rw-r--r--compiler/luci/export/src/SerializedData.h23
-rw-r--r--compiler/luci/export/src/TypeBridge.cpp105
-rw-r--r--compiler/luci/export/src/TypeBridge.h44
-rw-r--r--compiler/luci/import/CMakeLists.txt1
-rw-r--r--compiler/luci/import/include/luci/Import/CircleReader.h13
-rw-r--r--compiler/luci/import/include/luci/Import/GraphBuilder.h18
-rw-r--r--compiler/luci/import/include/luci/Import/GraphBuilderBase.h48
-rw-r--r--compiler/luci/import/include/luci/Import/GraphBuilderContext.h23
-rw-r--r--compiler/luci/import/include/luci/Import/GraphBuilderRegistry.h12
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes.h80
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleAddN.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleArgMin.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleBCQFullyConnected.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleBCQGather.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleBatchMatMul.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleCast.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleCeil.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleCustom.h35
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleDepthToSpace.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleElu.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleExpandDims.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleFill.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleFloor.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleFloorDiv.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleFloorMod.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleGather.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleGatherNd.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleGreater.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleGreaterEqual.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleIf.h35
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleInstanceNorm.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleL2Normalize.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleL2Pool2D.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLeakyRelu.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLess.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLessEqual.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLocalResponseNormalization.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLog.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLogSoftmax.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLogicalAnd.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleLogistic.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleMatrixDiag.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleMatrixSetDiag.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleMaximum.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleMinimum.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleMirrorPad.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleNeg.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleNotEqual.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleOneHot.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CirclePRelu.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CirclePow.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleRange.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleRank.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleReduceAny.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleReduceMax.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleReduceMin.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleReduceProd.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleRelu6.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleReluN1To1.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleResizeBilinear.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleResizeNearestNeighbor.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleReverseSequence.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleReverseV2.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleRound.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleScatterNd.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSegmentSum.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSelect.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSelectV2.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleShape.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSin.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSlice.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToBatchND.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToDepth.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSparseToDense.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSplit.h35
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSplitV.h35
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSqrt.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSquare.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSquaredDifference.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSqueeze.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleStridedSlice.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleSum.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleTanh.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleTile.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleTopKV2.h35
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleTransposeConv.h37
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleUnpack.h35
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleWhere.h36
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleWhile.h35
-rw-r--r--compiler/luci/import/include/luci/Import/Nodes/CircleZerosLike.h37
-rw-r--r--compiler/luci/import/include/luci/Importer.h2
-rw-r--r--compiler/luci/import/src/CircleReader.cpp43
-rw-r--r--compiler/luci/import/src/GraphBuilder.cpp37
-rw-r--r--compiler/luci/import/src/GraphBuilderContext.cpp23
-rw-r--r--compiler/luci/import/src/GraphBuilderRegistry.cpp209
-rw-r--r--compiler/luci/import/src/Importer.cpp113
-rw-r--r--compiler/luci/import/src/Nodes/CircleAddN.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CircleArgMin.cpp48
-rw-r--r--compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp62
-rw-r--r--compiler/luci/import/src/Nodes/CircleBCQGather.cpp52
-rw-r--r--compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp47
-rw-r--r--compiler/luci/import/src/Nodes/CircleCast.cpp99
-rw-r--r--compiler/luci/import/src/Nodes/CircleCeil.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CircleConst.cpp108
-rw-r--r--compiler/luci/import/src/Nodes/CircleConv2D.cpp3
-rw-r--r--compiler/luci/import/src/Nodes/CircleCustom.cpp88
-rw-r--r--compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp67
-rw-r--r--compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp3
-rw-r--r--compiler/luci/import/src/Nodes/CircleElu.cpp64
-rw-r--r--compiler/luci/import/src/Nodes/CircleExp.cpp4
-rw-r--r--compiler/luci/import/src/Nodes/CircleExpandDims.cpp51
-rw-r--r--compiler/luci/import/src/Nodes/CircleFill.cpp49
-rw-r--r--compiler/luci/import/src/Nodes/CircleFloor.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CircleFloorDiv.cpp68
-rw-r--r--compiler/luci/import/src/Nodes/CircleFloorMod.cpp57
-rw-r--r--compiler/luci/import/src/Nodes/CircleFullyConnected.cpp12
-rw-r--r--compiler/luci/import/src/Nodes/CircleGather.cpp68
-rw-r--r--compiler/luci/import/src/Nodes/CircleGatherNd.cpp64
-rw-r--r--compiler/luci/import/src/Nodes/CircleGreater.cpp76
-rw-r--r--compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp62
-rw-r--r--compiler/luci/import/src/Nodes/CircleIf.cpp138
-rw-r--r--compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp52
-rw-r--r--compiler/luci/import/src/Nodes/CircleL2Normalize.cpp56
-rw-r--r--compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp54
-rw-r--r--compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CircleLess.cpp78
-rw-r--r--compiler/luci/import/src/Nodes/CircleLessEqual.cpp62
-rw-r--r--compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp51
-rw-r--r--compiler/luci/import/src/Nodes/CircleLog.cpp65
-rw-r--r--compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp46
-rw-r--r--compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp55
-rw-r--r--compiler/luci/import/src/Nodes/CircleLogistic.cpp66
-rw-r--r--compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp56
-rw-r--r--compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp57
-rw-r--r--compiler/luci/import/src/Nodes/CircleMaximum.cpp72
-rw-r--r--compiler/luci/import/src/Nodes/CircleMinimum.cpp72
-rw-r--r--compiler/luci/import/src/Nodes/CircleMirrorPad.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CircleNeg.cpp44
-rw-r--r--compiler/luci/import/src/Nodes/CircleNotEqual.cpp62
-rw-r--r--compiler/luci/import/src/Nodes/CircleOneHot.cpp77
-rw-r--r--compiler/luci/import/src/Nodes/CirclePRelu.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CirclePow.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CircleRange.cpp46
-rw-r--r--compiler/luci/import/src/Nodes/CircleRank.cpp46
-rw-r--r--compiler/luci/import/src/Nodes/CircleReduceAny.cpp69
-rw-r--r--compiler/luci/import/src/Nodes/CircleReduceMax.cpp64
-rw-r--r--compiler/luci/import/src/Nodes/CircleReduceMin.cpp64
-rw-r--r--compiler/luci/import/src/Nodes/CircleReduceProd.cpp64
-rw-r--r--compiler/luci/import/src/Nodes/CircleRelu6.cpp47
-rw-r--r--compiler/luci/import/src/Nodes/CircleReluN1To1.cpp49
-rw-r--r--compiler/luci/import/src/Nodes/CircleReshape.cpp12
-rw-r--r--compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp51
-rw-r--r--compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp49
-rw-r--r--compiler/luci/import/src/Nodes/CircleReverseSequence.cpp71
-rw-r--r--compiler/luci/import/src/Nodes/CircleReverseV2.cpp67
-rw-r--r--compiler/luci/import/src/Nodes/CircleRound.cpp71
-rw-r--r--compiler/luci/import/src/Nodes/CircleScatterNd.cpp58
-rw-r--r--compiler/luci/import/src/Nodes/CircleSegmentSum.cpp68
-rw-r--r--compiler/luci/import/src/Nodes/CircleSelect.cpp56
-rw-r--r--compiler/luci/import/src/Nodes/CircleSelectV2.cpp60
-rw-r--r--compiler/luci/import/src/Nodes/CircleShape.cpp53
-rw-r--r--compiler/luci/import/src/Nodes/CircleSin.cpp63
-rw-r--r--compiler/luci/import/src/Nodes/CircleSlice.cpp52
-rw-r--r--compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp80
-rw-r--r--compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp52
-rw-r--r--compiler/luci/import/src/Nodes/CircleSparseToDense.cpp50
-rw-r--r--compiler/luci/import/src/Nodes/CircleSplit.cpp119
-rw-r--r--compiler/luci/import/src/Nodes/CircleSplitV.cpp121
-rw-r--r--compiler/luci/import/src/Nodes/CircleSqrt.cpp44
-rw-r--r--compiler/luci/import/src/Nodes/CircleSquare.cpp63
-rw-r--r--compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp77
-rw-r--r--compiler/luci/import/src/Nodes/CircleSqueeze.cpp51
-rw-r--r--compiler/luci/import/src/Nodes/CircleStridedSlice.cpp60
-rw-r--r--compiler/luci/import/src/Nodes/CircleSum.cpp46
-rw-r--r--compiler/luci/import/src/Nodes/CircleTanh.cpp60
-rw-r--r--compiler/luci/import/src/Nodes/CircleTile.cpp68
-rw-r--r--compiler/luci/import/src/Nodes/CircleTopKV2.cpp117
-rw-r--r--compiler/luci/import/src/Nodes/CircleTransposeConv.cpp54
-rw-r--r--compiler/luci/import/src/Nodes/CircleUnpack.cpp151
-rw-r--r--compiler/luci/import/src/Nodes/CircleWhere.cpp60
-rw-r--r--compiler/luci/import/src/Nodes/CircleWhile.cpp123
-rw-r--r--compiler/luci/import/src/Nodes/CircleZerosLike.cpp49
-rw-r--r--compiler/luci/import/src/PostImport.cpp354
-rw-r--r--compiler/luci/import/src/PostImport.h34
-rw-r--r--compiler/luci/lang/CMakeLists.txt2
-rw-r--r--compiler/luci/lang/include/luci/IR/AttrDilation.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/AttrMirrorPadMode.h33
-rw-r--r--compiler/luci/lang/include/luci/IR/CircleNodeDecl.h16
-rw-r--r--compiler/luci/lang/include/luci/IR/CircleNodeImpl.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/CircleNodes.h98
-rw-r--r--compiler/luci/lang/include/luci/IR/CircleNodes.lst85
-rw-r--r--compiler/luci/lang/include/luci/IR/LuciNodeMixins.h5
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleAddN.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleArgMin.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleBCQFullyConnected.h66
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleBCQGather.h60
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleBatchMatMul.h54
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleCast.h51
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleCeil.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleConcatenation.h2
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h5
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleConv2D.h5
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleCustom.h61
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleCustomOut.h51
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleDepthToSpace.h48
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleDepthwiseConv2D.h5
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleElu.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleExpandDims.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleFill.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleFloor.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleFloorDiv.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleFloorMod.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleGather.h9
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleGatherNd.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleGreater.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleGreaterEqual.h44
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleIf.h79
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleIfOut.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleInput.h5
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleL2Normalize.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleL2Pool2D.h62
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLeakyRelu.h49
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLess.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLessEqual.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLocalResponseNormalization.h60
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLog.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLogSoftmax.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLogicalAnd.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleLogistic.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixDiag.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixSetDiag.h44
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleMaximum.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleMean.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleMinimum.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleMirrorPad.h54
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleNeg.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleNotEqual.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleOneHot.h56
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleOutput.h22
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CirclePRelu.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CirclePow.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleRange.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleRank.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReduceAny.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMax.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMin.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReduceProd.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleRelu.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleRelu6.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReluN1To1.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReshape.h5
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleResizeBilinear.h57
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleResizeNearestNeighbor.h53
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReverseSequence.h58
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleReverseV2.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleRound.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleRsqrt.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleScatterNd.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSegmentSum.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSelect.h49
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSelectV2.h49
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleShape.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSin.h40
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSlice.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSoftmax.h2
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToBatchND.h47
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToDepth.h48
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSparseToDense.h57
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSplit.h51
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSplitOut.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSplitV.h54
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSplitVOut.h51
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSqrt.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSquare.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSquaredDifference.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSqueeze.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleStridedSlice.h73
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleSum.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleTanh.h43
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleTile.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2Out.h51
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleTranspose.h1
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleTransposeConv.h3
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleUnpack.h54
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleUnpackOut.h51
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleWhere.h45
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleWhile.h79
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleWhileOut.h50
-rw-r--r--compiler/luci/lang/include/luci/IR/Nodes/CircleZerosLike.h46
-rw-r--r--compiler/luci/lang/include/luci/IR/PropertyShapeStatus.h38
-rw-r--r--compiler/luci/lang/include/luci/IR/VariadicArityNode.h12
-rw-r--r--compiler/luci/lang/src/CircleDialect.cpp9
-rw-r--r--compiler/luci/lang/src/CircleDialect.test.cpp59
-rw-r--r--compiler/luci/lang/src/CircleNodeShapeDtype.test.cpp52
-rw-r--r--compiler/luci/lang/src/CircleNodes.cpp35
-rw-r--r--compiler/luci/lang/src/DeadNodeQueryService.cpp74
-rw-r--r--compiler/luci/lang/src/DeadNodeQueryService.h34
-rw-r--r--compiler/luci/lang/src/Module.cpp2
-rw-r--r--compiler/luci/lang/src/Module.test.cpp14
-rw-r--r--compiler/luci/lang/src/Nodes/CircleAbs.test.cpp69
-rw-r--r--compiler/luci/lang/src/Nodes/CircleAdd.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleAddN.test.cpp91
-rw-r--r--compiler/luci/lang/src/Nodes/CircleArgMax.test.cpp59
-rw-r--r--compiler/luci/lang/src/Nodes/CircleArgMin.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleAveragePool2D.test.cpp90
-rw-r--r--compiler/luci/lang/src/Nodes/CircleBCQFullyConnected.test.cpp38
-rw-r--r--compiler/luci/lang/src/Nodes/CircleBCQGather.test.cpp37
-rw-r--r--compiler/luci/lang/src/Nodes/CircleBatchMatMul.test.cpp84
-rw-r--r--compiler/luci/lang/src/Nodes/CircleBatchToSpaceND.test.cpp63
-rw-r--r--compiler/luci/lang/src/Nodes/CircleCast.test.cpp78
-rw-r--r--compiler/luci/lang/src/Nodes/CircleCeil.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleConcatenation.test.cpp63
-rw-r--r--compiler/luci/lang/src/Nodes/CircleConst.cpp3
-rw-r--r--compiler/luci/lang/src/Nodes/CircleConv2D.test.cpp85
-rw-r--r--compiler/luci/lang/src/Nodes/CircleCos.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleCustom.test.cpp45
-rw-r--r--compiler/luci/lang/src/Nodes/CircleCustomOut.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleDepthToSpace.test.cpp80
-rw-r--r--compiler/luci/lang/src/Nodes/CircleDepthwiseConv2D.test.cpp93
-rw-r--r--compiler/luci/lang/src/Nodes/CircleDiv.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleElu.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleEqual.test.cpp59
-rw-r--r--compiler/luci/lang/src/Nodes/CircleExp.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleExpandDims.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleFill.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleFloor.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleFloorDiv.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleFloorMod.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleFullyConnected.test.cpp68
-rw-r--r--compiler/luci/lang/src/Nodes/CircleGather.test.cpp62
-rw-r--r--compiler/luci/lang/src/Nodes/CircleGatherNd.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleGreater.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleGreaterEqual.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleIf.test.cpp87
-rw-r--r--compiler/luci/lang/src/Nodes/CircleIfOut.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleInstanceNorm.test.cpp68
-rw-r--r--compiler/luci/lang/src/Nodes/CircleL2Pool2D.test.cpp94
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLeakyRelu.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLess.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLessEqual.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLocalResponseNormalization.test.cpp90
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLog.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLogSoftmax.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLogicalAnd.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLogicalNot.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLogicalOr.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleLogistic.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMatrixDiag.test.cpp78
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMatrixSetDiag.test.cpp84
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMaxPool2D.test.cpp67
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMaximum.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMean.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMinimum.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMirrorPad.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleMul.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleNeg.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleNotEqual.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleOneHot.test.cpp95
-rw-r--r--compiler/luci/lang/src/Nodes/CirclePRelu.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CirclePack.test.cpp63
-rw-r--r--compiler/luci/lang/src/Nodes/CirclePad.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CirclePow.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleRange.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleRank.test.cpp31
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReduceAny.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReduceMax.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReduceMin.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReduceProd.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleRelu.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleRelu6.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReluN1To1.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReshape.test.cpp65
-rw-r--r--compiler/luci/lang/src/Nodes/CircleResizeBilinear.test.cpp88
-rw-r--r--compiler/luci/lang/src/Nodes/CircleResizeNearestNeighbor.test.cpp85
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReverseSequence.test.cpp35
-rw-r--r--compiler/luci/lang/src/Nodes/CircleReverseV2.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleRound.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleRsqrt.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleScatterNd.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSegmentSum.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSelect.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSelectV2.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleShape.test.cpp80
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSin.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSlice.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSoftmax.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSpaceToBatchND.test.cpp86
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSpaceToDepth.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSparseToDense.test.cpp93
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSplit.test.cpp85
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSplitOut.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSplitV.test.cpp90
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSplitVOut.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSqrt.test.cpp51
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSquare.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSquaredDifference.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSqueeze.test.cpp87
-rw-r--r--compiler/luci/lang/src/Nodes/CircleStridedSlice.test.cpp108
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSub.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleSum.test.cpp85
-rw-r--r--compiler/luci/lang/src/Nodes/CircleTanh.test.cpp76
-rw-r--r--compiler/luci/lang/src/Nodes/CircleTile.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleTopKV2.test.cpp81
-rw-r--r--compiler/luci/lang/src/Nodes/CircleTopKV2Out.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleTranspose.test.cpp57
-rw-r--r--compiler/luci/lang/src/Nodes/CircleTransposeConv.test.cpp75
-rw-r--r--compiler/luci/lang/src/Nodes/CircleUnpack.test.cpp83
-rw-r--r--compiler/luci/lang/src/Nodes/CircleUnpackOut.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleWhere.test.cpp33
-rw-r--r--compiler/luci/lang/src/Nodes/CircleWhile.test.cpp87
-rw-r--r--compiler/luci/lang/src/Nodes/CircleWhileOut.test.cpp32
-rw-r--r--compiler/luci/lang/src/Nodes/CircleZerosLike.test.cpp76
-rw-r--r--compiler/luci/log/CMakeLists.txt1
-rw-r--r--compiler/luci/log/include/luci/Log.h10
-rw-r--r--compiler/luci/log/src/Log.cpp40
-rw-r--r--compiler/luci/logex/src/FormattedGraph.cpp884
-rw-r--r--compiler/luci/pass/CMakeLists.txt24
-rw-r--r--compiler/luci/pass/include/luci/CircleOptimizer.h19
-rw-r--r--compiler/luci/pass/include/luci/Pass/FuseBCQPass.h38
-rw-r--r--compiler/luci/pass/include/luci/Pass/QuantizationParameters.h31
-rw-r--r--compiler/luci/pass/include/luci/Pass/QuantizeDequantizeWeightsPass.h54
-rw-r--r--compiler/luci/pass/include/luci/Pass/QuantizeWithMinMaxPass.h54
-rw-r--r--compiler/luci/pass/include/luci/Pass/ResolveCustomOpAddPass.h37
-rw-r--r--compiler/luci/pass/include/luci/Pass/ResolveCustomOpBatchMatMulPass.h37
-rw-r--r--compiler/luci/pass/include/luci/Pass/ResolveCustomOpMatMulPass.h37
-rw-r--r--compiler/luci/pass/src/CircleOptimizer.cpp121
-rw-r--r--compiler/luci/pass/src/CircleOptimizerUtils.cpp89
-rw-r--r--compiler/luci/pass/src/CircleOptimizerUtils.h42
-rw-r--r--compiler/luci/pass/src/FuseBCQPass.cpp405
-rw-r--r--compiler/luci/pass/src/FuseInstanceNormPass.cpp231
-rw-r--r--compiler/luci/pass/src/FuseInstanceNormPass.test.cpp64
-rw-r--r--compiler/luci/pass/src/FuseInstanceNormPassInternal.h28
-rw-r--r--compiler/luci/pass/src/QuantizationUtils.cpp172
-rw-r--r--compiler/luci/pass/src/QuantizationUtils.h38
-rw-r--r--compiler/luci/pass/src/QuantizeDequantizeWeightsPass.cpp495
-rw-r--r--compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp551
-rw-r--r--compiler/luci/pass/src/ResolveCustomOpAddPass.cpp124
-rw-r--r--compiler/luci/pass/src/ResolveCustomOpBatchMatMulPass.cpp69
-rw-r--r--compiler/luci/pass/src/ResolveCustomOpMatMulPass.cpp185
-rw-r--r--compiler/luci/requires.cmake2
-rw-r--r--compiler/luci/service/src/CircleShapeInference.cpp7
-rw-r--r--compiler/luci/service/src/CircleShapeInferenceRule.cpp1645
-rw-r--r--compiler/luci/service/src/CircleShapeInferenceRule.test.cpp532
-rw-r--r--compiler/luci/service/src/CircleTypeInference.cpp10
-rw-r--r--compiler/luci/service/src/CircleTypeInferenceRule.cpp452
-rw-r--r--compiler/luci/service/src/CircleTypeInferenceRule.test.cpp24
-rw-r--r--compiler/luci/service/src/GraphBlock.h201
-rw-r--r--compiler/luci/service/src/GraphBlock.test.cpp246
-rw-r--r--compiler/luci/service/src/ShapeInfer_StridedSlice.cpp298
-rw-r--r--compiler/luci/service/src/ShapeInfer_StridedSlice.h31
-rw-r--r--compiler/luci/service/src/TestGraph.h216
-rw-r--r--compiler/luci/service/src/TestGraph.test.cpp101
-rw-r--r--compiler/luci/service/src/Validate.cpp31
-rw-r--r--compiler/luci/tester/CMakeLists.txt12
-rw-r--r--compiler/luci/tester/src/Model.cpp62
-rw-r--r--compiler/luci/tester/src/Model.h27
-rw-r--r--compiler/luci/tester/src/ReadTester.cpp39
-rw-r--r--compiler/luci/tester/src/WriteTester.cpp43
-rw-r--r--compiler/luci/tests/CMakeLists.txt29
-rwxr-xr-xcompiler/luci/tests/readverify.sh3
-rw-r--r--compiler/luci/tests/test.lst296
-rw-r--r--compiler/mio-circle/CMakeLists.txt2
-rw-r--r--compiler/mio-tflite/CMakeLists.txt7
-rw-r--r--compiler/mir-caffe-importer/CMakeLists.txt17
-rw-r--r--compiler/mir-caffe-importer/caffe_importer.cpp439
-rw-r--r--compiler/mir-caffe-importer/caffe_op_creator.cpp834
-rw-r--r--compiler/mir-caffe-importer/requires.cmake1
-rw-r--r--compiler/mir-caffe2-importer/CMakeLists.txt29
-rw-r--r--compiler/mir-caffe2-importer/caffe2_importer.cpp343
-rw-r--r--compiler/mir-caffe2-importer/caffe2_op_creator.cpp547
-rw-r--r--compiler/mir-caffe2-importer/caffe2_proto_helper.cpp62
-rw-r--r--compiler/mir-caffe2-importer/caffe2_proto_helper.h40
-rw-r--r--compiler/mir-caffe2-importer/requires.cmake1
-rw-r--r--compiler/mir-interpreter/README.md1
-rw-r--r--compiler/mir-onnx-importer/AttributeHelpers.h105
-rw-r--r--compiler/mir-onnx-importer/CMakeLists.txt119
-rw-r--r--compiler/mir-onnx-importer/ONNXImporterImpl.cpp241
-rw-r--r--compiler/mir-onnx-importer/Op/Pad.cpp70
-rw-r--r--compiler/mir-onnx-importer/Op/Transpose.cpp57
-rw-r--r--compiler/mir-onnx-importer/Op/Upsample.cpp124
-rw-r--r--compiler/mir-onnx-importer/requires.cmake2
-rw-r--r--compiler/mir-tflite-importer/CMakeLists.txt22
-rw-r--r--compiler/mir-tflite-importer/requires.cmake1
-rw-r--r--compiler/mir-tflite-importer/tflite_importer.cpp428
-rw-r--r--compiler/mir-tflite-importer/tflite_op_creator.cpp649
-rw-r--r--compiler/mir/CMakeLists.txt5
-rw-r--r--compiler/mir/include/mir_caffe2_importer/caffe2_importer.h (renamed from compiler/mir-caffe2-importer/caffe2_importer.h)0
-rw-r--r--compiler/mir/include/mir_caffe_importer/caffe_importer.h (renamed from compiler/mir-caffe-importer/caffe_importer.h)0
-rw-r--r--compiler/mir/include/mir_onnx_importer/ONNXImporterImpl.h (renamed from compiler/mir-onnx-importer/ONNXImporterImpl.h)0
-rw-r--r--compiler/mir/include/mir_tflite_importer/tflite_importer.h (renamed from compiler/mir-tflite-importer/tflite_importer.h)0
-rw-r--r--compiler/mir/src/mir_caffe2_importer/CMakeLists.txt28
-rw-r--r--compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp343
-rw-r--r--compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp551
-rw-r--r--compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.h (renamed from compiler/mir-caffe2-importer/caffe2_op_creator.h)0
-rw-r--r--compiler/mir/src/mir_caffe2_importer/caffe2_op_types.h (renamed from compiler/mir-caffe2-importer/caffe2_op_types.h)0
-rw-r--r--compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.cpp62
-rw-r--r--compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.h40
-rw-r--r--compiler/mir/src/mir_caffe_importer/CMakeLists.txt16
-rw-r--r--compiler/mir/src/mir_caffe_importer/caffe_importer.cpp439
-rw-r--r--compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp835
-rw-r--r--compiler/mir/src/mir_caffe_importer/caffe_op_creator.h (renamed from compiler/mir-caffe-importer/caffe_op_creator.h)0
-rw-r--r--compiler/mir/src/mir_caffe_importer/caffe_op_types.h (renamed from compiler/mir-caffe-importer/caffe_op_types.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/AttributeHelpers.h105
-rw-r--r--compiler/mir/src/mir_onnx_importer/CMakeLists.txt119
-rw-r--r--compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp (renamed from compiler/mir-onnx-importer/ConvPoolHelpers.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.h (renamed from compiler/mir-onnx-importer/ConvPoolHelpers.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp (renamed from compiler/mir-onnx-importer/ONNXHelpers.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/ONNXHelpers.h (renamed from compiler/mir-onnx-importer/ONNXHelpers.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp240
-rw-r--r--compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp (renamed from compiler/mir-onnx-importer/ONNXNodeConverterRegistry.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.h (renamed from compiler/mir-onnx-importer/ONNXNodeConverterRegistry.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.test.cpp (renamed from compiler/mir-onnx-importer/ONNXNodeConverterRegistry.test.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/ONNXOpRegistration.h (renamed from compiler/mir-onnx-importer/ONNXOpRegistration.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Abs.cpp (renamed from compiler/mir-onnx-importer/Op/Abs.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Abs.h (renamed from compiler/mir-onnx-importer/Op/Abs.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Add.cpp (renamed from compiler/mir-onnx-importer/Op/Add.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Add.h (renamed from compiler/mir-onnx-importer/Op/Add.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp (renamed from compiler/mir-onnx-importer/Op/AveragePool.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/AveragePool.h (renamed from compiler/mir-onnx-importer/Op/AveragePool.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp (renamed from compiler/mir-onnx-importer/Op/BatchNormalization.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.h (renamed from compiler/mir-onnx-importer/Op/BatchNormalization.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Concat.cpp (renamed from compiler/mir-onnx-importer/Op/Concat.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Concat.h (renamed from compiler/mir-onnx-importer/Op/Concat.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Constant.cpp (renamed from compiler/mir-onnx-importer/Op/Constant.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Constant.h (renamed from compiler/mir-onnx-importer/Op/Constant.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Conv.cpp (renamed from compiler/mir-onnx-importer/Op/Conv.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Conv.h (renamed from compiler/mir-onnx-importer/Op/Conv.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp (renamed from compiler/mir-onnx-importer/Op/ConvTranspose.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.h (renamed from compiler/mir-onnx-importer/Op/ConvTranspose.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Div.cpp (renamed from compiler/mir-onnx-importer/Op/Div.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Div.h (renamed from compiler/mir-onnx-importer/Op/Div.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Dropout.cpp (renamed from compiler/mir-onnx-importer/Op/Dropout.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Dropout.h (renamed from compiler/mir-onnx-importer/Op/Dropout.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Equal.cpp (renamed from compiler/mir-onnx-importer/Op/Equal.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Equal.h (renamed from compiler/mir-onnx-importer/Op/Equal.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Expand.cpp (renamed from compiler/mir-onnx-importer/Op/Expand.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Expand.h (renamed from compiler/mir-onnx-importer/Op/Expand.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Flatten.cpp (renamed from compiler/mir-onnx-importer/Op/Flatten.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Flatten.h (renamed from compiler/mir-onnx-importer/Op/Flatten.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Gather.cpp (renamed from compiler/mir-onnx-importer/Op/Gather.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Gather.h (renamed from compiler/mir-onnx-importer/Op/Gather.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Gemm.cpp (renamed from compiler/mir-onnx-importer/Op/Gemm.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Gemm.h (renamed from compiler/mir-onnx-importer/Op/Gemm.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/GlobalAveragePool.cpp (renamed from compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/GlobalAveragePool.h (renamed from compiler/mir-onnx-importer/Op/GlobalAveragePool.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Greater.cpp (renamed from compiler/mir-onnx-importer/Op/Greater.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Greater.h (renamed from compiler/mir-onnx-importer/Op/Greater.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Identity.cpp (renamed from compiler/mir-onnx-importer/Op/Identity.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Identity.h (renamed from compiler/mir-onnx-importer/Op/Identity.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Less.cpp (renamed from compiler/mir-onnx-importer/Op/Less.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Less.h (renamed from compiler/mir-onnx-importer/Op/Less.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/MatMul.cpp (renamed from compiler/mir-onnx-importer/Op/MatMul.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/MatMul.h (renamed from compiler/mir-onnx-importer/Op/MatMul.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Max.cpp (renamed from compiler/mir-onnx-importer/Op/Max.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Max.h (renamed from compiler/mir-onnx-importer/Op/Max.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp (renamed from compiler/mir-onnx-importer/Op/MaxPool.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/MaxPool.h (renamed from compiler/mir-onnx-importer/Op/MaxPool.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Mul.cpp (renamed from compiler/mir-onnx-importer/Op/Mul.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Mul.h (renamed from compiler/mir-onnx-importer/Op/Mul.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Pad.cpp70
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Pad.h (renamed from compiler/mir-onnx-importer/Op/Pad.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Reciprocal.cpp (renamed from compiler/mir-onnx-importer/Op/Reciprocal.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Reciprocal.h (renamed from compiler/mir-onnx-importer/Op/Reciprocal.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp (renamed from compiler/mir-onnx-importer/Op/ReduceMean.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/ReduceMean.h (renamed from compiler/mir-onnx-importer/Op/ReduceMean.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Relu.cpp (renamed from compiler/mir-onnx-importer/Op/Relu.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Relu.h (renamed from compiler/mir-onnx-importer/Op/Relu.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Reshape.cpp (renamed from compiler/mir-onnx-importer/Op/Reshape.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Reshape.h (renamed from compiler/mir-onnx-importer/Op/Reshape.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Shape.cpp (renamed from compiler/mir-onnx-importer/Op/Shape.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Shape.h (renamed from compiler/mir-onnx-importer/Op/Shape.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sigmoid.cpp (renamed from compiler/mir-onnx-importer/Op/Sigmoid.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sigmoid.h (renamed from compiler/mir-onnx-importer/Op/Sigmoid.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Softmax.cpp (renamed from compiler/mir-onnx-importer/Op/Softmax.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Softmax.h (renamed from compiler/mir-onnx-importer/Op/Softmax.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sqrt.cpp (renamed from compiler/mir-onnx-importer/Op/Sqrt.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sqrt.h (renamed from compiler/mir-onnx-importer/Op/Sqrt.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sub.cpp (renamed from compiler/mir-onnx-importer/Op/Sub.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sub.h (renamed from compiler/mir-onnx-importer/Op/Sub.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sum.cpp (renamed from compiler/mir-onnx-importer/Op/Sum.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Sum.h (renamed from compiler/mir-onnx-importer/Op/Sum.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Tanh.cpp (renamed from compiler/mir-onnx-importer/Op/Tanh.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Tanh.h (renamed from compiler/mir-onnx-importer/Op/Tanh.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Transpose.cpp57
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Transpose.h (renamed from compiler/mir-onnx-importer/Op/Transpose.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Unsqueeze.cpp (renamed from compiler/mir-onnx-importer/Op/Unsqueeze.cpp)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Unsqueeze.h (renamed from compiler/mir-onnx-importer/Op/Unsqueeze.h)0
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp127
-rw-r--r--compiler/mir/src/mir_onnx_importer/Op/Upsample.h (renamed from compiler/mir-onnx-importer/Op/Upsample.h)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/CMakeLists.txt21
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema.fbs (renamed from compiler/mir-tflite-importer/schema/schema.fbs)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema.meta (renamed from compiler/mir-tflite-importer/schema/schema.meta)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v0.fbs (renamed from compiler/mir-tflite-importer/schema/schema_v0.fbs)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v0.meta (renamed from compiler/mir-tflite-importer/schema/schema_v0.meta)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v1.fbs (renamed from compiler/mir-tflite-importer/schema/schema_v1.fbs)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v1.meta (renamed from compiler/mir-tflite-importer/schema/schema_v1.meta)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v2.fbs (renamed from compiler/mir-tflite-importer/schema/schema_v2.fbs)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v2.meta (renamed from compiler/mir-tflite-importer/schema/schema_v2.meta)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v3.fbs (renamed from compiler/mir-tflite-importer/schema/schema_v3.fbs)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/schema/schema_v3.meta (renamed from compiler/mir-tflite-importer/schema/schema_v3.meta)0
-rw-r--r--compiler/mir/src/mir_tflite_importer/tflite_importer.cpp428
-rw-r--r--compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp652
-rw-r--r--compiler/mir/src/mir_tflite_importer/tflite_op_creator.h (renamed from compiler/mir-tflite-importer/tflite_op_creator.h)0
-rw-r--r--compiler/mir2loco/CMakeLists.txt1
-rw-r--r--compiler/mir2loco/README.md1
-rw-r--r--compiler/mir2loco/src/mir2loco.cpp20
-rw-r--r--compiler/moco-tf/requires.cmake1
-rw-r--r--compiler/moco-tf/src/Canonicalization/PadCanonicalizer.cpp2
-rw-r--r--compiler/moco/import/src/Importer.cpp3
-rw-r--r--compiler/moco/import/src/Nodes/BiasAdd.test.cpp4
-rw-r--r--compiler/moco/import/src/Nodes/Concat.test.cpp4
-rw-r--r--compiler/moco/import/src/Nodes/Const.test.cpp14
-rw-r--r--compiler/moco/import/src/Nodes/Mean.test.cpp4
-rw-r--r--compiler/moco/import/src/Nodes/Pack.test.cpp2
-rw-r--r--compiler/moco/import/src/Nodes/StridedSlice.test.cpp2
-rw-r--r--compiler/moco/pass/src/Passes/ConstantFoldPack.cpp15
-rw-r--r--compiler/moco/pass/src/Passes/ConstantFoldStridedSlice.cpp7
-rw-r--r--compiler/moco/pass/src/Passes/FuseBinaryIntoPreceding.cpp3
-rw-r--r--compiler/moco/service/src/Service/TFShapeInferenceRule.cpp11
-rw-r--r--compiler/moco/service/src/Service/TFTypeInferenceRule.cpp2
-rw-r--r--compiler/nest/core/src/Block.test.cpp6
-rw-r--r--compiler/nest/core/src/Bound.test.cpp4
-rw-r--r--compiler/nest/core/src/Closure.test.cpp4
-rw-r--r--compiler/nest/core/src/Domain.test.cpp4
-rw-r--r--compiler/nest/core/src/DomainContext.test.cpp22
-rw-r--r--compiler/nest/core/src/DomainID.test.cpp2
-rw-r--r--compiler/nest/core/src/DomainInfo.test.cpp10
-rw-r--r--compiler/nest/core/src/Expr.test.cpp8
-rw-r--r--compiler/nest/core/src/FV.test.cpp8
-rw-r--r--compiler/nest/core/src/Level.test.cpp2
-rw-r--r--compiler/nest/core/src/Module.test.cpp12
-rw-r--r--compiler/nest/core/src/Ret.test.cpp12
-rw-r--r--compiler/nest/core/src/Schedule.test.cpp4
-rw-r--r--compiler/nest/core/src/Var.test.cpp4
-rw-r--r--compiler/nest/core/src/VarContext.test.cpp30
-rw-r--r--compiler/nest/core/src/VarID.test.cpp2
-rw-r--r--compiler/nest/core/src/expr/AddNode.test.cpp6
-rw-r--r--compiler/nest/core/src/expr/DerefNode.test.cpp2
-rw-r--r--compiler/nest/core/src/expr/MulNode.test.cpp6
-rw-r--r--compiler/nest/core/src/expr/Subscript.test.cpp6
-rw-r--r--compiler/nest/core/src/expr/VarNode.test.cpp4
-rw-r--r--compiler/nest/core/src/stmt/PushNode.test.cpp2
-rw-r--r--compiler/nnc/CMakeLists.txt1
-rw-r--r--compiler/nnc/cmake/config.cmake3
-rw-r--r--compiler/nnc/driver/Driver.cpp14
-rw-r--r--compiler/nnc/requires.cmake6
-rw-r--r--compiler/nnc/tests/acl_soft_backend/CMakeLists.txt3
-rw-r--r--compiler/nnc/tests/acl_soft_backend/artifact_cmake/CMakeLists.txt3
-rwxr-xr-xcompiler/nnc/utils/caffe_model_maker/AllFill.sh48
-rwxr-xr-xcompiler/nnc/utils/caffe_model_maker/Filler.sh28
-rwxr-xr-xcompiler/nnc/utils/caffe_model_maker/GenerateCaffeModels.py722
-rwxr-xr-xcompiler/nnc/utils/caffe_model_maker/Pyloss.py83
-rw-r--r--compiler/nnc/utils/caffe_model_maker/README.md22
-rw-r--r--compiler/nnkit-caffe/README.md1
-rw-r--r--compiler/nnkit-mocotf/README.md1
-rw-r--r--compiler/nnkit-onnxrt/README.md1
-rw-r--r--compiler/nnkit-tf/README.md1
-rw-r--r--compiler/nnkit-tflite/CMakeLists.txt2
-rw-r--r--compiler/nnkit-tflite/README.md1
-rw-r--r--compiler/nnkit-tflite/backend/Backend.cpp4
-rw-r--r--compiler/nnkit-tflite/support/CMakeLists.txt14
-rw-r--r--compiler/nnkit-tflite/support/include/nnkit/support/tflite/AbstractBackend.h2
-rw-r--r--compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSet.h2
-rw-r--r--compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSets.h2
-rw-r--r--compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorUtils.h2
-rw-r--r--compiler/nnkit/actions/HDF5/CMakeLists.txt2
-rw-r--r--compiler/nnop/README.md1
-rw-r--r--compiler/nnsuite/README.md1
-rw-r--r--compiler/one-cmds/CMakeLists.txt44
-rw-r--r--compiler/one-cmds/README.md3
-rw-r--r--compiler/one-cmds/how-to-prepare-virtualenv.txt37
-rw-r--r--compiler/one-cmds/how-to-use-one-commands.txt114
-rw-r--r--compiler/one-cmds/one-codegen55
-rw-r--r--compiler/one-cmds/one-import53
-rw-r--r--compiler/one-cmds/one-import-tf114
-rw-r--r--compiler/one-cmds/one-import-tflite67
-rw-r--r--compiler/one-cmds/one-optimize132
-rw-r--r--compiler/one-cmds/one-pack67
-rw-r--r--compiler/one-cmds/one-prepare-venv40
-rw-r--r--compiler/one-cmds/one-quantize155
-rw-r--r--compiler/one-cmds/requires.cmake5
-rw-r--r--compiler/oneco-value-pbtxt-test/README.md1
-rw-r--r--compiler/oneco/README.md1
-rw-r--r--compiler/onnx2circle/requires.cmake2
-rw-r--r--compiler/onnx2tflite-integration-test/README.md1
-rw-r--r--compiler/onnx2tflite/requires.cmake2
-rw-r--r--compiler/oops/README.md1
-rw-r--r--compiler/pepper-assert/README.md1
-rw-r--r--compiler/pota-quantization-value-test/CMakeLists.txt69
-rw-r--r--compiler/pota-quantization-value-test/README.md41
-rwxr-xr-xcompiler/pota-quantization-value-test/compare_tensors.py111
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json48
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json7
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json4
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json52
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json4
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json4
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json4
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json34
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json9
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json4
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json38
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json4
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json4
-rw-r--r--compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json4
-rwxr-xr-xcompiler/pota-quantization-value-test/gen_h5_explicit_inputs.py59
-rw-r--r--compiler/pota-quantization-value-test/requires.cmake4
-rw-r--r--compiler/pota-quantization-value-test/test.lst2
-rwxr-xr-xcompiler/pota-quantization-value-test/test_fake_wquant.sh87
-rw-r--r--compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt1
-rw-r--r--compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt1
-rwxr-xr-xcompiler/pota-quantization-value-test/test_quantization.sh87
-rwxr-xr-xcompiler/pota-quantization-value-test/test_record_minmax.sh100
-rw-r--r--compiler/pp/src/LinearDocument.cpp4
-rw-r--r--compiler/record-minmax-conversion-test/CMakeLists.txt42
-rw-r--r--compiler/record-minmax-conversion-test/README.md5
-rwxr-xr-xcompiler/record-minmax-conversion-test/gen_h5_random_inputs.py46
-rw-r--r--compiler/record-minmax-conversion-test/requires.cmake2
-rw-r--r--compiler/record-minmax-conversion-test/test.lst16
-rwxr-xr-xcompiler/record-minmax-conversion-test/testall.sh81
-rw-r--r--compiler/record-minmax/CMakeLists.txt27
-rw-r--r--compiler/record-minmax/README.md18
-rw-r--r--compiler/record-minmax/driver/Driver.cpp105
-rw-r--r--compiler/record-minmax/include/MinMaxObserver.h74
-rw-r--r--compiler/record-minmax/include/RecordFunction.h102
-rw-r--r--compiler/record-minmax/include/RecordMinMax.h52
-rw-r--r--compiler/record-minmax/requires.cmake3
-rw-r--r--compiler/record-minmax/src/CircleExpContract.cpp38
-rw-r--r--compiler/record-minmax/src/CircleExpContract.h53
-rw-r--r--compiler/record-minmax/src/HDF5Importer.cpp132
-rw-r--r--compiler/record-minmax/src/HDF5Importer.h82
-rw-r--r--compiler/record-minmax/src/MinMaxObserver.cpp69
-rw-r--r--compiler/record-minmax/src/RecordMinMax.cpp196
-rw-r--r--compiler/record-minmax/tests/RecordFunction.test.cpp104
-rw-r--r--compiler/safemain/README.md1
-rw-r--r--compiler/souschef/CMakeLists.txt5
-rw-r--r--compiler/souschef/README.md3
-rw-r--r--compiler/souschef/include/souschef/Arguments.h39
-rw-r--r--compiler/souschef/include/souschef/Data/Constant.h67
-rw-r--r--compiler/souschef/include/souschef/Data/Explicit.h80
-rw-r--r--compiler/souschef/include/souschef/Data/Gaussian.h93
-rw-r--r--compiler/souschef/include/souschef/DataChef.def19
-rw-r--r--compiler/souschef/include/souschef/DataChef.h61
-rw-r--r--compiler/souschef/include/souschef/DataChefs.h24
-rw-r--r--compiler/souschef/include/souschef/Dataset.h62
-rw-r--r--compiler/souschef/include/souschef/LexicalCast.h37
-rw-r--r--compiler/souschef/include/souschef/RangedArguments.h53
-rw-r--r--compiler/souschef/include/souschef/Registry.h43
-rw-r--r--compiler/souschef/src/Gaussian.cpp140
-rw-r--r--compiler/souschef/src/LexicalCast.cpp42
-rw-r--r--compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt2
-rw-r--r--compiler/tf2nnpackage-value-remote-test/CMakeLists.txt94
-rw-r--r--compiler/tf2nnpackage-value-remote-test/README.md60
-rw-r--r--compiler/tf2nnpackage-value-remote-test/requires.cmake1
-rw-r--r--compiler/tf2nnpackage-value-remote-test/test.lst3
-rwxr-xr-xcompiler/tf2nnpackage-value-remote-test/testall.sh119
-rw-r--r--compiler/tf2nnpkg/README.md1
-rw-r--r--compiler/tf2tflite-dredd-pbtxt-test/README.md1
-rw-r--r--compiler/tf2tfliteV2-conversion-test/CMakeLists.txt109
-rw-r--r--compiler/tf2tfliteV2-conversion-test/README.md2
-rw-r--r--compiler/tf2tfliteV2-conversion-test/requires.cmake2
-rw-r--r--compiler/tf2tfliteV2-conversion-test/test.lst124
-rwxr-xr-xcompiler/tf2tfliteV2-conversion-test/testall.sh81
-rw-r--r--compiler/tf2tfliteV2-value-pbtxt-test/CMakeLists.txt183
-rw-r--r--compiler/tf2tfliteV2-value-pbtxt-test/requirements.txt2
-rw-r--r--compiler/tf2tfliteV2-value-pbtxt-test/requires.cmake4
-rw-r--r--compiler/tf2tfliteV2-value-pbtxt-test/test.lst101
-rwxr-xr-xcompiler/tf2tfliteV2-value-pbtxt-test/testall.sh110
-rw-r--r--compiler/tf2tfliteV2/CMakeLists.txt2
-rw-r--r--compiler/tf2tfliteV2/README.md28
-rwxr-xr-xcompiler/tf2tfliteV2/tf2tfliteV2.py27
-rw-r--r--compiler/tfinfo-v2/README.md1
-rw-r--r--compiler/tfkit/src/PackCommand.cpp21
-rw-r--r--compiler/tfkit/src/UnpackCommand.cpp57
-rw-r--r--compiler/tfl-inspect/CMakeLists.txt3
-rw-r--r--compiler/tfl-inspect/driver/Driver.cpp85
-rw-r--r--compiler/tfl-inspect/requires.cmake3
-rw-r--r--compiler/tfl-inspect/src/Dump.cpp112
-rw-r--r--compiler/tfl-inspect/src/Dump.h9
-rw-r--r--compiler/tfl-inspect/src/Model.cpp143
-rw-r--r--compiler/tfl-inspect/src/Model.h43
-rw-r--r--compiler/tfl-inspect/src/Reader.cpp5
-rw-r--r--compiler/tfl-verify/CMakeLists.txt2
-rw-r--r--compiler/tfl-verify/requires.cmake2
-rw-r--r--compiler/tfl-verify/src/Driver.cpp5
-rw-r--r--compiler/tfl-verify/src/Model.cpp90
-rw-r--r--compiler/tfl-verify/src/Model.h38
-rw-r--r--compiler/tfl-verify/src/VerifyFlatBuffers.cpp10
-rw-r--r--compiler/tflchef/CMakeLists.txt4
-rw-r--r--compiler/tflchef/core/CMakeLists.txt2
-rw-r--r--compiler/tflchef/core/src/Arguments.h34
-rw-r--r--compiler/tflchef/core/src/Convert.cpp17
-rw-r--r--compiler/tflchef/core/src/Convert.h1
-rw-r--r--compiler/tflchef/core/src/CustomOp/AddV2.cpp63
-rw-r--r--compiler/tflchef/core/src/CustomOp/AddV2.h49
-rw-r--r--compiler/tflchef/core/src/CustomOp/All.cpp61
-rw-r--r--compiler/tflchef/core/src/CustomOp/All.h49
-rw-r--r--compiler/tflchef/core/src/CustomOp/BatchMatMulV2.cpp65
-rw-r--r--compiler/tflchef/core/src/CustomOp/BatchMatMulV2.h49
-rw-r--r--compiler/tflchef/core/src/CustomOp/MatMul.cpp63
-rw-r--r--compiler/tflchef/core/src/CustomOp/MatMul.h49
-rw-r--r--compiler/tflchef/core/src/CustomOp/MatrixBandPart.cpp62
-rw-r--r--compiler/tflchef/core/src/CustomOp/MatrixBandPart.h49
-rw-r--r--compiler/tflchef/core/src/Data/Constant.h62
-rw-r--r--compiler/tflchef/core/src/Data/Explicit.h75
-rw-r--r--compiler/tflchef/core/src/Data/Gaussian.cpp135
-rw-r--r--compiler/tflchef/core/src/Data/Gaussian.h88
-rw-r--r--compiler/tflchef/core/src/DataChef.def15
-rw-r--r--compiler/tflchef/core/src/DataChef.h56
-rw-r--r--compiler/tflchef/core/src/DataChefs.h24
-rw-r--r--compiler/tflchef/core/src/Dataset.h57
-rw-r--r--compiler/tflchef/core/src/LexicalCast.cpp36
-rw-r--r--compiler/tflchef/core/src/LexicalCast.h32
-rw-r--r--compiler/tflchef/core/src/ModelChef.cpp819
-rw-r--r--compiler/tflchef/core/src/Op/AddN.cpp32
-rw-r--r--compiler/tflchef/core/src/Op/AddN.h46
-rw-r--r--compiler/tflchef/core/src/Op/ArgMin.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/ArgMin.h46
-rw-r--r--compiler/tflchef/core/src/Op/BatchMatMul.cpp35
-rw-r--r--compiler/tflchef/core/src/Op/BatchMatMul.h49
-rw-r--r--compiler/tflchef/core/src/Op/Cast.cpp42
-rw-r--r--compiler/tflchef/core/src/Op/Cast.h46
-rw-r--r--compiler/tflchef/core/src/Op/Ceil.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/Ceil.h46
-rw-r--r--compiler/tflchef/core/src/Op/Conv2D.cpp12
-rw-r--r--compiler/tflchef/core/src/Op/DepthToSpace.cpp42
-rw-r--r--compiler/tflchef/core/src/Op/DepthToSpace.h52
-rw-r--r--compiler/tflchef/core/src/Op/DepthwiseConv2D.cpp2
-rw-r--r--compiler/tflchef/core/src/Op/ELU.cpp27
-rw-r--r--compiler/tflchef/core/src/Op/ELU.h46
-rw-r--r--compiler/tflchef/core/src/Op/ExpandDims.cpp30
-rw-r--r--compiler/tflchef/core/src/Op/ExpandDims.h49
-rw-r--r--compiler/tflchef/core/src/Op/Fill.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/Fill.h46
-rw-r--r--compiler/tflchef/core/src/Op/Floor.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/Floor.h46
-rw-r--r--compiler/tflchef/core/src/Op/FloorMod.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/FloorMod.h49
-rw-r--r--compiler/tflchef/core/src/Op/Gather.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/Gather.h46
-rw-r--r--compiler/tflchef/core/src/Op/GatherNd.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/GatherNd.h49
-rw-r--r--compiler/tflchef/core/src/Op/Greater.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/Greater.h46
-rw-r--r--compiler/tflchef/core/src/Op/GreaterEqual.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/GreaterEqual.h52
-rw-r--r--compiler/tflchef/core/src/Op/If.cpp35
-rw-r--r--compiler/tflchef/core/src/Op/If.h46
-rw-r--r--compiler/tflchef/core/src/Op/L2Normalize.cpp33
-rw-r--r--compiler/tflchef/core/src/Op/L2Normalize.h49
-rw-r--r--compiler/tflchef/core/src/Op/L2Pool2D.cpp47
-rw-r--r--compiler/tflchef/core/src/Op/L2Pool2D.h46
-rw-r--r--compiler/tflchef/core/src/Op/LeakyRelu.cpp34
-rw-r--r--compiler/tflchef/core/src/Op/LeakyRelu.h49
-rw-r--r--compiler/tflchef/core/src/Op/Less.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/Less.h46
-rw-r--r--compiler/tflchef/core/src/Op/LessEqual.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/LessEqual.h49
-rw-r--r--compiler/tflchef/core/src/Op/LocalResponseNormalization.cpp50
-rw-r--r--compiler/tflchef/core/src/Op/LocalResponseNormalization.h53
-rw-r--r--compiler/tflchef/core/src/Op/Log.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/Log.h46
-rw-r--r--compiler/tflchef/core/src/Op/LogSoftmax.cpp32
-rw-r--r--compiler/tflchef/core/src/Op/LogSoftmax.h49
-rw-r--r--compiler/tflchef/core/src/Op/LogicalAnd.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/LogicalAnd.h49
-rw-r--r--compiler/tflchef/core/src/Op/Logistic.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/Logistic.h46
-rw-r--r--compiler/tflchef/core/src/Op/MatrixDiag.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/MatrixDiag.h49
-rw-r--r--compiler/tflchef/core/src/Op/MatrixSetDiag.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/MatrixSetDiag.h52
-rw-r--r--compiler/tflchef/core/src/Op/Maximum.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/Maximum.h49
-rw-r--r--compiler/tflchef/core/src/Op/Minimum.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/Minimum.h49
-rw-r--r--compiler/tflchef/core/src/Op/MirrorPad.cpp41
-rw-r--r--compiler/tflchef/core/src/Op/MirrorPad.h49
-rw-r--r--compiler/tflchef/core/src/Op/Neg.cpp30
-rw-r--r--compiler/tflchef/core/src/Op/Neg.h46
-rw-r--r--compiler/tflchef/core/src/Op/NotEqual.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/NotEqual.h49
-rw-r--r--compiler/tflchef/core/src/Op/OneHot.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/OneHot.h46
-rw-r--r--compiler/tflchef/core/src/Op/PRelu.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/PRelu.h47
-rw-r--r--compiler/tflchef/core/src/Op/Pow.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/Pow.h46
-rw-r--r--compiler/tflchef/core/src/Op/Range.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/Range.h46
-rw-r--r--compiler/tflchef/core/src/Op/Rank.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/Rank.h46
-rw-r--r--compiler/tflchef/core/src/Op/ReLUN1To1.cpp27
-rw-r--r--compiler/tflchef/core/src/Op/ReLUN1To1.h46
-rw-r--r--compiler/tflchef/core/src/Op/ReduceAny.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/ReduceAny.h46
-rw-r--r--compiler/tflchef/core/src/Op/ReduceMax.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/ReduceMax.h46
-rw-r--r--compiler/tflchef/core/src/Op/ReduceMin.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/ReduceMin.h46
-rw-r--r--compiler/tflchef/core/src/Op/ReduceProd.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/ReduceProd.h46
-rw-r--r--compiler/tflchef/core/src/Op/Reshape.cpp3
-rw-r--r--compiler/tflchef/core/src/Op/ResizeBilinear.cpp42
-rw-r--r--compiler/tflchef/core/src/Op/ResizeBilinear.h52
-rw-r--r--compiler/tflchef/core/src/Op/ResizeNearestNeighbor.cpp43
-rw-r--r--compiler/tflchef/core/src/Op/ResizeNearestNeighbor.h52
-rw-r--r--compiler/tflchef/core/src/Op/ReverseSequence.cpp42
-rw-r--r--compiler/tflchef/core/src/Op/ReverseSequence.h52
-rw-r--r--compiler/tflchef/core/src/Op/ReverseV2.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/ReverseV2.h49
-rw-r--r--compiler/tflchef/core/src/Op/Round.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/Round.h46
-rw-r--r--compiler/tflchef/core/src/Op/ScatterNd.cpp32
-rw-r--r--compiler/tflchef/core/src/Op/ScatterNd.h49
-rw-r--r--compiler/tflchef/core/src/Op/SegmentSum.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/SegmentSum.h49
-rw-r--r--compiler/tflchef/core/src/Op/Select.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/Select.h46
-rw-r--r--compiler/tflchef/core/src/Op/SelectV2.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/SelectV2.h49
-rw-r--r--compiler/tflchef/core/src/Op/Sin.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/Sin.h46
-rw-r--r--compiler/tflchef/core/src/Op/Slice.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/Slice.h46
-rw-r--r--compiler/tflchef/core/src/Op/SpaceToBatchND.cpp31
-rw-r--r--compiler/tflchef/core/src/Op/SpaceToBatchND.h52
-rw-r--r--compiler/tflchef/core/src/Op/SpaceToDepth.cpp38
-rw-r--r--compiler/tflchef/core/src/Op/SpaceToDepth.h52
-rw-r--r--compiler/tflchef/core/src/Op/SparseToDense.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/SparseToDense.h52
-rw-r--r--compiler/tflchef/core/src/Op/Split.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/Split.h46
-rw-r--r--compiler/tflchef/core/src/Op/SplitV.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/SplitV.h46
-rw-r--r--compiler/tflchef/core/src/Op/Square.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/Square.h46
-rw-r--r--compiler/tflchef/core/src/Op/SquaredDifference.cpp30
-rw-r--r--compiler/tflchef/core/src/Op/SquaredDifference.h52
-rw-r--r--compiler/tflchef/core/src/Op/Squeeze.cpp41
-rw-r--r--compiler/tflchef/core/src/Op/Squeeze.h46
-rw-r--r--compiler/tflchef/core/src/Op/StridedSlice.cpp44
-rw-r--r--compiler/tflchef/core/src/Op/StridedSlice.h52
-rw-r--r--compiler/tflchef/core/src/Op/Sum.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/Sum.h46
-rw-r--r--compiler/tflchef/core/src/Op/Tile.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/Tile.h46
-rw-r--r--compiler/tflchef/core/src/Op/TopKV2.cpp29
-rw-r--r--compiler/tflchef/core/src/Op/TopKV2.h46
-rw-r--r--compiler/tflchef/core/src/Op/TransposeConv.cpp43
-rw-r--r--compiler/tflchef/core/src/Op/TransposeConv.h52
-rw-r--r--compiler/tflchef/core/src/Op/Unique.cpp39
-rw-r--r--compiler/tflchef/core/src/Op/Unique.h46
-rw-r--r--compiler/tflchef/core/src/Op/Unpack.cpp35
-rw-r--r--compiler/tflchef/core/src/Op/Unpack.h46
-rw-r--r--compiler/tflchef/core/src/Op/Where.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/Where.h46
-rw-r--r--compiler/tflchef/core/src/Op/While.cpp35
-rw-r--r--compiler/tflchef/core/src/Op/While.h46
-rw-r--r--compiler/tflchef/core/src/Op/ZerosLike.cpp28
-rw-r--r--compiler/tflchef/core/src/Op/ZerosLike.h49
-rw-r--r--compiler/tflchef/core/src/OpChef.def81
-rw-r--r--compiler/tflchef/core/src/OpChef.h7
-rw-r--r--compiler/tflchef/core/src/OpChefs.h80
-rw-r--r--compiler/tflchef/log/CMakeLists.txt7
-rw-r--r--compiler/tflchef/log/include/Log.h75
-rw-r--r--compiler/tflchef/log/include/LoggingContext.h35
-rw-r--r--compiler/tflchef/log/src/Log.cpp87
-rw-r--r--compiler/tflchef/log/src/LoggingContext.cpp41
-rw-r--r--compiler/tflchef/proto/tflchef.proto338
-rw-r--r--compiler/tflchef/requires.cmake5
-rw-r--r--compiler/tflchef/tests/no_shape/test.recipe43
-rw-r--r--compiler/tflchef/tests/no_shape/test.reverse0
-rwxr-xr-xcompiler/tflchef/tests/runvalidate.sh4
-rw-r--r--compiler/tflchef/tflite/include/tflchef/RawModel.h41
-rw-r--r--compiler/tflchef/tflite/src/Convert.cpp16
-rw-r--r--compiler/tflchef/tflite/src/Convert.h2
-rw-r--r--compiler/tflchef/tflite/src/FillerHelper.cpp50
-rw-r--r--compiler/tflchef/tflite/src/FillerHelper.h31
-rw-r--r--compiler/tflchef/tflite/src/Op/Add.cpp9
-rw-r--r--compiler/tflchef/tflite/src/Op/AddN.cpp46
-rw-r--r--compiler/tflchef/tflite/src/Op/AddN.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ArgMin.cpp54
-rw-r--r--compiler/tflchef/tflite/src/Op/ArgMin.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/BatchMatMul.cpp48
-rw-r--r--compiler/tflchef/tflite/src/Op/BatchMatMul.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Cast.cpp48
-rw-r--r--compiler/tflchef/tflite/src/Op/Cast.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Ceil.cpp39
-rw-r--r--compiler/tflchef/tflite/src/Op/Ceil.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Conv2D.cpp3
-rw-r--r--compiler/tflchef/tflite/src/Op/DepthToSpace.cpp47
-rw-r--r--compiler/tflchef/tflite/src/Op/DepthToSpace.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/DepthwiseConv2D.cpp5
-rw-r--r--compiler/tflchef/tflite/src/Op/ELU.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/ELU.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ExpandDims.cpp47
-rw-r--r--compiler/tflchef/tflite/src/Op/ExpandDims.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Fill.cpp47
-rw-r--r--compiler/tflchef/tflite/src/Op/Fill.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Floor.cpp39
-rw-r--r--compiler/tflchef/tflite/src/Op/Floor.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/FloorMod.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/FloorMod.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Gather.cpp59
-rw-r--r--compiler/tflchef/tflite/src/Op/Gather.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/GatherNd.cpp50
-rw-r--r--compiler/tflchef/tflite/src/Op/GatherNd.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Greater.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Greater.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/GreaterEqual.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/GreaterEqual.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/L2Normalize.cpp45
-rw-r--r--compiler/tflchef/tflite/src/Op/L2Normalize.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/L2Pool2D.cpp52
-rw-r--r--compiler/tflchef/tflite/src/Op/L2Pool2D.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/LeakyRelu.cpp46
-rw-r--r--compiler/tflchef/tflite/src/Op/LeakyRelu.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Less.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Less.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/LessEqual.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/LessEqual.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/LocalResponseNormalization.cpp51
-rw-r--r--compiler/tflchef/tflite/src/Op/LocalResponseNormalization.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Log.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Log.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/LogSoftmax.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/LogSoftmax.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/LogicalAnd.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/LogicalAnd.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Logistic.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Logistic.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/MatrixDiag.cpp38
-rw-r--r--compiler/tflchef/tflite/src/Op/MatrixDiag.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/MatrixSetDiag.cpp38
-rw-r--r--compiler/tflchef/tflite/src/Op/MatrixSetDiag.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Maximum.cpp38
-rw-r--r--compiler/tflchef/tflite/src/Op/Maximum.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Minimum.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Minimum.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/MirrorPad.cpp53
-rw-r--r--compiler/tflchef/tflite/src/Op/MirrorPad.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Mul.cpp53
-rw-r--r--compiler/tflchef/tflite/src/Op/Mul.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Neg.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Neg.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/NotEqual.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/NotEqual.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/OneHot.cpp87
-rw-r--r--compiler/tflchef/tflite/src/Op/OneHot.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/PRelu.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/PRelu.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Pow.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Pow.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Range.cpp61
-rw-r--r--compiler/tflchef/tflite/src/Op/Range.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Rank.cpp38
-rw-r--r--compiler/tflchef/tflite/src/Op/Rank.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ReLUN1To1.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/ReLUN1To1.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceAny.cpp52
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceAny.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceMax.cpp54
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceMax.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceMin.cpp54
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceMin.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceProd.cpp52
-rw-r--r--compiler/tflchef/tflite/src/Op/ReduceProd.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Reshape.cpp25
-rw-r--r--compiler/tflchef/tflite/src/Op/ResizeBilinear.cpp59
-rw-r--r--compiler/tflchef/tflite/src/Op/ResizeBilinear.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.cpp59
-rw-r--r--compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ReverseSequence.cpp53
-rw-r--r--compiler/tflchef/tflite/src/Op/ReverseSequence.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ReverseV2.cpp44
-rw-r--r--compiler/tflchef/tflite/src/Op/ReverseV2.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Round.cpp37
-rw-r--r--compiler/tflchef/tflite/src/Op/Round.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ScatterNd.cpp43
-rw-r--r--compiler/tflchef/tflite/src/Op/ScatterNd.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/SegmentSum.cpp41
-rw-r--r--compiler/tflchef/tflite/src/Op/SegmentSum.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Select.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Select.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/SelectV2.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/SelectV2.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Shape.cpp45
-rw-r--r--compiler/tflchef/tflite/src/Op/Shape.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Sin.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Sin.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Slice.cpp50
-rw-r--r--compiler/tflchef/tflite/src/Op/Slice.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/SpaceToBatchND.cpp53
-rw-r--r--compiler/tflchef/tflite/src/Op/SpaceToBatchND.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/SpaceToDepth.cpp47
-rw-r--r--compiler/tflchef/tflite/src/Op/SpaceToDepth.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/SparseToDense.cpp54
-rw-r--r--compiler/tflchef/tflite/src/Op/SparseToDense.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Split.cpp54
-rw-r--r--compiler/tflchef/tflite/src/Op/Split.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/SplitV.cpp56
-rw-r--r--compiler/tflchef/tflite/src/Op/SplitV.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Square.cpp40
-rw-r--r--compiler/tflchef/tflite/src/Op/Square.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/SquaredDifference.cpp41
-rw-r--r--compiler/tflchef/tflite/src/Op/SquaredDifference.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Squeeze.cpp52
-rw-r--r--compiler/tflchef/tflite/src/Op/Squeeze.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/StridedSlice.cpp60
-rw-r--r--compiler/tflchef/tflite/src/Op/StridedSlice.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Sub.cpp9
-rw-r--r--compiler/tflchef/tflite/src/Op/Sum.cpp54
-rw-r--r--compiler/tflchef/tflite/src/Op/Sum.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Tile.cpp48
-rw-r--r--compiler/tflchef/tflite/src/Op/Tile.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/TopKV2.cpp50
-rw-r--r--compiler/tflchef/tflite/src/Op/TopKV2.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Transpose.cpp7
-rw-r--r--compiler/tflchef/tflite/src/Op/TransposeConv.cpp60
-rw-r--r--compiler/tflchef/tflite/src/Op/TransposeConv.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Unique.cpp47
-rw-r--r--compiler/tflchef/tflite/src/Op/Unique.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Unpack.cpp45
-rw-r--r--compiler/tflchef/tflite/src/Op/Unpack.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/Where.cpp36
-rw-r--r--compiler/tflchef/tflite/src/Op/Where.h39
-rw-r--r--compiler/tflchef/tflite/src/Op/ZerosLike.cpp42
-rw-r--r--compiler/tflchef/tflite/src/Op/ZerosLike.h39
-rw-r--r--compiler/tflchef/tflite/src/RawModelLoader.cpp94
-rw-r--r--compiler/tflchef/tflite/src/RecipeChef.cpp24
-rw-r--r--compiler/tflchef/tflite/src/TFliteImport.h24
-rw-r--r--compiler/tflchef/tflite/src/TFliteOpChefs.h72
-rw-r--r--compiler/tflchef/tflite/src/TFliteOpRegistry.h72
-rw-r--r--compiler/tflchef/tools/file/CMakeLists.txt1
-rw-r--r--compiler/tflchef/tools/file/Driver.cpp29
-rw-r--r--compiler/tflchef/tools/reverse/CMakeLists.txt2
-rw-r--r--compiler/tflchef/tools/reverse/Driver.cpp40
-rw-r--r--compiler/tfldump/CMakeLists.txt3
-rw-r--r--compiler/tfldump/driver/Driver.cpp26
-rw-r--r--compiler/tfldump/requires.cmake2
-rw-r--r--compiler/tfldump/src/Dump.cpp22
-rw-r--r--compiler/tfldump/src/OpPrinter.cpp418
-rw-r--r--compiler/tflite2circle-conversion-test/CMakeLists.txt44
-rw-r--r--compiler/tflite2circle-conversion-test/requires.cmake2
-rw-r--r--compiler/tflite2circle-conversion-test/test.lst20
-rwxr-xr-xcompiler/tflite2circle-conversion-test/testall.sh7
-rw-r--r--compiler/tflite2circle/CMakeLists.txt4
-rw-r--r--compiler/tflite2circle/driver/Driver.cpp37
-rw-r--r--compiler/tflite2circle/include/CircleModel.h1
-rw-r--r--compiler/tflite2circle/requires.cmake2
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions.h56
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/CastOptions.cpp4
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.cpp35
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.cpp35
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.cpp39
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.h32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.cpp34
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.cpp35
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.cpp35
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ReshapeOptions.cpp4
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.h32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.cpp33
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.cpp35
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.cpp35
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.cpp34
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.cpp30
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.h32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.cpp39
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.cpp31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.cpp31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.cpp37
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.cpp29
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.cpp36
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.h31
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.cpp32
-rw-r--r--compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.h31
-rw-r--r--compiler/tflite2circle/src/CircleModel.cpp38
-rw-r--r--compiler/tflite2circle/src/DataLookup.cpp13
-rw-r--r--compiler/tflite2circle/src/DataLookup.h44
-rw-r--r--compiler/tflite2circle/src/TFLBuiltinOptions.lst96
-rw-r--r--compiler/tflite2circle/src/TFLOperator.lst5
1658 files changed, 75944 insertions, 11197 deletions
diff --git a/compiler/adtidas/README.md b/compiler/adtidas/README.md
new file mode 100644
index 000000000..df427d335
--- /dev/null
+++ b/compiler/adtidas/README.md
@@ -0,0 +1 @@
+# adtidas
diff --git a/compiler/angkor/src/ADT/feature/Buffer.test.cpp b/compiler/angkor/src/ADT/feature/Buffer.test.cpp
index 1e4430251..f61b7244b 100644
--- a/compiler/angkor/src/ADT/feature/Buffer.test.cpp
+++ b/compiler/angkor/src/ADT/feature/Buffer.test.cpp
@@ -30,9 +30,9 @@ TEST(ADT_FEATURE_BUFFER, ctor)
const Shape shape{4, 6, 3};
auto buffer = make_buffer<int, CHWLayout>(shape);
- ASSERT_EQ(buffer.shape().depth(), shape.depth());
- ASSERT_EQ(buffer.shape().height(), shape.height());
- ASSERT_EQ(buffer.shape().width(), shape.width());
+ ASSERT_EQ(shape.depth(), buffer.shape().depth());
+ ASSERT_EQ(shape.height(), buffer.shape().height());
+ ASSERT_EQ(shape.width(), buffer.shape().width());
}
TEST(ADT_FEATURE_BUFFER, access)
@@ -40,9 +40,9 @@ TEST(ADT_FEATURE_BUFFER, access)
const Shape shape{4, 6, 3};
auto buffer = make_buffer<int, CHWLayout>(shape);
- ASSERT_EQ(buffer.at(3, 5, 2), 0);
+ ASSERT_EQ(0, buffer.at(3, 5, 2));
buffer.at(3, 5, 2) = 4;
// Casting is introduced to use 'const T &at(...) const' method
- ASSERT_EQ(static_cast<const Buffer<int> &>(buffer).at(3, 5, 2), 4);
+ ASSERT_EQ(4, static_cast<const Buffer<int> &>(buffer).at(3, 5, 2));
}
diff --git a/compiler/angkor/src/ADT/feature/CHWLayout.test.cpp b/compiler/angkor/src/ADT/feature/CHWLayout.test.cpp
index 5610df8f3..72aef22d1 100644
--- a/compiler/angkor/src/ADT/feature/CHWLayout.test.cpp
+++ b/compiler/angkor/src/ADT/feature/CHWLayout.test.cpp
@@ -25,7 +25,7 @@ TEST(ADT_FEATURE_CHW_LAYOUT, col_increase)
const Shape shape{4, 3, 6};
const CHWLayout l;
- ASSERT_EQ(l.offset(shape, 1, 2, 1) + 1, l.offset(shape, 1, 2, 2));
+ ASSERT_EQ(l.offset(shape, 1, 2, 2), l.offset(shape, 1, 2, 1) + 1);
}
TEST(ADT_FEATURE_CHW_LAYOUT, row_increase)
@@ -33,7 +33,7 @@ TEST(ADT_FEATURE_CHW_LAYOUT, row_increase)
const Shape shape{4, 3, 6};
const CHWLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1) + 6, l.offset(shape, 1, 2, 1));
+ ASSERT_EQ(l.offset(shape, 1, 2, 1), l.offset(shape, 1, 1, 1) + 6);
}
TEST(ADT_FEATURE_CHW_LAYOUT, ch_increase)
@@ -41,5 +41,5 @@ TEST(ADT_FEATURE_CHW_LAYOUT, ch_increase)
const Shape shape{4, 3, 6};
const CHWLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1) + 6 * 3, l.offset(shape, 2, 1, 1));
+ ASSERT_EQ(l.offset(shape, 2, 1, 1), l.offset(shape, 1, 1, 1) + 6 * 3);
}
diff --git a/compiler/angkor/src/ADT/feature/HWCLayout.test.cpp b/compiler/angkor/src/ADT/feature/HWCLayout.test.cpp
index d1f359753..1cfb29c6f 100644
--- a/compiler/angkor/src/ADT/feature/HWCLayout.test.cpp
+++ b/compiler/angkor/src/ADT/feature/HWCLayout.test.cpp
@@ -29,7 +29,7 @@ TEST(ADT_FEATURE_HWC_LAYOUT, C_increase)
const Shape shape{C, H, W};
const HWCLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1) + 1, l.offset(shape, 2, 1, 1));
+ ASSERT_EQ(l.offset(shape, 2, 1, 1), l.offset(shape, 1, 1, 1) + 1);
}
TEST(ADT_FEATURE_HWC_LAYOUT, W_increase)
@@ -41,7 +41,7 @@ TEST(ADT_FEATURE_HWC_LAYOUT, W_increase)
const Shape shape{C, H, W};
const HWCLayout l;
- ASSERT_EQ(l.offset(shape, 1, 2, 1) + C, l.offset(shape, 1, 2, 2));
+ ASSERT_EQ(l.offset(shape, 1, 2, 2), l.offset(shape, 1, 2, 1) + C);
}
TEST(ADT_FEATURE_HWC_LAYOUT, H_increase)
@@ -53,5 +53,5 @@ TEST(ADT_FEATURE_HWC_LAYOUT, H_increase)
const Shape shape{C, H, W};
const HWCLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1) + W * C, l.offset(shape, 1, 2, 1));
+ ASSERT_EQ(l.offset(shape, 1, 2, 1), l.offset(shape, 1, 1, 1) + W * C);
}
diff --git a/compiler/angkor/src/ADT/feature/Layout.test.cpp b/compiler/angkor/src/ADT/feature/Layout.test.cpp
index 023594e16..de91cb3cd 100644
--- a/compiler/angkor/src/ADT/feature/Layout.test.cpp
+++ b/compiler/angkor/src/ADT/feature/Layout.test.cpp
@@ -28,7 +28,7 @@ TEST(ADT_FEATURE_LAYOUT, ctor)
{
Layout l{offset_0};
- ASSERT_EQ(l.offset(Shape{4, 3, 6}, 1, 1, 1), 0);
+ ASSERT_EQ(0, l.offset(Shape{4, 3, 6}, 1, 1, 1));
}
TEST(ADT_FEATURE_LAYOUT, copy)
@@ -36,11 +36,11 @@ TEST(ADT_FEATURE_LAYOUT, copy)
Layout orig{offset_0};
Layout copy{offset_1};
- ASSERT_EQ(copy.offset(Shape{4, 3, 6}, 1, 1, 1), 1);
+ ASSERT_EQ(1, copy.offset(Shape{4, 3, 6}, 1, 1, 1));
copy = orig;
- ASSERT_EQ(copy.offset(Shape{4, 3, 6}, 1, 1, 1), 0);
+ ASSERT_EQ(0, copy.offset(Shape{4, 3, 6}, 1, 1, 1));
}
TEST(ADT_FEATURE_LAYOUT, move)
@@ -48,9 +48,9 @@ TEST(ADT_FEATURE_LAYOUT, move)
Layout orig{offset_0};
Layout move{offset_1};
- ASSERT_EQ(move.offset(Shape{4, 3, 6}, 1, 1, 1), 1);
+ ASSERT_EQ(1, move.offset(Shape{4, 3, 6}, 1, 1, 1));
move = std::move(orig);
- ASSERT_EQ(move.offset(Shape{4, 3, 6}, 1, 1, 1), 0);
+ ASSERT_EQ(0, move.offset(Shape{4, 3, 6}, 1, 1, 1));
}
diff --git a/compiler/angkor/src/ADT/feature/Overlay.test.cpp b/compiler/angkor/src/ADT/feature/Overlay.test.cpp
index c8e2943f8..8ba28bf5a 100644
--- a/compiler/angkor/src/ADT/feature/Overlay.test.cpp
+++ b/compiler/angkor/src/ADT/feature/Overlay.test.cpp
@@ -34,9 +34,9 @@ TEST(ADT_FEATURE_OVERLAY, ctor)
};
auto overlay = make_overlay<int, CHWLayout>(shape, data);
- ASSERT_EQ(overlay.shape().depth(), shape.depth());
- ASSERT_EQ(overlay.shape().height(), shape.height());
- ASSERT_EQ(overlay.shape().width(), shape.width());
+ ASSERT_EQ(shape.depth(), overlay.shape().depth());
+ ASSERT_EQ(shape.height(), overlay.shape().height());
+ ASSERT_EQ(shape.width(), overlay.shape().width());
}
TEST(ADT_FEATURE_OVERLAY, read)
@@ -50,9 +50,9 @@ TEST(ADT_FEATURE_OVERLAY, read)
CHWLayout layout{};
- ASSERT_EQ(data[layout.offset(shape, 3, 5, 2)], 0);
+ ASSERT_EQ(0, data[layout.offset(shape, 3, 5, 2)]);
data[layout.offset(shape, 3, 5, 2)] = 2;
- ASSERT_EQ(overlay.at(3, 5, 2), 2);
+ ASSERT_EQ(2, overlay.at(3, 5, 2));
}
TEST(ADT_FEATURE_OVERLAY, access)
@@ -66,7 +66,7 @@ TEST(ADT_FEATURE_OVERLAY, access)
CHWLayout layout{};
- ASSERT_EQ(data[layout.offset(shape, 3, 5, 2)], 0);
+ ASSERT_EQ(0, data[layout.offset(shape, 3, 5, 2)]);
overlay.at(3, 5, 2) = 4;
- ASSERT_EQ(data[layout.offset(shape, 3, 5, 2)], 4);
+ ASSERT_EQ(4, data[layout.offset(shape, 3, 5, 2)]);
}
diff --git a/compiler/angkor/src/ADT/feature/Shape.test.cpp b/compiler/angkor/src/ADT/feature/Shape.test.cpp
index 9216182f0..460561bc3 100644
--- a/compiler/angkor/src/ADT/feature/Shape.test.cpp
+++ b/compiler/angkor/src/ADT/feature/Shape.test.cpp
@@ -26,9 +26,9 @@ TEST(ADT_FEATURE_SHAPE, ctor)
nncc::core::ADT::feature::Shape shape{C, H, W};
- ASSERT_EQ(shape.depth(), C);
- ASSERT_EQ(shape.height(), H);
- ASSERT_EQ(shape.width(), W);
+ ASSERT_EQ(C, shape.depth());
+ ASSERT_EQ(H, shape.height());
+ ASSERT_EQ(W, shape.width());
}
TEST(ADT_FEATURE_SHAPE, num_elements)
@@ -40,7 +40,7 @@ TEST(ADT_FEATURE_SHAPE, num_elements)
using nncc::core::ADT::feature::Shape;
using nncc::core::ADT::feature::num_elements;
- ASSERT_EQ(num_elements(Shape{C, H, W}), C * H * W);
+ ASSERT_EQ(C * H * W, num_elements(Shape{C, H, W}));
}
TEST(ADT_FEATURE_SHAPE, operator_eq)
diff --git a/compiler/angkor/src/ADT/kernel/Buffer.test.cpp b/compiler/angkor/src/ADT/kernel/Buffer.test.cpp
index da344593e..a3d92a6f2 100644
--- a/compiler/angkor/src/ADT/kernel/Buffer.test.cpp
+++ b/compiler/angkor/src/ADT/kernel/Buffer.test.cpp
@@ -30,10 +30,10 @@ TEST(ADT_KERNEL_BUFFER, ctor)
const Shape shape{2, 4, 6, 3};
auto buffer = make_buffer<int, NCHWLayout>(shape);
- ASSERT_EQ(buffer.shape().count(), shape.count());
- ASSERT_EQ(buffer.shape().depth(), shape.depth());
- ASSERT_EQ(buffer.shape().height(), shape.height());
- ASSERT_EQ(buffer.shape().width(), shape.width());
+ ASSERT_EQ(shape.count(), buffer.shape().count());
+ ASSERT_EQ(shape.depth(), buffer.shape().depth());
+ ASSERT_EQ(shape.height(), buffer.shape().height());
+ ASSERT_EQ(shape.width(), buffer.shape().width());
}
TEST(ADT_KERNEL_BUFFER, access)
@@ -41,9 +41,9 @@ TEST(ADT_KERNEL_BUFFER, access)
const Shape shape{2, 4, 6, 3};
auto buffer = make_buffer<int, NCHWLayout>(shape);
- ASSERT_EQ(buffer.at(1, 3, 5, 2), 0);
+ ASSERT_EQ(0, buffer.at(1, 3, 5, 2));
buffer.at(1, 3, 5, 2) = 4;
// Casting is introduced to use 'const T &at(...) const' method
- ASSERT_EQ(static_cast<const Buffer<int> &>(buffer).at(1, 3, 5, 2), 4);
+ ASSERT_EQ(4, static_cast<const Buffer<int> &>(buffer).at(1, 3, 5, 2));
}
diff --git a/compiler/angkor/src/ADT/kernel/Layout.test.cpp b/compiler/angkor/src/ADT/kernel/Layout.test.cpp
index 94885cd4e..36234d2a6 100644
--- a/compiler/angkor/src/ADT/kernel/Layout.test.cpp
+++ b/compiler/angkor/src/ADT/kernel/Layout.test.cpp
@@ -28,7 +28,7 @@ TEST(ADT_KERNEL_LAYOUT, ctor)
{
Layout l{offset_0};
- ASSERT_EQ(l.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1), 0);
+ ASSERT_EQ(0, l.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1));
}
TEST(ADT_KERNEL_LAYOUT, copy)
@@ -36,11 +36,11 @@ TEST(ADT_KERNEL_LAYOUT, copy)
Layout orig{offset_0};
Layout copy{offset_1};
- ASSERT_EQ(copy.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1), 1);
+ ASSERT_EQ(1, copy.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1));
copy = orig;
- ASSERT_EQ(copy.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1), 0);
+ ASSERT_EQ(0, copy.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1));
}
TEST(ADT_KERNEL_LAYOUT, move)
@@ -48,9 +48,9 @@ TEST(ADT_KERNEL_LAYOUT, move)
Layout orig{offset_0};
Layout move{offset_1};
- ASSERT_EQ(move.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1), 1);
+ ASSERT_EQ(1, move.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1));
move = std::move(orig);
- ASSERT_EQ(move.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1), 0);
+ ASSERT_EQ(0, move.offset(Shape{4, 3, 6, 5}, 1, 1, 1, 1));
}
diff --git a/compiler/angkor/src/ADT/kernel/NCHWLayout.test.cpp b/compiler/angkor/src/ADT/kernel/NCHWLayout.test.cpp
index ba03b7b04..578bc58c7 100644
--- a/compiler/angkor/src/ADT/kernel/NCHWLayout.test.cpp
+++ b/compiler/angkor/src/ADT/kernel/NCHWLayout.test.cpp
@@ -25,7 +25,7 @@ TEST(ADT_KERNEL_KERNEL_NCHW_LAYOUT, col_increment)
const Shape shape{4, 3, 6, 5};
const NCHWLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + 1, l.offset(shape, 1, 1, 1, 2));
+ ASSERT_EQ(l.offset(shape, 1, 1, 1, 2), l.offset(shape, 1, 1, 1, 1) + 1);
}
TEST(ADT_KERNEL_KERNEL_NCHW_LAYOUT, row_increment)
@@ -33,7 +33,7 @@ TEST(ADT_KERNEL_KERNEL_NCHW_LAYOUT, row_increment)
const Shape shape{4, 3, 6, 5};
const NCHWLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + 5, l.offset(shape, 1, 1, 2, 1));
+ ASSERT_EQ(l.offset(shape, 1, 1, 2, 1), l.offset(shape, 1, 1, 1, 1) + 5);
}
TEST(ADT_KERNEL_KERNEL_NCHW_LAYOUT, ch_increment)
@@ -41,7 +41,7 @@ TEST(ADT_KERNEL_KERNEL_NCHW_LAYOUT, ch_increment)
const Shape shape{4, 3, 6, 5};
const NCHWLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + 6 * 5, l.offset(shape, 1, 2, 1, 1));
+ ASSERT_EQ(l.offset(shape, 1, 2, 1, 1), l.offset(shape, 1, 1, 1, 1) + 6 * 5);
}
TEST(ADT_KERNEL_KERNEL_NCHW_LAYOUT, n_increment)
@@ -49,5 +49,5 @@ TEST(ADT_KERNEL_KERNEL_NCHW_LAYOUT, n_increment)
const Shape shape{4, 3, 6, 5};
const NCHWLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + 3 * 6 * 5, l.offset(shape, 2, 1, 1, 1));
+ ASSERT_EQ(l.offset(shape, 2, 1, 1, 1), l.offset(shape, 1, 1, 1, 1) + 3 * 6 * 5);
}
diff --git a/compiler/angkor/src/ADT/kernel/NHWCLayout.test.cpp b/compiler/angkor/src/ADT/kernel/NHWCLayout.test.cpp
index 2c5df7d89..184e10751 100644
--- a/compiler/angkor/src/ADT/kernel/NHWCLayout.test.cpp
+++ b/compiler/angkor/src/ADT/kernel/NHWCLayout.test.cpp
@@ -31,7 +31,7 @@ TEST(ADT_KERNEL_KERNEL_NHWC_LAYOUT, ch_increment)
const Shape shape{N, C, H, W};
const NHWCLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + 1, l.offset(shape, 1, 2, 1, 1));
+ ASSERT_EQ(l.offset(shape, 1, 2, 1, 1), l.offset(shape, 1, 1, 1, 1) + 1);
}
TEST(ADT_KERNEL_KERNEL_NHWC_LAYOUT, col_increment)
@@ -44,7 +44,7 @@ TEST(ADT_KERNEL_KERNEL_NHWC_LAYOUT, col_increment)
const Shape shape{N, C, H, W};
const NHWCLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + C, l.offset(shape, 1, 1, 1, 2));
+ ASSERT_EQ(l.offset(shape, 1, 1, 1, 2), l.offset(shape, 1, 1, 1, 1) + C);
}
TEST(ADT_KERNEL_KERNEL_NHWC_LAYOUT, row_increment)
@@ -57,7 +57,7 @@ TEST(ADT_KERNEL_KERNEL_NHWC_LAYOUT, row_increment)
const Shape shape{N, C, H, W};
const NHWCLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + C * W, l.offset(shape, 1, 1, 2, 1));
+ ASSERT_EQ(l.offset(shape, 1, 1, 2, 1), l.offset(shape, 1, 1, 1, 1) + C * W);
}
TEST(ADT_KERNEL_KERNEL_NHWC_LAYOUT, n_increment)
@@ -70,5 +70,5 @@ TEST(ADT_KERNEL_KERNEL_NHWC_LAYOUT, n_increment)
const Shape shape{N, C, H, W};
const NHWCLayout l;
- ASSERT_EQ(l.offset(shape, 1, 1, 1, 1) + H * W * C, l.offset(shape, 2, 1, 1, 1));
+ ASSERT_EQ(l.offset(shape, 2, 1, 1, 1), l.offset(shape, 1, 1, 1, 1) + H * W * C);
}
diff --git a/compiler/angkor/src/ADT/kernel/Overlay.test.cpp b/compiler/angkor/src/ADT/kernel/Overlay.test.cpp
index e80ebbc30..4e9bd8dbd 100644
--- a/compiler/angkor/src/ADT/kernel/Overlay.test.cpp
+++ b/compiler/angkor/src/ADT/kernel/Overlay.test.cpp
@@ -34,10 +34,10 @@ TEST(ADT_KERNEL_OVERLAY, ctor)
};
auto overlay = make_overlay<int, NCHWLayout>(shape, data);
- ASSERT_EQ(overlay.shape().count(), shape.count());
- ASSERT_EQ(overlay.shape().depth(), shape.depth());
- ASSERT_EQ(overlay.shape().height(), shape.height());
- ASSERT_EQ(overlay.shape().width(), shape.width());
+ ASSERT_EQ(shape.count(), overlay.shape().count());
+ ASSERT_EQ(shape.depth(), overlay.shape().depth());
+ ASSERT_EQ(shape.height(), overlay.shape().height());
+ ASSERT_EQ(shape.width(), overlay.shape().width());
}
TEST(ADT_KERNEL_OVERLAY, read)
@@ -51,9 +51,9 @@ TEST(ADT_KERNEL_OVERLAY, read)
NCHWLayout layout{};
- ASSERT_EQ(data[layout.offset(shape, 1, 3, 5, 2)], 0);
+ ASSERT_EQ(0, data[layout.offset(shape, 1, 3, 5, 2)]);
data[layout.offset(shape, 1, 3, 5, 2)] = 2;
- ASSERT_EQ(overlay.at(1, 3, 5, 2), 2);
+ ASSERT_EQ(2, overlay.at(1, 3, 5, 2));
}
TEST(ADT_KERNEL_OVERLAY, access)
@@ -67,7 +67,7 @@ TEST(ADT_KERNEL_OVERLAY, access)
NCHWLayout layout{};
- ASSERT_EQ(data[layout.offset(shape, 1, 3, 5, 2)], 0);
+ ASSERT_EQ(0, data[layout.offset(shape, 1, 3, 5, 2)]);
overlay.at(1, 3, 5, 2) = 4;
- ASSERT_EQ(data[layout.offset(shape, 1, 3, 5, 2)], 4);
+ ASSERT_EQ(4, data[layout.offset(shape, 1, 3, 5, 2)]);
}
diff --git a/compiler/angkor/src/ADT/kernel/Shape.test.cpp b/compiler/angkor/src/ADT/kernel/Shape.test.cpp
index da608fb7f..b2bd13c77 100644
--- a/compiler/angkor/src/ADT/kernel/Shape.test.cpp
+++ b/compiler/angkor/src/ADT/kernel/Shape.test.cpp
@@ -27,10 +27,10 @@ TEST(ADT_KERNEL_SHAPE, ctor)
nncc::core::ADT::kernel::Shape shape{N, C, H, W};
- ASSERT_EQ(shape.count(), N);
- ASSERT_EQ(shape.depth(), C);
- ASSERT_EQ(shape.height(), H);
- ASSERT_EQ(shape.width(), W);
+ ASSERT_EQ(N, shape.count());
+ ASSERT_EQ(C, shape.depth());
+ ASSERT_EQ(H, shape.height());
+ ASSERT_EQ(W, shape.width());
}
TEST(ADT_KERNEL_SHAPE, num_elements)
@@ -43,7 +43,7 @@ TEST(ADT_KERNEL_SHAPE, num_elements)
using nncc::core::ADT::kernel::Shape;
using nncc::core::ADT::kernel::num_elements;
- ASSERT_EQ(num_elements(Shape{N, C, H, W}), N * C * H * W);
+ ASSERT_EQ(N * C * H * W, num_elements(Shape{N, C, H, W}));
}
TEST(ADT_KERNEL_SHAPE, operator_eq)
diff --git a/compiler/angkor/src/ADT/tensor/Buffer.test.cpp b/compiler/angkor/src/ADT/tensor/Buffer.test.cpp
index c2b6a9983..39d0a8068 100644
--- a/compiler/angkor/src/ADT/tensor/Buffer.test.cpp
+++ b/compiler/angkor/src/ADT/tensor/Buffer.test.cpp
@@ -31,7 +31,7 @@ TEST(ADT_TENSOR_BUFFER, ctor)
const Shape shape{2, 3};
auto buffer = make_buffer<int, LexicalLayout>(shape);
- ASSERT_EQ(buffer.shape(), shape);
+ ASSERT_EQ(shape, buffer.shape());
}
TEST(ADT_TENSOR_BUFFER, access)
@@ -41,9 +41,9 @@ TEST(ADT_TENSOR_BUFFER, access)
const Index index{1, 2};
- ASSERT_EQ(buffer.at(index), 0);
+ ASSERT_EQ(0, buffer.at(index));
buffer.at(index) = 4;
// Casting is introduced to use 'const T &at(...) const' method
- ASSERT_EQ(static_cast<const Buffer<int> &>(buffer).at(index), 4);
+ ASSERT_EQ(4, static_cast<const Buffer<int> &>(buffer).at(index));
}
diff --git a/compiler/angkor/src/ADT/tensor/Index.test.cpp b/compiler/angkor/src/ADT/tensor/Index.test.cpp
index 230602816..53dbd41d4 100644
--- a/compiler/angkor/src/ADT/tensor/Index.test.cpp
+++ b/compiler/angkor/src/ADT/tensor/Index.test.cpp
@@ -22,19 +22,19 @@ TEST(ADT_TENSOR_INDEX, ctor)
{
nncc::core::ADT::tensor::Index index;
- ASSERT_EQ(index.rank(), 0);
+ ASSERT_EQ(0, index.rank());
}
TEST(ADT_TENSOR_INDEX, ctor_initializer_list)
{
const nncc::core::ADT::tensor::Index index{1, 3, 5, 7};
- ASSERT_EQ(index.rank(), 4);
+ ASSERT_EQ(4, index.rank());
- ASSERT_EQ(index.at(0), 1);
- ASSERT_EQ(index.at(1), 3);
- ASSERT_EQ(index.at(2), 5);
- ASSERT_EQ(index.at(3), 7);
+ ASSERT_EQ(1, index.at(0));
+ ASSERT_EQ(3, index.at(1));
+ ASSERT_EQ(5, index.at(2));
+ ASSERT_EQ(7, index.at(3));
}
TEST(ADT_TENSOR_INDEX, operator_add)
@@ -43,10 +43,10 @@ TEST(ADT_TENSOR_INDEX, operator_add)
nncc::core::ADT::tensor::Index index2{5, 6, 7, 8};
nncc::core::ADT::tensor::Index result{index1 + index2};
- ASSERT_EQ(result.at(0), 6);
- ASSERT_EQ(result.at(1), 8);
- ASSERT_EQ(result.at(2), 10);
- ASSERT_EQ(result.at(3), 12);
+ ASSERT_EQ(6, result.at(0));
+ ASSERT_EQ(8, result.at(1));
+ ASSERT_EQ(10, result.at(2));
+ ASSERT_EQ(12, result.at(3));
}
TEST(ADT_TENSOR_INDEX, operator_eqaul)
@@ -75,7 +75,7 @@ TEST(ADT_TENSOR_INDEX, resize)
index.resize(4);
- ASSERT_EQ(index.rank(), 4);
+ ASSERT_EQ(4, index.rank());
}
TEST(ADT_TENSOR_INDEX, at)
@@ -89,7 +89,7 @@ TEST(ADT_TENSOR_INDEX, at)
for (uint32_t axis = 0; axis < 4; ++axis)
{
index.at(axis) = indices[axis];
- ASSERT_EQ(index.at(axis), indices[axis]);
+ ASSERT_EQ(indices[axis], index.at(axis));
}
}
@@ -98,11 +98,11 @@ TEST(ADT_TENSOR_INDEX, copy)
const nncc::core::ADT::tensor::Index original{3, 5, 2, 7};
const nncc::core::ADT::tensor::Index copied{original};
- ASSERT_EQ(original.rank(), copied.rank());
+ ASSERT_EQ(copied.rank(), original.rank());
for (uint32_t axis = 0; axis < 4; ++axis)
{
- ASSERT_EQ(original.at(axis), copied.at(axis));
+ ASSERT_EQ(copied.at(axis), original.at(axis));
}
}
@@ -112,8 +112,8 @@ TEST(ADT_TENSOR_INDEX, fill)
index.fill(3);
- ASSERT_EQ(index.rank(), 2);
+ ASSERT_EQ(2, index.rank());
- ASSERT_EQ(index.at(0), 3);
- ASSERT_EQ(index.at(1), 3);
+ ASSERT_EQ(3, index.at(0));
+ ASSERT_EQ(3, index.at(1));
}
diff --git a/compiler/angkor/src/ADT/tensor/IndexEnumerator.test.cpp b/compiler/angkor/src/ADT/tensor/IndexEnumerator.test.cpp
index 204a8aa21..54cc2e9ad 100644
--- a/compiler/angkor/src/ADT/tensor/IndexEnumerator.test.cpp
+++ b/compiler/angkor/src/ADT/tensor/IndexEnumerator.test.cpp
@@ -40,7 +40,7 @@ TEST(ADT_TENSOR_INDEX_ENUMERATOR, iterate_full_range)
{
const auto &ind = e.current();
- ASSERT_EQ(ind.rank(), 2);
+ ASSERT_EQ(2, ind.rank());
count.at(ind.at(0) * W + ind.at(1)) += 1;
}
diff --git a/compiler/angkor/src/ADT/tensor/Layout.test.cpp b/compiler/angkor/src/ADT/tensor/Layout.test.cpp
index 145adfecc..6d5b3fe71 100644
--- a/compiler/angkor/src/ADT/tensor/Layout.test.cpp
+++ b/compiler/angkor/src/ADT/tensor/Layout.test.cpp
@@ -28,7 +28,7 @@ TEST(ADT_TENSOR_LAYOUT, ctor)
{
nncc::core::ADT::tensor::Layout l{offset_0};
- ASSERT_EQ(l.offset(Shape{4, 3, 6}, Index{1, 1, 1}), 0);
+ ASSERT_EQ(0, l.offset(Shape{4, 3, 6}, Index{1, 1, 1}));
}
TEST(ADT_TENSOR_LAYOUT, copy)
@@ -36,11 +36,11 @@ TEST(ADT_TENSOR_LAYOUT, copy)
nncc::core::ADT::tensor::Layout orig{offset_0};
nncc::core::ADT::tensor::Layout copy{offset_1};
- ASSERT_EQ(copy.offset(Shape{4, 3, 6}, Index{1, 1, 1}), 1);
+ ASSERT_EQ(1, copy.offset(Shape{4, 3, 6}, Index{1, 1, 1}));
copy = orig;
- ASSERT_EQ(copy.offset(Shape{4, 3, 6}, Index{1, 1, 1}), 0);
+ ASSERT_EQ(0, copy.offset(Shape{4, 3, 6}, Index{1, 1, 1}));
}
TEST(ADT_TENSOR_LAYOUT, move)
@@ -48,9 +48,9 @@ TEST(ADT_TENSOR_LAYOUT, move)
nncc::core::ADT::tensor::Layout orig{offset_0};
nncc::core::ADT::tensor::Layout move{offset_1};
- ASSERT_EQ(move.offset(Shape{4, 3, 6}, Index{1, 1, 1}), 1);
+ ASSERT_EQ(1, move.offset(Shape{4, 3, 6}, Index{1, 1, 1}));
move = std::move(orig);
- ASSERT_EQ(move.offset(Shape{4, 3, 6}, Index{1, 1, 1}), 0);
+ ASSERT_EQ(0, move.offset(Shape{4, 3, 6}, Index{1, 1, 1}));
}
diff --git a/compiler/angkor/src/ADT/tensor/LexicalLayout.test.cpp b/compiler/angkor/src/ADT/tensor/LexicalLayout.test.cpp
index 8f9b7296f..0acaa3a86 100644
--- a/compiler/angkor/src/ADT/tensor/LexicalLayout.test.cpp
+++ b/compiler/angkor/src/ADT/tensor/LexicalLayout.test.cpp
@@ -28,7 +28,7 @@ TEST(ADT_TENSOR_LEXICAL_LAYOUT, last)
const nncc::core::ADT::tensor::LexicalLayout l;
- ASSERT_EQ(l.offset(shape, curr) + 1, l.offset(shape, next));
+ ASSERT_EQ(l.offset(shape, next), l.offset(shape, curr) + 1);
}
TEST(ADT_TENSOR_LEXICAL_LAYOUT, lexical_middle)
@@ -39,7 +39,7 @@ TEST(ADT_TENSOR_LEXICAL_LAYOUT, lexical_middle)
const nncc::core::ADT::tensor::LexicalLayout l;
- ASSERT_EQ(l.offset(shape, curr) + 6, l.offset(shape, next));
+ ASSERT_EQ(l.offset(shape, next), l.offset(shape, curr) + 6);
}
TEST(ADT_TENSOR_LEXICAL_LAYOUT, lexical_first)
@@ -50,5 +50,5 @@ TEST(ADT_TENSOR_LEXICAL_LAYOUT, lexical_first)
const nncc::core::ADT::tensor::LexicalLayout l;
- ASSERT_EQ(l.offset(shape, curr) + 6 * 3, l.offset(shape, next));
+ ASSERT_EQ(l.offset(shape, next), l.offset(shape, curr) + 6 * 3);
}
diff --git a/compiler/angkor/src/ADT/tensor/Overlay.test.cpp b/compiler/angkor/src/ADT/tensor/Overlay.test.cpp
index aacb5a9a1..57cd1e6f9 100644
--- a/compiler/angkor/src/ADT/tensor/Overlay.test.cpp
+++ b/compiler/angkor/src/ADT/tensor/Overlay.test.cpp
@@ -35,7 +35,7 @@ TEST(ADT_TENSOR_OVERLAY, ctor)
};
auto view = make_overlay<int, LexicalLayout>(shape, data);
- ASSERT_EQ(view.shape(), shape);
+ ASSERT_EQ(shape, view.shape());
}
TEST(ADT_TENSOR_OVERLAY, read)
@@ -51,9 +51,9 @@ TEST(ADT_TENSOR_OVERLAY, read)
const Index index{1, 2};
- ASSERT_EQ(data[layout.offset(shape, index)], 0);
+ ASSERT_EQ(0, data[layout.offset(shape, index)]);
data[layout.offset(shape, index)] = 2;
- ASSERT_EQ(view.at(index), 2);
+ ASSERT_EQ(2, view.at(index));
}
TEST(ADT_TENSOR_OVERLAY, access)
@@ -69,7 +69,7 @@ TEST(ADT_TENSOR_OVERLAY, access)
const Index index{1, 2};
- ASSERT_EQ(data[layout.offset(shape, index)], 0);
+ ASSERT_EQ(0, data[layout.offset(shape, index)]);
view.at(index) = 4;
- ASSERT_EQ(data[layout.offset(shape, index)], 4);
+ ASSERT_EQ(4, data[layout.offset(shape, index)]);
}
diff --git a/compiler/angkor/src/ADT/tensor/Shape.test.cpp b/compiler/angkor/src/ADT/tensor/Shape.test.cpp
index 711ae3d40..9915e7877 100644
--- a/compiler/angkor/src/ADT/tensor/Shape.test.cpp
+++ b/compiler/angkor/src/ADT/tensor/Shape.test.cpp
@@ -22,19 +22,19 @@ TEST(ADT_TENSOR_SHAPE, ctor)
{
nncc::core::ADT::tensor::Shape shape;
- ASSERT_EQ(shape.rank(), 0);
+ ASSERT_EQ(0, shape.rank());
}
TEST(ADT_TENSOR_SHAPE, ctor_initializer_list)
{
nncc::core::ADT::tensor::Shape shape{1, 3, 5, 7};
- ASSERT_EQ(shape.rank(), 4);
+ ASSERT_EQ(4, shape.rank());
- ASSERT_EQ(shape.dim(0), 1);
- ASSERT_EQ(shape.dim(1), 3);
- ASSERT_EQ(shape.dim(2), 5);
- ASSERT_EQ(shape.dim(3), 7);
+ ASSERT_EQ(1, shape.dim(0));
+ ASSERT_EQ(3, shape.dim(1));
+ ASSERT_EQ(5, shape.dim(2));
+ ASSERT_EQ(7, shape.dim(3));
}
TEST(ADT_TENSOR_SHAPE, resize)
@@ -43,7 +43,7 @@ TEST(ADT_TENSOR_SHAPE, resize)
shape.resize(4);
- ASSERT_EQ(shape.rank(), 4);
+ ASSERT_EQ(4, shape.rank());
}
TEST(ADT_TENSOR_SHAPE, dim)
@@ -57,7 +57,7 @@ TEST(ADT_TENSOR_SHAPE, dim)
for (uint32_t axis = 0; axis < 4; ++axis)
{
shape.dim(axis) = dims[axis];
- ASSERT_EQ(shape.dim(axis), dims[axis]);
+ ASSERT_EQ(dims[axis], shape.dim(axis));
}
}
@@ -66,11 +66,11 @@ TEST(ADT_TENSOR_SHAPE, copy)
const nncc::core::ADT::tensor::Shape original{3, 5, 2, 7};
const nncc::core::ADT::tensor::Shape copied{original};
- ASSERT_EQ(original.rank(), copied.rank());
+ ASSERT_EQ(copied.rank(), original.rank());
for (uint32_t axis = 0; axis < 4; ++axis)
{
- ASSERT_EQ(original.dim(axis), copied.dim(axis));
+ ASSERT_EQ(copied.dim(axis), original.dim(axis));
}
}
@@ -81,7 +81,7 @@ TEST(ADT_TENSOR_SHAPE, num_elements_rank_0)
Shape rank_0_shape;
- ASSERT_EQ(num_elements(rank_0_shape), 1);
+ ASSERT_EQ(1, num_elements(rank_0_shape));
}
TEST(ADT_TENSOR_SHAPE, num_elements_zero)
@@ -89,7 +89,7 @@ TEST(ADT_TENSOR_SHAPE, num_elements_zero)
using nncc::core::ADT::tensor::Shape;
using nncc::core::ADT::tensor::num_elements;
- ASSERT_EQ(num_elements(Shape{0, 0, 0, 0}), 0);
+ ASSERT_EQ(0, num_elements(Shape{0, 0, 0, 0}));
}
TEST(ADT_TENSOR_SHAPE, num_elements_nonzero)
@@ -97,7 +97,7 @@ TEST(ADT_TENSOR_SHAPE, num_elements_nonzero)
using nncc::core::ADT::tensor::Shape;
using nncc::core::ADT::tensor::num_elements;
- ASSERT_EQ(num_elements(Shape{2, 3}), 6);
+ ASSERT_EQ(6, num_elements(Shape{2, 3}));
}
TEST(ADT_TENSOR_SHAPE, num_elements_nulldim)
@@ -105,7 +105,7 @@ TEST(ADT_TENSOR_SHAPE, num_elements_nulldim)
using nncc::core::ADT::tensor::Shape;
using nncc::core::ADT::tensor::num_elements;
- ASSERT_EQ(num_elements(Shape{2, 0, 3}), 0);
+ ASSERT_EQ(0, num_elements(Shape{2, 0, 3}));
}
TEST(ADT_TENSOR_SHAPE, squeeze_neg)
@@ -115,10 +115,10 @@ TEST(ADT_TENSOR_SHAPE, squeeze_neg)
auto squeezed = squeeze(Shape{3, 5, 2});
- ASSERT_EQ(squeezed.rank(), 3);
- ASSERT_EQ(squeezed.dim(0), 3);
- ASSERT_EQ(squeezed.dim(1), 5);
- ASSERT_EQ(squeezed.dim(2), 2);
+ ASSERT_EQ(3, squeezed.rank());
+ ASSERT_EQ(3, squeezed.dim(0));
+ ASSERT_EQ(5, squeezed.dim(1));
+ ASSERT_EQ(2, squeezed.dim(2));
}
TEST(ADT_TENSOR_SHAPE, squeeze_neg_0)
@@ -128,10 +128,10 @@ TEST(ADT_TENSOR_SHAPE, squeeze_neg_0)
auto squeezed = squeeze(Shape{3, 0, 2});
- ASSERT_EQ(squeezed.rank(), 3);
- ASSERT_EQ(squeezed.dim(0), 3);
- ASSERT_EQ(squeezed.dim(1), 0);
- ASSERT_EQ(squeezed.dim(2), 2);
+ ASSERT_EQ(3, squeezed.rank());
+ ASSERT_EQ(3, squeezed.dim(0));
+ ASSERT_EQ(0, squeezed.dim(1));
+ ASSERT_EQ(2, squeezed.dim(2));
}
TEST(ADT_TENSOR_SHAPE, squeeze_pos)
@@ -141,9 +141,9 @@ TEST(ADT_TENSOR_SHAPE, squeeze_pos)
auto squeezed = squeeze(Shape{3, 1, 2});
- ASSERT_EQ(squeezed.rank(), 2);
- ASSERT_EQ(squeezed.dim(0), 3);
- ASSERT_EQ(squeezed.dim(1), 2);
+ ASSERT_EQ(2, squeezed.rank());
+ ASSERT_EQ(3, squeezed.dim(0));
+ ASSERT_EQ(2, squeezed.dim(1));
}
TEST(ADT_TENSOR_SHAPE, squeeze_nested)
@@ -155,9 +155,9 @@ TEST(ADT_TENSOR_SHAPE, squeeze_nested)
shape.squeeze().squeeze();
- ASSERT_EQ(shape.rank(), 2);
- ASSERT_EQ(shape.dim(0), 3);
- ASSERT_EQ(shape.dim(1), 2);
+ ASSERT_EQ(2, shape.rank());
+ ASSERT_EQ(3, shape.dim(0));
+ ASSERT_EQ(2, shape.dim(1));
}
TEST(ADT_TENSOR_SHAPE, eq_negative_on_unmatched_rank)
diff --git a/compiler/angkor/src/TensorIndex.test.cpp b/compiler/angkor/src/TensorIndex.test.cpp
index 68cf3917a..dcfc4d39f 100644
--- a/compiler/angkor/src/TensorIndex.test.cpp
+++ b/compiler/angkor/src/TensorIndex.test.cpp
@@ -22,19 +22,19 @@ TEST(TensorIndexTest, ctor)
{
angkor::TensorIndex index;
- ASSERT_EQ(index.rank(), 0);
+ ASSERT_EQ(0, index.rank());
}
TEST(TensorIndexTest, ctor_initializer_list)
{
const angkor::TensorIndex index{1, 3, 5, 7};
- ASSERT_EQ(index.rank(), 4);
+ ASSERT_EQ(4, index.rank());
- ASSERT_EQ(index.at(0), 1);
- ASSERT_EQ(index.at(1), 3);
- ASSERT_EQ(index.at(2), 5);
- ASSERT_EQ(index.at(3), 7);
+ ASSERT_EQ(1, index.at(0));
+ ASSERT_EQ(3, index.at(1));
+ ASSERT_EQ(5, index.at(2));
+ ASSERT_EQ(7, index.at(3));
}
TEST(TensorIndexTest, resize)
@@ -43,7 +43,7 @@ TEST(TensorIndexTest, resize)
index.resize(4);
- ASSERT_EQ(index.rank(), 4);
+ ASSERT_EQ(4, index.rank());
}
TEST(TensorIndexTest, at)
@@ -57,7 +57,7 @@ TEST(TensorIndexTest, at)
for (uint32_t axis = 0; axis < 4; ++axis)
{
index.at(axis) = indices[axis];
- ASSERT_EQ(index.at(axis), indices[axis]);
+ ASSERT_EQ(indices[axis], index.at(axis));
}
}
@@ -66,11 +66,11 @@ TEST(TensorIndexTest, copy)
const angkor::TensorIndex original{3, 5, 2, 7};
const angkor::TensorIndex copied{original};
- ASSERT_EQ(original.rank(), copied.rank());
+ ASSERT_EQ(copied.rank(), original.rank());
for (uint32_t axis = 0; axis < 4; ++axis)
{
- ASSERT_EQ(original.at(axis), copied.at(axis));
+ ASSERT_EQ(copied.at(axis), original.at(axis));
}
}
@@ -80,8 +80,8 @@ TEST(TensorIndexTest, fill)
index.fill(3);
- ASSERT_EQ(index.rank(), 2);
+ ASSERT_EQ(2, index.rank());
- ASSERT_EQ(index.at(0), 3);
- ASSERT_EQ(index.at(1), 3);
+ ASSERT_EQ(3, index.at(0));
+ ASSERT_EQ(3, index.at(1));
}
diff --git a/compiler/angkor/src/TensorShape.test.cpp b/compiler/angkor/src/TensorShape.test.cpp
index 5e6766a96..3b96bb863 100644
--- a/compiler/angkor/src/TensorShape.test.cpp
+++ b/compiler/angkor/src/TensorShape.test.cpp
@@ -22,19 +22,19 @@ TEST(TensorShapeTest, ctor)
{
angkor::TensorShape shape;
- ASSERT_EQ(shape.rank(), 0);
+ ASSERT_EQ(0, shape.rank());
}
TEST(TensorShapeTest, ctor_initializer_list)
{
angkor::TensorShape shape{1, 3, 5, 7};
- ASSERT_EQ(shape.rank(), 4);
+ ASSERT_EQ(4, shape.rank());
- ASSERT_EQ(shape.dim(0), 1);
- ASSERT_EQ(shape.dim(1), 3);
- ASSERT_EQ(shape.dim(2), 5);
- ASSERT_EQ(shape.dim(3), 7);
+ ASSERT_EQ(1, shape.dim(0));
+ ASSERT_EQ(3, shape.dim(1));
+ ASSERT_EQ(5, shape.dim(2));
+ ASSERT_EQ(7, shape.dim(3));
}
TEST(TensorShapeTest, resize)
@@ -43,7 +43,7 @@ TEST(TensorShapeTest, resize)
shape.resize(4);
- ASSERT_EQ(shape.rank(), 4);
+ ASSERT_EQ(4, shape.rank());
}
TEST(TensorShapeTest, dim)
@@ -57,7 +57,7 @@ TEST(TensorShapeTest, dim)
for (uint32_t axis = 0; axis < 4; ++axis)
{
shape.dim(axis) = dims[axis];
- ASSERT_EQ(shape.dim(axis), dims[axis]);
+ ASSERT_EQ(dims[axis], shape.dim(axis));
}
}
@@ -66,11 +66,11 @@ TEST(TensorShapeTest, copy)
const angkor::TensorShape original{3, 5, 2, 7};
const angkor::TensorShape copied{original};
- ASSERT_EQ(original.rank(), copied.rank());
+ ASSERT_EQ(copied.rank(), original.rank());
for (uint32_t axis = 0; axis < 4; ++axis)
{
- ASSERT_EQ(original.dim(axis), copied.dim(axis));
+ ASSERT_EQ(copied.dim(axis), original.dim(axis));
}
}
diff --git a/compiler/ann-api/README.md b/compiler/ann-api/README.md
new file mode 100644
index 000000000..0c141168b
--- /dev/null
+++ b/compiler/ann-api/README.md
@@ -0,0 +1 @@
+# ann-api
diff --git a/compiler/arser/CMakeLists.txt b/compiler/arser/CMakeLists.txt
new file mode 100644
index 000000000..63d19f538
--- /dev/null
+++ b/compiler/arser/CMakeLists.txt
@@ -0,0 +1,15 @@
+add_library(arser INTERFACE)
+
+# It specifies INTERFACE so that future targets linked with arser library will inherit its include directory.
+# It means that a developer who want to link arser just need to add one line.
+# target_link_library(another-users-target arser)
+target_include_directories(arser INTERFACE include/)
+
+if(NOT ENABLE_TEST)
+ return()
+endif(NOT ENABLE_TEST)
+
+nnas_find_package(GTest REQUIRED)
+set(TESTS "${CMAKE_CURRENT_SOURCE_DIR}/tests/arser.test.cpp")
+GTest_AddTest(arser_test ${TESTS})
+target_include_directories(arser_test PRIVATE include)
diff --git a/compiler/arser/README.md b/compiler/arser/README.md
new file mode 100644
index 000000000..e853e7eea
--- /dev/null
+++ b/compiler/arser/README.md
@@ -0,0 +1,3 @@
+# arser
+
+This is an Argument parser for c++. See [`arser.test.cpp`](tests/arser.test.cpp) for details on how to use
diff --git a/compiler/arser/include/arser/arser.h b/compiler/arser/include/arser/arser.h
new file mode 100644
index 000000000..64bb557c4
--- /dev/null
+++ b/compiler/arser/include/arser/arser.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <sstream>
+
+#include <iterator>
+#include <typeinfo>
+
+#include <algorithm>
+#include <functional>
+#include <list>
+#include <map>
+#include <string>
+#include <vector>
+
+#include <cstring>
+
+namespace
+{
+
+template <typename T> T lexical_cast(const std::string &str)
+{
+ std::istringstream ss;
+ ss.str(str);
+ T data;
+ ss >> data;
+ return data;
+}
+
+template <> bool lexical_cast(const std::string &str)
+{
+ bool data = true;
+ if (str == "false" || str == "False" || str == "FALSE" || str == "0")
+ data = false;
+ return data;
+}
+
+template <typename T> inline std::string to_string(const T value) { return std::to_string(value); }
+
+template <> inline std::string to_string(const char *value) { return std::string(value); }
+
+template <> inline std::string to_string(const bool value) { return value ? "true" : "false"; }
+
+} // namespace
+
+namespace arser
+{
+
+// TypeName declaration
+template <typename T> struct TypeName
+{
+ static const char *Get() { return typeid(T).name(); }
+};
+template <> struct TypeName<int>
+{
+ static const char *Get() { return "int"; }
+};
+template <> struct TypeName<std::vector<int>>
+{
+ static const char *Get() { return "vector<int>"; }
+};
+template <> struct TypeName<float>
+{
+ static const char *Get() { return "float"; }
+};
+template <> struct TypeName<std::vector<float>>
+{
+ static const char *Get() { return "vector<float>"; }
+};
+template <> struct TypeName<bool>
+{
+ static const char *Get() { return "bool"; }
+};
+template <> struct TypeName<std::string>
+{
+ static const char *Get() { return "string"; }
+};
+template <> struct TypeName<std::vector<std::string>>
+{
+ static const char *Get() { return "vector<string>"; }
+};
+template <> struct TypeName<const char *>
+{
+ static const char *Get() { return "string"; }
+};
+template <> struct TypeName<std::vector<const char *>>
+{
+ static const char *Get() { return "vector<string>"; }
+};
+
+// supported DataType
+enum class DataType
+{
+ INT32,
+ INT32_VEC,
+ FLOAT,
+ FLOAT_VEC,
+ BOOL,
+ STR,
+ STR_VEC,
+};
+
+class Arser;
+
+class Argument
+{
+public:
+ explicit Argument(const std::string &arg_name) : _name{arg_name} {}
+
+ Argument &nargs(uint32_t num)
+ {
+ if (num == 0)
+ {
+ _type = "bool";
+ }
+ _nargs = num;
+ return *this;
+ }
+
+ Argument &type(DataType type)
+ {
+ switch (type)
+ {
+ case DataType::INT32:
+ _type = "int";
+ break;
+ case DataType::INT32_VEC:
+ _type = "vector<int>";
+ break;
+ case DataType::FLOAT:
+ _type = "float";
+ break;
+ case DataType::FLOAT_VEC:
+ _type = "vector<float>";
+ break;
+ case DataType::BOOL:
+ _type = "bool";
+ break;
+ case DataType::STR:
+ _type = "string";
+ break;
+ case DataType::STR_VEC:
+ _type = "vector<string>";
+ break;
+ default:
+ throw std::runtime_error("NYI DataType");
+ }
+ return *this;
+ }
+
+ Argument &required(void)
+ {
+ _is_required = true;
+ return *this;
+ }
+
+ Argument &required(bool value)
+ {
+ _is_required = value;
+ return *this;
+ }
+
+ Argument &help(std::string help_message)
+ {
+ _help_message = help_message;
+ return *this;
+ }
+
+ Argument &exit_with(const std::function<void(void)> &func)
+ {
+ _func = func;
+ return *this;
+ }
+
+ template <typename T> Argument &default_value(const T value)
+ {
+ if ((_nargs <= 1 && TypeName<T>::Get() == _type) ||
+ (_nargs > 1 && TypeName<std::vector<T>>::Get() == _type))
+ _values.emplace_back(::to_string(value));
+ else
+ {
+ throw std::runtime_error("Type mismatch. "
+ "You called default_value() method with a type different "
+ "from the one you specified. "
+ "Please check the type of what you specified in "
+ "add_argument() method.");
+ }
+ return *this;
+ }
+
+ template <typename T, typename... Ts> Argument &default_value(const T value, const Ts... values)
+ {
+ if ((_nargs <= 1 && TypeName<T>::Get() == _type) ||
+ (_nargs > 1 && TypeName<std::vector<T>>::Get() == _type))
+ {
+ _values.emplace_back(::to_string(value));
+ default_value(values...);
+ }
+ else
+ {
+ throw std::runtime_error("Type mismatch. "
+ "You called default_value() method with a type different "
+ "from the one you specified. "
+ "Please check the type of what you specified in "
+ "add_argument() method.");
+ }
+ return *this;
+ }
+
+private:
+ std::string _name;
+ std::string _type;
+ std::string _help_message;
+ std::function<void(void)> _func;
+ uint32_t _nargs{1};
+ bool _is_required{false};
+ std::vector<std::string> _values;
+
+ friend class Arser;
+ friend std::ostream &operator<<(std::ostream &, const Arser &);
+};
+
+class Arser
+{
+public:
+ explicit Arser(const std::string &program_description = {})
+ : _program_description{program_description}
+ {
+ add_argument("--help").help("Show help message and exit").nargs(0);
+ }
+
+ Argument &add_argument(const std::string &arg_name)
+ {
+ if (arg_name.at(0) != '-')
+ {
+ _positional_arg_vec.emplace_back(arg_name);
+ _arg_map[arg_name] = &_positional_arg_vec.back();
+ }
+ else
+ {
+ _optional_arg_vec.emplace_back(arg_name);
+ _arg_map[arg_name] = &_optional_arg_vec.back();
+ }
+ return *_arg_map[arg_name];
+ }
+
+ void parse(int argc, char **argv)
+ {
+ _program_name = argv[0];
+ _program_name.erase(0, _program_name.find_last_of("/\\") + 1);
+ if (argc >= 2)
+ {
+ if (!std::strcmp(argv[1], "--help"))
+ {
+ std::cout << *this;
+ std::exit(0);
+ }
+ else
+ {
+ for (const auto &arg : _arg_map)
+ {
+ const auto &func = arg.second->_func;
+ if (func && !std::strcmp(argv[1], arg.second->_name.c_str()))
+ {
+ func();
+ std::exit(0);
+ }
+ }
+ }
+ }
+ /*
+ ** ./program_name [optional argument] [positional argument]
+ */
+ // get the number of positioanl argument
+ size_t parg_num = _positional_arg_vec.size();
+ // get the number of "required" optional argument
+ size_t required_oarg_num = 0;
+ for (auto arg : _optional_arg_vec)
+ {
+ if (arg._is_required)
+ required_oarg_num++;
+ }
+ // parse argument
+ for (int c = 1; c < argc;)
+ {
+ std::string arg_name{argv[c++]};
+ auto arg = _arg_map.find(arg_name);
+ // check whether arg is positional or not
+ if (arg == _arg_map.end())
+ {
+ if (parg_num)
+ {
+ auto it = _positional_arg_vec.begin();
+ std::advance(it, _positional_arg_vec.size() - parg_num);
+ (*it)._values.clear();
+ (*it)._values.emplace_back(arg_name);
+ parg_num--;
+ }
+ else
+ throw std::runtime_error("Invalid argument. "
+ "You've given more positional argument than necessary.");
+ }
+ else // optional argument
+ {
+ // check whether arg is required or not
+ if (arg->second->_is_required)
+ required_oarg_num--;
+ arg->second->_values.clear();
+ for (uint32_t n = 0; n < arg->second->_nargs; n++)
+ {
+ if (c >= argc)
+ throw std::runtime_error("Invalid argument. "
+ "You must have missed some argument.");
+ arg->second->_values.emplace_back(argv[c++]);
+ }
+ if (arg->second->_nargs == 0)
+ {
+ // TODO std::boolalpha for true or false
+ arg->second->_values.emplace_back("1");
+ }
+ }
+ }
+ if (parg_num || required_oarg_num)
+ throw std::runtime_error("Invalid argument. "
+ "You must have missed some argument.");
+ }
+
+ bool operator[](const std::string &arg_name)
+ {
+ auto arg = _arg_map.find(arg_name);
+ if (arg == _arg_map.end())
+ return false;
+
+ return arg->second->_values.size() > 0 ? true : false;
+ }
+
+ template <typename T> T get_impl(const std::string &arg_name, T *);
+
+ template <typename T> std::vector<T> get_impl(const std::string &arg_name, std::vector<T> *);
+
+ template <typename T> T get(const std::string &arg_name);
+
+private:
+ std::string _program_name;
+ std::string _program_description;
+ std::list<Argument> _positional_arg_vec;
+ std::list<Argument> _optional_arg_vec;
+ std::map<std::string, Argument *> _arg_map;
+
+ friend std::ostream &operator<<(std::ostream &, const Arser &);
+};
+
+template <typename T> T Arser::get_impl(const std::string &arg_name, T *)
+{
+ auto arg = _arg_map.find(arg_name);
+ if (arg == _arg_map.end())
+ throw std::runtime_error("Invalid argument. "
+ "There is no argument you are looking for.");
+
+ if (arg->second->_type != TypeName<T>::Get())
+ throw std::runtime_error("Type mismatch. "
+ "You called get() method with a type different "
+ "from the one you specified. "
+ "Please check the type of what you specified in "
+ "add_argument() method.");
+
+ if (arg->second->_values.size() == 0)
+ throw std::runtime_error("Wrong access. "
+ "You must make sure that the argument is given before accessing it. "
+ "You can do it by calling arser[\"argument\"].");
+
+ return ::lexical_cast<T>(arg->second->_values[0]);
+}
+
+template <typename T> std::vector<T> Arser::get_impl(const std::string &arg_name, std::vector<T> *)
+{
+ auto arg = _arg_map.find(arg_name);
+ if (arg == _arg_map.end())
+ throw std::runtime_error("Invalid argument. "
+ "There is no argument you are looking for.");
+
+ if (arg->second->_type != TypeName<std::vector<T>>::Get())
+ throw std::runtime_error("Type mismatch. "
+ "You called get using a type different from the one you specified.");
+
+ std::vector<T> data;
+ std::transform(arg->second->_values.begin(), arg->second->_values.end(), std::back_inserter(data),
+ [](std::string str) -> T { return ::lexical_cast<T>(str); });
+ return data;
+}
+
+template <typename T> T Arser::get(const std::string &arg_name)
+{
+ return get_impl(arg_name, static_cast<T *>(nullptr));
+}
+
+std::ostream &operator<<(std::ostream &stream, const Arser &parser)
+{
+ // print description
+ if (!parser._program_description.empty())
+ {
+ stream << "What " << parser._program_name << " does: " << parser._program_description << "\n\n";
+ }
+ /*
+ ** print usage
+ */
+ stream << "Usage: ./" << parser._program_name << " ";
+ // required optional argument
+ for (const auto &arg : parser._optional_arg_vec)
+ {
+ if (!arg._is_required)
+ continue;
+ stream << arg._name << " ";
+ std::string arg_name = arg._name.substr(2);
+ std::for_each(arg_name.begin(), arg_name.end(),
+ [&stream](const char &c) { stream << static_cast<char>(::toupper(c)); });
+ stream << " ";
+ }
+ // rest of the optional argument
+ for (const auto &arg : parser._optional_arg_vec)
+ {
+ if (arg._is_required)
+ continue;
+ stream << "[" << arg._name;
+ if (arg._nargs)
+ {
+ stream << " ";
+ std::string arg_name = arg._name.substr(2);
+ std::for_each(arg_name.begin(), arg_name.end(),
+ [&stream](const char &c) { stream << static_cast<char>(::toupper(c)); });
+ }
+ stream << "]"
+ << " ";
+ }
+ // positional arguement
+ for (const auto &arg : parser._positional_arg_vec)
+ {
+ stream << arg._name << " ";
+ }
+ stream << "\n\n";
+ /*
+ ** print argument list and its help message
+ */
+ // get the length of the longest argument
+ size_t length_of_longest_arg = 0;
+ for (const auto &arg : parser._positional_arg_vec)
+ {
+ length_of_longest_arg = std::max(length_of_longest_arg, arg._name.length());
+ }
+ for (const auto &arg : parser._optional_arg_vec)
+ {
+ length_of_longest_arg = std::max(length_of_longest_arg, arg._name.length());
+ }
+
+ const size_t message_width = 60;
+ // positional argument
+ if (!parser._positional_arg_vec.empty())
+ {
+ stream << "[Positional argument]" << std::endl;
+ for (const auto &arg : parser._positional_arg_vec)
+ {
+ stream.width(length_of_longest_arg);
+ stream << std::left << arg._name << "\t";
+ for (size_t i = 0; i < arg._help_message.length(); i += message_width)
+ {
+ if (i)
+ stream << std::string(length_of_longest_arg, ' ') << "\t";
+ stream << arg._help_message.substr(i, message_width) << std::endl;
+ }
+ }
+ std::cout << std::endl;
+ }
+ // optional argument
+ if (!parser._optional_arg_vec.empty())
+ {
+ stream << "[Optional argument]" << std::endl;
+ for (const auto &arg : parser._optional_arg_vec)
+ {
+ stream.width(length_of_longest_arg);
+ stream << std::left << arg._name << "\t";
+ for (size_t i = 0; i < arg._help_message.length(); i += message_width)
+ {
+ if (i)
+ stream << std::string(length_of_longest_arg, ' ') << "\t";
+ stream << arg._help_message.substr(i, message_width) << std::endl;
+ }
+ }
+ }
+
+ return stream;
+}
+
+} // namespace arser
diff --git a/compiler/arser/tests/arser.test.cpp b/compiler/arser/tests/arser.test.cpp
new file mode 100644
index 000000000..28bee4238
--- /dev/null
+++ b/compiler/arser/tests/arser.test.cpp
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iterator>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "arser/arser.h"
+
+using namespace arser;
+
+class Prompt
+{
+public:
+ Prompt(const std::string &command)
+ {
+ std::istringstream iss(command);
+ std::vector<std::string> token(std::istream_iterator<std::string>{iss},
+ std::istream_iterator<std::string>());
+ _arg = std::move(token);
+ _argv.reserve(_arg.size());
+ for (const auto &t : _arg)
+ {
+ _argv.push_back(const_cast<char *>(t.data()));
+ }
+ }
+ int argc(void) const { return _argv.size(); }
+ char **argv(void) { return _argv.data(); }
+
+private:
+ std::vector<char *> _argv;
+ std::vector<std::string> _arg;
+};
+
+TEST(BasicTest, option)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--verbose")
+ .nargs(0)
+ .help("It provides additional details as to what the executable is doing");
+
+ Prompt prompt("./executable --verbose");
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ EXPECT_TRUE(arser["--verbose"]);
+ EXPECT_TRUE(arser.get<bool>("--verbose"));
+}
+
+TEST(BasicTest, OptionalArgument)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--volume")
+ .nargs(1)
+ .type(arser::DataType::INT32)
+ .help("Set a volume as you provided.");
+ arser.add_argument("--frequency")
+ .nargs(1)
+ .type(arser::DataType::FLOAT)
+ .help("Set a frequency as you provided.");
+
+ Prompt prompt("./radio --volume 5 --frequency 128.5");
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ EXPECT_TRUE(arser["--volume"]);
+ EXPECT_EQ(5, arser.get<int>("--volume"));
+
+ EXPECT_TRUE(arser["--frequency"]);
+ EXPECT_FLOAT_EQ(128.5, arser.get<float>("--frequency"));
+
+ EXPECT_FALSE(arser["--price"]);
+ EXPECT_THROW(arser.get<bool>("--volume"), std::runtime_error);
+}
+
+TEST(BasicTest, NonRequiredOptionalArgument)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--weight")
+ .nargs(1)
+ .type(arser::DataType::INT32)
+ .help("Set a volume as you provided.");
+
+ Prompt prompt("./radio"); // empty argument
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ EXPECT_FALSE(arser["--volume"]);
+ EXPECT_THROW(arser.get<int>("--weight"), std::runtime_error);
+}
+
+TEST(BasicTest, RequiredOptionalArgument)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--volume")
+ .nargs(1)
+ .type(arser::DataType::INT32)
+ .required()
+ .help("Set a volume as you provided.");
+
+ Prompt prompt("./radio");
+ /* act */ /* assert */
+ EXPECT_THROW(arser.parse(prompt.argc(), prompt.argv()), std::runtime_error);
+}
+
+TEST(BasicTest, OptionalMultipleArgument)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--add").nargs(2).type(arser::DataType::INT32_VEC).help("Add two numbers.");
+
+ Prompt prompt("./calculator --add 3 5");
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ EXPECT_TRUE(arser["--add"]);
+ std::vector<int> values = arser.get<std::vector<int>>("--add");
+ EXPECT_EQ(3, values.at(0));
+ EXPECT_EQ(5, values.at(1));
+
+ EXPECT_THROW(arser.get<std::vector<float>>("--add"), std::runtime_error);
+}
+
+TEST(BasicTest, MultipleOptionalArgument)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--input_path")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .help("input path of this program.")
+ .required();
+ arser.add_argument("--output_path")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .help("output path of this program.")
+ .required(true);
+ arser.add_argument("--training_data")
+ .nargs(5)
+ .type(arser::DataType::INT32_VEC)
+ .help("give traning data to this program.")
+ .required();
+
+ Prompt prompt("./ml --input_path /I/am/in.put --output_path I/am/out.put "
+ "--training_data 2 43 234 3 334");
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ EXPECT_TRUE(arser["--input_path"]);
+ EXPECT_EQ("/I/am/in.put", arser.get<std::string>("--input_path"));
+ EXPECT_TRUE(arser["--output_path"]);
+ EXPECT_EQ("I/am/out.put", arser.get<std::string>("--output_path"));
+ EXPECT_TRUE(arser["--training_data"]);
+ std::vector<int32_t> data = arser.get<std::vector<int32_t>>("--training_data");
+ EXPECT_EQ(2, data.at(0));
+ EXPECT_EQ(43, data.at(1));
+ EXPECT_EQ(234, data.at(2));
+ EXPECT_EQ(3, data.at(3));
+ EXPECT_EQ(334, data.at(4));
+}
+
+TEST(BasicTest, MultipleFloatValue)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--add_float")
+ .nargs(2)
+ .type(arser::DataType::FLOAT_VEC)
+ .help("Add two float numbers.");
+
+ Prompt prompt("./calculator --add_float 3.2 5.4");
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ EXPECT_TRUE(arser["--add_float"]);
+ std::vector<float> values = arser.get<std::vector<float>>("--add_float");
+ EXPECT_FLOAT_EQ(3.2, values.at(0));
+ EXPECT_FLOAT_EQ(5.4, values.at(1));
+
+ EXPECT_THROW(arser.get<std::vector<int>>("--add_float"), std::runtime_error);
+}
+
+TEST(BasicTest, MultipleStringValue)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--three_color")
+ .nargs(3)
+ .type(arser::DataType::STR_VEC)
+ .help("insert your three favorite color");
+
+ Prompt prompt("./color_factory --three_color red blue yellow");
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ EXPECT_TRUE(arser["--three_color"]);
+ std::vector<std::string> values = arser.get<std::vector<std::string>>("--three_color");
+ EXPECT_EQ("red", values.at(0));
+ EXPECT_EQ("blue", values.at(1));
+ EXPECT_EQ("yellow", values.at(2));
+
+ EXPECT_THROW(arser.get<std::vector<std::string>>("--color"), std::runtime_error);
+}
+
+void printBiography(void) { std::cerr << "When I was young.." << std::endl; }
+
+TEST(BasicTest, ExitWithFunctionCall)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--history").help("Show history and exit").exit_with(printBiography);
+
+ arser.add_argument("--name").nargs(1).type(arser::DataType::STR).help("Name your hero");
+
+ Prompt prompt("./hero --history");
+ /* act */ /* assert */
+ EXPECT_EXIT(arser.parse(prompt.argc(), prompt.argv()), testing::ExitedWithCode(0),
+ "When I was young..");
+}
+
+void printVersion(std::string version) { std::cerr << "arser version : " << version << std::endl; }
+
+TEST(BasicTest, ExitWithFunctionCallWithBind)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--version")
+ .help("Show version and exit")
+ .exit_with(std::bind(printVersion, "1.2.0"));
+
+ Prompt prompt("./arser --version");
+ /* act */ /* assert */
+ EXPECT_EXIT(arser.parse(prompt.argc(), prompt.argv()), testing::ExitedWithCode(0),
+ "arser version : 1.2.0");
+}
+
+TEST(BasicTest, ExitWithFunctionCallWithLamda)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--shutdown").help("Shut down your computer").exit_with([](void) {
+ std::cerr << "Good bye.." << std::endl;
+ });
+
+ arser.add_argument("OS").nargs(1).type(arser::DataType::STR).help("The OS you want to boot");
+
+ Prompt prompt("./computer --shutdown");
+ /* act */ /* assert */
+ EXPECT_EXIT(arser.parse(prompt.argc(), prompt.argv()), testing::ExitedWithCode(0), "Good bye..");
+}
+
+TEST(BasicTest, DefaultValue)
+{
+ /* arrange */
+ Arser arser;
+
+ arser.add_argument("--delivery")
+ .nargs(3)
+ .type(arser::DataType::STR_VEC)
+ .default_value("pizza", "chicken", "hamburger")
+ .help("Enter three foods that you want to deliver");
+ arser.add_argument("--assistant")
+ .type(arser::DataType::STR)
+ .default_value("Bixby")
+ .help("Enter name of your assistant");
+ arser.add_argument("--sound")
+ .type(arser::DataType::BOOL)
+ .nargs(1)
+ .default_value(true)
+ .help("Sound on/off");
+ arser.add_argument("--number")
+ .type(arser::DataType::INT32_VEC)
+ .nargs(4)
+ .default_value(1, 2, 3, 4)
+ .help("Enter the number that you want to call");
+ arser.add_argument("--time")
+ .type(arser::DataType::INT32_VEC)
+ .nargs(3)
+ .default_value(0, 0, 0)
+ .help("Current time(H/M/S)");
+ arser.add_argument("--name")
+ .type(arser::DataType::STR)
+ .nargs(1)
+ .default_value("no name")
+ .help("Enter your name");
+
+ Prompt prompt("/phone --time 1 52 34 --name arser");
+ /* act */
+ arser.parse(prompt.argc(), prompt.argv());
+ /* assert */
+ // 3 strings, no argument
+ std::vector<std::string> delivery = arser.get<std::vector<std::string>>("--delivery");
+ EXPECT_EQ("pizza", delivery.at(0));
+ EXPECT_EQ("chicken", delivery.at(1));
+ EXPECT_EQ("hamburger", delivery.at(2));
+ // 1 string, no argument
+ EXPECT_EQ("Bixby", arser.get<std::string>("--assistant"));
+ // 1 bool, no argument
+ EXPECT_EQ(true, arser.get<bool>("--sound"));
+ // 4 integer, no argument
+ std::vector<int> number = arser.get<std::vector<int>>("--number");
+ EXPECT_EQ(1, number.at(0));
+ EXPECT_EQ(2, number.at(1));
+ EXPECT_EQ(3, number.at(2));
+ EXPECT_EQ(4, number.at(3));
+ // 3 integer, 3 arguments
+ std::vector<int> time = arser.get<std::vector<int>>("--time");
+ EXPECT_EQ(1, time.at(0));
+ EXPECT_EQ(52, time.at(1));
+ EXPECT_EQ(34, time.at(2));
+ // 1 string, 1 argument
+ EXPECT_EQ("arser", arser.get<std::string>("--name"));
+}
diff --git a/compiler/caffe2circle/requires.cmake b/compiler/caffe2circle/requires.cmake
index cc05edd84..b16a51141 100644
--- a/compiler/caffe2circle/requires.cmake
+++ b/compiler/caffe2circle/requires.cmake
@@ -1,3 +1,3 @@
-require("mir-onnx-importer")
+require("mir")
require("mir2loco")
require("exo")
diff --git a/compiler/circle-inspect/CMakeLists.txt b/compiler/circle-inspect/CMakeLists.txt
index 222f8cb1a..d0775ea2d 100644
--- a/compiler/circle-inspect/CMakeLists.txt
+++ b/compiler/circle-inspect/CMakeLists.txt
@@ -8,6 +8,7 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(circle-inspect ${DRIVER} ${SOURCES})
target_include_directories(circle-inspect PRIVATE src)
+target_link_libraries(circle-inspect arser)
+target_link_libraries(circle-inspect foder)
target_link_libraries(circle-inspect mio_circle)
target_link_libraries(circle-inspect safemain)
-target_link_libraries(circle-inspect stdex)
diff --git a/compiler/circle-inspect/driver/Driver.cpp b/compiler/circle-inspect/driver/Driver.cpp
index d23cd0f8b..72cfa28a3 100644
--- a/compiler/circle-inspect/driver/Driver.cpp
+++ b/compiler/circle-inspect/driver/Driver.cpp
@@ -14,74 +14,63 @@
* limitations under the License.
*/
-#include "Model.h"
#include "Dump.h"
-#include <stdex/Memory.h>
+#include <arser/arser.h>
+#include <foder/FileLoader.h>
#include <functional>
#include <iostream>
#include <map>
+#include <memory>
#include <vector>
#include <string>
-using OptionHook = std::function<std::unique_ptr<circleinspect::DumpInterface>(void)>;
-
int entry(int argc, char **argv)
{
- if (argc < 3)
+ arser::Arser arser{
+ "circle-inspect allows users to retrieve various information from a Circle model file"};
+ arser.add_argument("--operators").nargs(0).help("Dump operators in circle file");
+ arser.add_argument("--conv2d_weight")
+ .nargs(0)
+ .help("Dump Conv2D series weight operators in circle file");
+ arser.add_argument("--op_version").nargs(0).help("Dump versions of the operators in circle file");
+ arser.add_argument("circle").type(arser::DataType::STR).help("Circle file to inspect");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [options] [circle]" << std::endl;
- std::cerr << " --operators : dump operators in circle file" << std::endl;
- std::cerr << " --conv2d_weight : dump Conv2D series weight operators in circle file"
- << std::endl;
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
return 255;
}
- // Simple argument parser (based on map)
- std::map<std::string, OptionHook> argparse;
-
- argparse["--operators"] = [&](void) {
- // dump all operators
- return std::move(stdex::make_unique<circleinspect::DumpOperators>());
- };
-
- argparse["--conv2d_weight"] = [&](void) {
- // dump Conv2D, DepthwiseConv2D weight operators
- return std::move(stdex::make_unique<circleinspect::DumpConv2DWeight>());
- };
+ if (!arser["--operators"] && !arser["--conv2d_weight"] && !arser["--op_version"])
+ {
+ std::cout << "At least one option must be specified" << std::endl;
+ std::cout << arser;
+ return 255;
+ }
std::vector<std::unique_ptr<circleinspect::DumpInterface>> dumps;
- for (int n = 1; n < argc - 1; ++n)
- {
- const std::string tag{argv[n]};
-
- auto it = argparse.find(tag);
- if (it == argparse.end())
- {
- std::cerr << "Option '" << tag << "' is not supported" << std::endl;
- return 255;
- }
- auto dump = it->second();
- assert(dump != nullptr);
- dumps.push_back(std::move(dump));
- }
+ if (arser["--operators"])
+ dumps.push_back(std::make_unique<circleinspect::DumpOperators>());
+ if (arser["--conv2d_weight"])
+ dumps.push_back(std::make_unique<circleinspect::DumpConv2DWeight>());
+ if (arser["--op_version"])
+ dumps.push_back(std::make_unique<circleinspect::DumpOperatorVersion>());
- std::string model_file = argv[argc - 1];
+ std::string model_file = arser.get<std::string>("circle");
// Load Circle model from a circle file
- auto model = circleinspect::load_circle(model_file);
- if (model == nullptr)
- {
- std::cerr << "ERROR: Failed to load circle '" << model_file << "'" << std::endl;
- return 255;
- }
-
- const circle::Model *circlemodel = model->model();
- if (circlemodel == nullptr)
+ foder::FileLoader fileLoader{model_file};
+ std::vector<char> modelData = fileLoader.load();
+ const circle::Model *circleModel = circle::GetModel(modelData.data());
+ if (circleModel == nullptr)
{
std::cerr << "ERROR: Failed to load circle '" << model_file << "'" << std::endl;
return 255;
@@ -89,7 +78,7 @@ int entry(int argc, char **argv)
for (auto &dump : dumps)
{
- dump->run(std::cout, circlemodel);
+ dump->run(std::cout, circleModel);
}
return 0;
diff --git a/compiler/circle-inspect/requires.cmake b/compiler/circle-inspect/requires.cmake
index b090dbd4d..81e0f0dbd 100644
--- a/compiler/circle-inspect/requires.cmake
+++ b/compiler/circle-inspect/requires.cmake
@@ -1,3 +1,3 @@
+require("arser")
require("mio-circle")
require("safemain")
-require("stdex")
diff --git a/compiler/circle-inspect/src/Dump.cpp b/compiler/circle-inspect/src/Dump.cpp
index fbc092b89..5c71afb3f 100644
--- a/compiler/circle-inspect/src/Dump.cpp
+++ b/compiler/circle-inspect/src/Dump.cpp
@@ -26,19 +26,22 @@ void DumpOperators::run(std::ostream &os, const circle::Model *model)
{
circleinspect::Reader reader(model);
- assert(reader.num_subgraph() == 1);
- reader.select_subgraph(0);
-
- auto ops = reader.operators();
+ const uint32_t subgraph_size = reader.num_subgraph();
- // dump operators
- for (uint32_t i = 0; i < ops->Length(); ++i)
+ for (uint32_t g = 0; g < subgraph_size; g++)
{
- const auto op = ops->Get(i);
+ reader.select_subgraph(g);
+ auto ops = reader.operators();
- auto op_name = reader.opcode_name(op);
+ // dump operators
+ for (uint32_t i = 0; i < ops->Length(); ++i)
+ {
+ const auto op = ops->Get(i);
+
+ auto op_name = reader.opcode_name(op);
- os << op_name << std::endl;
+ os << op_name << std::endl;
+ }
}
}
@@ -92,43 +95,82 @@ void DumpConv2DWeight::run(std::ostream &os, const circle::Model *model)
{
circleinspect::Reader reader(model);
+ const uint32_t subgraph_size = reader.num_subgraph();
+
+ for (uint32_t g = 0; g < subgraph_size; g++)
+ {
+ reader.select_subgraph(g);
+ auto ops = reader.operators();
+
+    // dump Conv2D, DepthwiseConv2D and their weight input operators
+ for (uint32_t i = 0; i < ops->Length(); ++i)
+ {
+ const auto op = ops->Get(i);
+ auto bc = reader.builtin_code(op);
+
+ if (bc == circle::BuiltinOperator_CONV_2D || bc == circle::BuiltinOperator_DEPTHWISE_CONV_2D)
+ {
+ const std::vector<int32_t> &inputs = circleinspect::as_index_vector(op->inputs());
+ if (inputs.size() < 2)
+ {
+ throw std::runtime_error("Operator has invalid input");
+ }
+ auto weight_input = inputs[1]; // Tensor ID of weight input
+
+ const auto op_weight = operator_match_output(reader, weight_input);
+ const auto buffer_size = tensor_buffer_size(reader, weight_input);
+
+ std::string weight_op_name = "?";
+
+ if (op_weight == nullptr && buffer_size > 0)
+ {
+ weight_op_name = "CONST";
+ }
+ else if (op_weight != nullptr)
+ {
+ weight_op_name = reader.opcode_name(op_weight);
+ }
+
+ auto op_name = reader.opcode_name(op);
+ os << op_name << "," << weight_op_name << std::endl;
+ }
+ }
+ }
+}
+
+} // namespace circleinspect
+
+namespace circleinspect
+{
+
+void DumpOperatorVersion::run(std::ostream &os, const circle::Model *model)
+{
+ std::map<std::string, int32_t> op_version_map;
+
+ circleinspect::Reader reader(model);
+
+  // This assert is subject to change later
assert(reader.num_subgraph() == 1);
reader.select_subgraph(0);
auto ops = reader.operators();
- // dump Conv2D, DepthwiseConv2D and its weight input operator
+ // Dump operators' version
for (uint32_t i = 0; i < ops->Length(); ++i)
{
const auto op = ops->Get(i);
- auto bc = reader.builtin_code(op);
-
- if (bc == circle::BuiltinOperator_CONV_2D || bc == circle::BuiltinOperator_DEPTHWISE_CONV_2D)
- {
- const std::vector<int32_t> &inputs = circleinspect::as_index_vector(op->inputs());
- if (inputs.size() < 2)
- {
- throw std::runtime_error("Operator has invalid input");
- }
- auto weight_input = inputs[1]; // Tensor ID of weight input
-
- const auto op_weight = operator_match_output(reader, weight_input);
- const auto buffer_size = tensor_buffer_size(reader, weight_input);
- std::string weight_op_name = "?";
+ auto op_name = reader.opcode_name(op);
+ auto op_version = reader.opcodes().at(op->opcode_index())->version();
- if (op_weight == nullptr && buffer_size > 0)
- {
- weight_op_name = "CONST";
- }
- else if (op_weight != nullptr)
- {
- weight_op_name = reader.opcode_name(op_weight);
- }
+ if (op_version_map.find(op_name) == op_version_map.end() ||
+ op_version_map[op_name] < op_version)
+ op_version_map[op_name] = op_version;
+ }
- auto op_name = reader.opcode_name(op);
- os << op_name << "," << weight_op_name << std::endl;
- }
+ for (auto op : op_version_map)
+ {
+ os << op.first << "," << op.second << std::endl;
}
}
diff --git a/compiler/circle-inspect/src/Dump.h b/compiler/circle-inspect/src/Dump.h
index 6afba83b3..996c421f9 100644
--- a/compiler/circle-inspect/src/Dump.h
+++ b/compiler/circle-inspect/src/Dump.h
@@ -51,6 +51,15 @@ public:
void run(std::ostream &os, const circle::Model *model);
};
+class DumpOperatorVersion final : public DumpInterface
+{
+public:
+ DumpOperatorVersion() = default;
+
+public:
+ void run(std::ostream &os, const circle::Model *model);
+};
+
} // namespace circleinspect
#endif // __DUMP_H__
diff --git a/compiler/circle-inspect/src/Model.cpp b/compiler/circle-inspect/src/Model.cpp
deleted file mode 100644
index 1924bfafc..000000000
--- a/compiler/circle-inspect/src/Model.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Model.h"
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-
-namespace
-{
-
-class MemoryMappedModel final : public circleinspect::Model
-{
-public:
- /**
- * @require fd and data SHOULD be valid
- */
- explicit MemoryMappedModel(int fd, void *data, size_t size) : _fd{fd}, _data{data}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- ~MemoryMappedModel()
- {
- munmap(_data, _size);
- close(_fd);
- }
-
-public:
- MemoryMappedModel(const MemoryMappedModel &) = delete;
- MemoryMappedModel(MemoryMappedModel &&) = delete;
-
-public:
- const ::circle::Model *model(void) const override { return ::circle::GetModel(_data); }
-
-private:
- int _fd = -1;
- void *_data = nullptr;
- size_t _size = 0;
-};
-
-class FileDescriptor final
-{
-public:
- FileDescriptor(int value) : _value{value}
- {
- // DO NOTHING
- }
-
-public:
- // NOTE Copy is not allowed
- FileDescriptor(const FileDescriptor &) = delete;
-
-public:
- // NOTE Move is allowed
- FileDescriptor(FileDescriptor &&fd) { _value = fd.release(); }
-
-public:
- ~FileDescriptor()
- {
- if (_value != -1)
- {
- // Close on descturction
- close(_value);
- }
- }
-
-public:
- int value(void) const { return _value; }
-
-public:
- int release(void)
- {
- auto res = _value;
- _value = -1;
- return res;
- }
-
-private:
- int _value = -1;
-};
-
-} // namespace
-
-namespace circleinspect
-{
-
-std::unique_ptr<Model> load_circle(const std::string &path)
-{
- FileDescriptor fd = open(path.c_str(), O_RDONLY);
-
- if (fd.value() == -1)
- {
- // Return nullptr on open failure
- return nullptr;
- }
-
- struct stat st;
- if (fstat(fd.value(), &st) == -1)
- {
- // Return nullptr on fstat failure
- return nullptr;
- }
-
- auto size = st.st_size;
- auto data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd.value(), 0);
-
- if (data == MAP_FAILED)
- {
- // Return nullptr on mmap failure
- return nullptr;
- }
-
- // Check if file is a valid Flatbuffer file
- const uint8_t *u8data = reinterpret_cast<const uint8_t *>(data);
- flatbuffers::Verifier verifier{u8data, static_cast<size_t>(size)};
- if (!circle::VerifyModelBuffer(verifier))
- {
- munmap(data, size);
- close(fd.release());
- return nullptr;
- }
-
- return std::unique_ptr<circleinspect::Model>{new MemoryMappedModel(fd.release(), data, size)};
-}
-
-} // namespace circleinspect
diff --git a/compiler/circle-inspect/src/Model.h b/compiler/circle-inspect/src/Model.h
deleted file mode 100644
index 8206ed364..000000000
--- a/compiler/circle-inspect/src/Model.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MODEL_H__
-#define __MODEL_H__
-
-#include <mio/circle/schema_generated.h>
-
-#include <memory>
-
-namespace circleinspect
-{
-
-struct Model
-{
- virtual ~Model() = default;
-
- virtual const ::circle::Model *model(void) const = 0;
-};
-
-/**
- * @brief Load Circle model (as a raw Model) from a given path
- *
- * @note May return a nullptr
- */
-std::unique_ptr<Model> load_circle(const std::string &path);
-
-} // namespace circleinspect
-
-#endif // __MODEL_H__
diff --git a/compiler/circle-inspect/src/Reader.cpp b/compiler/circle-inspect/src/Reader.cpp
index dbbc7c75e..7807db38a 100644
--- a/compiler/circle-inspect/src/Reader.cpp
+++ b/compiler/circle-inspect/src/Reader.cpp
@@ -50,7 +50,10 @@ std::string opcode_name(const circle::OperatorCode *opcode)
if (!opcode->custom_code())
return "(invalid custom)";
- return opcode->custom_code()->c_str();
+ std::string custom_op = "CUSTOM(";
+ custom_op += opcode->custom_code()->c_str();
+ custom_op += ")";
+ return custom_op;
}
circle::BuiltinOperator code = opcode->builtin_code();
diff --git a/compiler/circle-quantizer/CMakeLists.txt b/compiler/circle-quantizer/CMakeLists.txt
new file mode 100644
index 000000000..1335057eb
--- /dev/null
+++ b/compiler/circle-quantizer/CMakeLists.txt
@@ -0,0 +1,17 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_executable(circle-quantizer "${SOURCES}")
+target_include_directories(circle-quantizer PRIVATE include)
+target_include_directories(circle-quantizer PRIVATE src)
+target_link_libraries(circle-quantizer foder)
+target_link_libraries(circle-quantizer safemain)
+target_link_libraries(circle-quantizer oops)
+target_link_libraries(circle-quantizer loco)
+target_link_libraries(circle-quantizer mio_circle)
+target_link_libraries(circle-quantizer luci_import)
+target_link_libraries(circle-quantizer luci_service)
+target_link_libraries(circle-quantizer luci_pass)
+target_link_libraries(circle-quantizer luci_export)
+target_link_libraries(circle-quantizer arser)
+
+install(TARGETS circle-quantizer DESTINATION bin)
diff --git a/compiler/circle-quantizer/README.md b/compiler/circle-quantizer/README.md
new file mode 100644
index 000000000..2666c8412
--- /dev/null
+++ b/compiler/circle-quantizer/README.md
@@ -0,0 +1,3 @@
+# circle-quantizer
+
+_circle-quantizer_ provides post-training quantization functionality for Circle models.
diff --git a/compiler/circle-quantizer/include/CircleExpContract.h b/compiler/circle-quantizer/include/CircleExpContract.h
new file mode 100644
index 000000000..e888e4a12
--- /dev/null
+++ b/compiler/circle-quantizer/include/CircleExpContract.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLEQUANTIZER_CIRCLEXPCONTRACT_H__
+#define __CIRCLEQUANTIZER_CIRCLEXPCONTRACT_H__
+
+#include <loco.h>
+#include <luci/CircleExporter.h>
+#include <luci/IR/Module.h>
+
+#include <memory>
+#include <string>
+
+struct CircleExpContract : public luci::CircleExporter::Contract
+{
+public:
+ CircleExpContract(luci::Module *module, const std::string &filename)
+ : _module(module), _filepath(filename)
+ {
+ // NOTHING TO DO
+ }
+ virtual ~CircleExpContract() = default;
+
+public:
+ loco::Graph *graph(void) const final { return nullptr; }
+ luci::Module *module(void) const final { return _module; };
+
+public:
+ bool store(const char *ptr, const size_t size) const final;
+
+private:
+ luci::Module *_module;
+ const std::string _filepath;
+};
+
+#endif // __CIRCLEQUANTIZER_CIRCLEXPCONTRACT_H__
diff --git a/compiler/circle-quantizer/requires.cmake b/compiler/circle-quantizer/requires.cmake
new file mode 100644
index 000000000..2293e53f8
--- /dev/null
+++ b/compiler/circle-quantizer/requires.cmake
@@ -0,0 +1,7 @@
+require("foder")
+require("loco")
+require("locop")
+require("safemain")
+require("luci")
+require("oops")
+require("arser")
diff --git a/compiler/circle-quantizer/src/CircleExpContract.cpp b/compiler/circle-quantizer/src/CircleExpContract.cpp
new file mode 100644
index 000000000..b56b7eedc
--- /dev/null
+++ b/compiler/circle-quantizer/src/CircleExpContract.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CircleExpContract.h"
+
+#include <oops/InternalExn.h>
+
+#include <fstream>
+#include <iostream>
+
+bool CircleExpContract::store(const char *ptr, const size_t size) const
+{
+ if (!ptr)
+ INTERNAL_EXN("Graph was not serialized by FlatBuffer for some reason");
+
+ std::ofstream fs(_filepath.c_str(), std::ofstream::binary);
+ fs.write(ptr, size);
+
+ return fs.good();
+}
diff --git a/compiler/circle-quantizer/src/CircleQuantizer.cpp b/compiler/circle-quantizer/src/CircleQuantizer.cpp
new file mode 100644
index 000000000..b56b547a9
--- /dev/null
+++ b/compiler/circle-quantizer/src/CircleQuantizer.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CircleExpContract.h"
+
+#include <foder/FileLoader.h>
+
+#include <luci/Importer.h>
+#include <luci/CircleOptimizer.h>
+#include <luci/Service/Validate.h>
+#include <luci/CircleExporter.h>
+
+#include <oops/InternalExn.h>
+#include <arser/arser.h>
+
+#include <functional>
+#include <iostream>
+#include <map>
+#include <string>
+
+using OptionHook = std::function<int(const char **)>;
+
+using Algorithms = luci::CircleOptimizer::Options::Algorithm;
+using AlgorithmParameters = luci::CircleOptimizer::Options::AlgorithmParameters;
+
+int entry(int argc, char **argv)
+{
+ // Simple argument parser (based on map)
+ std::map<std::string, OptionHook> argparse;
+ luci::CircleOptimizer optimizer;
+
+ auto options = optimizer.options();
+
+ const std::string qdqw = "--quantize_dequantize_weights";
+ const std::string qwmm = "--quantize_with_minmax";
+
+ arser::Arser arser("circle-quantizer provides circle model quantization");
+
+ arser.add_argument(qdqw)
+ .nargs(3)
+ .type(arser::DataType::STR_VEC)
+ .required(false)
+ .help("Quantize-dequantize weight values required action before quantization. "
+ "Three arguments required: input_dtype(float32) "
+ "output_dtype(uint8) granularity(layer)");
+
+ arser.add_argument(qwmm)
+ .nargs(3)
+ .type(arser::DataType::STR_VEC)
+ .required(false)
+ .help("Quantize with min/max values. "
+ "Three arguments required: input_dtype(float32) "
+ "output_dtype(uint8) granularity(layer)");
+
+ arser.add_argument("input").nargs(1).type(arser::DataType::STR).help("Input circle model");
+ arser.add_argument("output").nargs(1).type(arser::DataType::STR).help("Output circle model");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 255;
+ }
+
+ if (arser[qdqw])
+ {
+ auto values = arser.get<std::vector<std::string>>(qdqw);
+ if (values.size() != 3)
+ {
+ std::cerr << arser;
+ return 255;
+ }
+ options->enable(Algorithms::QuantizeDequantizeWeights);
+
+ options->param(AlgorithmParameters::Quantize_input_dtype, values.at(0));
+ options->param(AlgorithmParameters::Quantize_output_dtype, values.at(1));
+ options->param(AlgorithmParameters::Quantize_granularity, values.at(2));
+ }
+
+ if (arser[qwmm])
+ {
+ auto values = arser.get<std::vector<std::string>>(qwmm);
+ if (values.size() != 3)
+ {
+ std::cerr << arser;
+ return 255;
+ }
+ options->enable(Algorithms::QuantizeWithMinMax);
+
+ options->param(AlgorithmParameters::Quantize_input_dtype, values.at(0));
+ options->param(AlgorithmParameters::Quantize_output_dtype, values.at(1));
+ options->param(AlgorithmParameters::Quantize_granularity, values.at(2));
+ }
+
+ std::string input_path = arser.get<std::string>("input");
+ std::string output_path = arser.get<std::string>("output");
+
+ // Load model from the file
+ foder::FileLoader file_loader{input_path};
+ std::vector<char> model_data = file_loader.load();
+ const circle::Model *circle_model = circle::GetModel(model_data.data());
+ if (circle_model == nullptr)
+ {
+ std::cerr << "ERROR: Failed to load circle '" << input_path << "'" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // Import from input Circle file
+ luci::Importer importer;
+ auto module = importer.importModule(circle_model);
+
+ for (size_t idx = 0; idx < module->size(); ++idx)
+ {
+ auto graph = module->graph(idx);
+
+ // quantize the graph
+ optimizer.quantize(graph);
+
+ if (!luci::validate(graph))
+ {
+ std::cerr << "ERROR: Quantized graph is invalid" << std::endl;
+ return 255;
+ }
+ }
+
+ // Export to output Circle file
+ luci::CircleExporter exporter;
+
+ CircleExpContract contract(module.get(), output_path);
+
+ if (!exporter.invoke(&contract))
+ {
+ std::cerr << "ERROR: Failed to export '" << output_path << "'" << std::endl;
+ return 255;
+ }
+
+ return 0;
+}
diff --git a/compiler/circle-tensordump/CMakeLists.txt b/compiler/circle-tensordump/CMakeLists.txt
new file mode 100644
index 000000000..e55901fe2
--- /dev/null
+++ b/compiler/circle-tensordump/CMakeLists.txt
@@ -0,0 +1,23 @@
+if(NOT TARGET mio_circle)
+ return()
+endif(NOT TARGET mio_circle)
+
+nnas_find_package(HDF5 QUIET)
+
+if(NOT HDF5_FOUND)
+ message(STATUS "Build circle-tensordump: FAILED (missing HDF5)")
+ return()
+endif(NOT HDF5_FOUND)
+
+set(DRIVER "driver/Driver.cpp")
+
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_executable(circle-tensordump ${DRIVER} ${SOURCES})
+target_include_directories(circle-tensordump PRIVATE src)
+target_include_directories(circle-tensordump PRIVATE ${HDF5_INCLUDE_DIRS})
+target_link_libraries(circle-tensordump PRIVATE ${HDF5_CXX_LIBRARIES})
+target_link_libraries(circle-tensordump PRIVATE arser)
+target_link_libraries(circle-tensordump PRIVATE foder)
+target_link_libraries(circle-tensordump PRIVATE mio_circle)
+target_link_libraries(circle-tensordump PRIVATE safemain)
diff --git a/compiler/circle-tensordump/README.md b/compiler/circle-tensordump/README.md
new file mode 100644
index 000000000..dcb05d57a
--- /dev/null
+++ b/compiler/circle-tensordump/README.md
@@ -0,0 +1,73 @@
+# circle-tensordump
+
+_circle-tensordump_ allows users to retrieve tensor information from a Circle model file
+
+## options
+
+**--tensors**
+
+dump tensors in circle file
+
+```
+$ ./circle-tensordump --tensors ../luci/tests/Conv2D_000.circle
+
+----------------------------------------------------------------------
+[ifm]
+ └── shape : (1, 3, 3, 2)
+
+----------------------------------------------------------------------
+[ker]
+ ├── shape : (1, 1, 1, 2)
+ └── buffer
+    ├── index : 3
+    ├── size : 8
+    └── data : 0.727939, 0.320132,
+
+----------------------------------------------------------------------
+[bias]
+ ├── shape : (1)
+ └── buffer
+    ├── index : 4
+    ├── size : 4
+    └── data : -0.794465,
+
+----------------------------------------------------------------------
+[ofm]
+ └── shape : (1, 3, 3, 1)
+```
+
+**--tensors_to_hdf5**
+
+dump tensors in circle file to hdf5 file
+
+```
+$ ./circle-tensordump --tensors_to_hdf5 ../luci/tests/Conv2D_000.circle output_path.h5
+$ h5dump output_path.h5
+
+HDF5 "output_path.h5" {
+GROUP "/" {
+ GROUP "bias" {
+ DATASET "weights" {
+ DATATYPE H5T_IEEE_F32LE
+ DATASPACE SIMPLE { ( 1 ) / ( 1 ) }
+ DATA {
+ (0): -0.794465
+ }
+ }
+ }
+ GROUP "ifm" {
+ }
+ GROUP "ker" {
+ DATASET "weights" {
+ DATATYPE H5T_IEEE_F32LE
+ DATASPACE SIMPLE { ( 1, 1, 1, 2 ) / ( 1, 1, 1, 2 ) }
+ DATA {
+ (0,0,0,0): 0.727939, 0.320132
+ }
+ }
+ }
+ GROUP "ofm" {
+ }
+}
+}
+```
diff --git a/compiler/circle-tensordump/driver/Driver.cpp b/compiler/circle-tensordump/driver/Driver.cpp
new file mode 100644
index 000000000..a55cd4574
--- /dev/null
+++ b/compiler/circle-tensordump/driver/Driver.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dump.h"
+
+#include <arser/arser.h>
+#include <foder/FileLoader.h>
+
+#include <functional>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+int entry(int argc, char **argv)
+{
+ arser::Arser arser{
+ "circle-tensordump allows users to retrieve tensor information from a Circle model file"};
+
+ arser.add_argument("circle").nargs(1).type(arser::DataType::STR).help("Circle file path to dump");
+ arser.add_argument("--tensors").nargs(0).help("Dump to console");
+ arser.add_argument("--tensors_to_hdf5")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .help("Dump to hdf5 file. Specify hdf5 file path to be dumped");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
+ }
+
+ std::unique_ptr<circletensordump::DumpInterface> dump;
+
+ std::string model_file = arser.get<std::string>("circle");
+ std::string output_path;
+ if (arser["--tensors_to_hdf5"])
+ {
+ dump = std::move(std::make_unique<circletensordump::DumpTensorsToHdf5>());
+ output_path = arser.get<std::string>("--tensors_to_hdf5");
+ }
+ if (arser["--tensors"])
+ {
+ dump = std::move(std::make_unique<circletensordump::DumpTensors>());
+ }
+
+ // Load Circle model from a circle file
+ foder::FileLoader fileLoader{model_file};
+ std::vector<char> modelData = fileLoader.load();
+ const circle::Model *circleModel = circle::GetModel(modelData.data());
+ if (circleModel == nullptr)
+ {
+ std::cerr << "ERROR: Failed to load circle '" << model_file << "'" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ dump->run(std::cout, circleModel, output_path);
+
+ return EXIT_SUCCESS;
+}
diff --git a/compiler/circle-tensordump/requires.cmake b/compiler/circle-tensordump/requires.cmake
new file mode 100644
index 000000000..1c754f518
--- /dev/null
+++ b/compiler/circle-tensordump/requires.cmake
@@ -0,0 +1,4 @@
+require("arser")
+require("foder")
+require("mio-circle")
+require("safemain")
diff --git a/compiler/circle-tensordump/src/Dump.cpp b/compiler/circle-tensordump/src/Dump.cpp
new file mode 100644
index 000000000..dfa78f031
--- /dev/null
+++ b/compiler/circle-tensordump/src/Dump.cpp
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dump.h"
+#include "Reader.h"
+
+#include <H5Cpp.h>
+
+#include <memory>
+#include <ostream>
+#include <string>
+#include <vector>
+
+namespace
+{
+
+template <typename T>
+void print_comma_sepearted(std::ostream &os, const flatbuffers::Vector<T> *vec)
+{
+ if (vec == nullptr)
+ return;
+ for (auto iter = vec->begin(); iter != vec->end(); iter++)
+ {
+ if (iter != vec->begin())
+ os << ", ";
+ os << *iter;
+ }
+}
+
+void print_buffer(std::ostream &os, uint32_t buff_idx, const flatbuffers::Vector<uint8_t> *data_ptr,
+ const circle::TensorType &type)
+{
+ if (data_ptr == nullptr)
+ return;
+
+ os << " └── buffer" << std::endl;
+ os << "    ├── index : " << buff_idx << std::endl;
+ size_t buff_size = data_ptr->size();
+ os << "    ├── size : " << buff_size << std::endl;
+ os << "    └── data : ";
+ switch (type)
+ {
+ case circle::TensorType_UINT8:
+ {
+ const uint8_t *buff_data_ui8 = reinterpret_cast<const uint8_t *>(data_ptr->data());
+ for (uint32_t idx = 0; idx < buff_size / sizeof(uint8_t); idx++)
+ {
+ os << static_cast<const uint32_t>(buff_data_ui8[idx]) << ", ";
+ }
+ break;
+ }
+ case circle::TensorType_INT32:
+ {
+ const int32_t *buff_data_i32 = reinterpret_cast<const int32_t *>(data_ptr->data());
+ for (uint32_t idx = 0; idx < buff_size / sizeof(int32_t); idx++)
+ {
+ os << buff_data_i32[idx] << ", ";
+ }
+ break;
+ }
+ case circle::TensorType_INT64:
+ {
+ const int64_t *buff_data_i64 = reinterpret_cast<const int64_t *>(data_ptr->data());
+ for (uint32_t idx = 0; idx < buff_size / sizeof(int64_t); idx++)
+ {
+ os << buff_data_i64[idx] << ", ";
+ }
+ break;
+ }
+ case circle::TensorType_FLOAT32:
+ {
+ const float *buff_data_f32 = reinterpret_cast<const float *>(data_ptr->data());
+ for (uint32_t idx = 0; idx < buff_size / sizeof(float); idx++)
+ {
+ os << buff_data_f32[idx] << ", ";
+ }
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI tensor type : " + std::to_string(type));
+ }
+ os << std::endl;
+}
+
+} // namespace
+
+namespace circletensordump
+{
+
+void DumpTensors::run(std::ostream &os, const circle::Model *model, const std::string &)
+{
+ circletensordump::Reader reader(model);
+ uint32_t num_subgraph = reader.num_subgraph();
+ auto buffers = reader.buffers();
+
+ for (uint32_t subgraph_idx = 0; subgraph_idx < num_subgraph; subgraph_idx++)
+ {
+ reader.select_subgraph(subgraph_idx);
+
+ auto tensors = reader.tensors();
+ for (const auto &tensor : *tensors)
+ {
+ os << std::string(70, '-') << std::endl;
+ os << "[" << tensor->name()->str() << "]" << std::endl;
+ auto buff_idx = tensor->buffer();
+ auto buff_data_ptr = reader.buffers()->Get(buff_idx)->data();
+ auto quant_param = tensor->quantization();
+ std::string print_format = (!buff_data_ptr && !quant_param) ? "└──" : "├──";
+
+ // shape
+ auto shape = tensor->shape();
+ os << " " + print_format + " shape : (";
+ ::print_comma_sepearted(os, shape);
+ os << ")" << std::endl;
+
+      // quantization parameters
+ if (quant_param)
+ {
+ std::string print_format1 = buff_data_ptr ? "├──" : "└──";
+ std::string print_format2 = buff_data_ptr ? "│" : " ";
+ os << " " + print_format1 + " quantization" << std::endl;
+ auto min = quant_param->min();
+ auto max = quant_param->max();
+ auto scale = quant_param->scale();
+ auto zero_point = quant_param->zero_point();
+
+ os << " " + print_format2 + "   ├── min : ";
+ ::print_comma_sepearted(os, min);
+ os << std::endl;
+ os << " " + print_format2 + "   ├── max : ";
+ ::print_comma_sepearted(os, max);
+ os << std::endl;
+ os << " " + print_format2 + "   ├── scale : ";
+ ::print_comma_sepearted(os, scale);
+ os << std::endl;
+ os << " " + print_format2 + "   └── zero_point : ";
+ ::print_comma_sepearted(os, zero_point);
+ os << std::endl;
+ }
+
+ // buffer
+ print_buffer(os, buff_idx, buff_data_ptr, tensor->type());
+ os << std::endl;
+ }
+ }
+}
+
+} // namespace circletensordump
+
+namespace
+{
+
+// HDF5 forbids the inclusion of '/' in the name.
+std::string mangle(const std::string &name)
+{
+ std::string ret{name};
+ std::replace(ret.begin(), ret.end(), '/', '_');
+ return ret;
+}
+
+H5::PredType hdf5_dtype_cast(const circle::TensorType &circle_type)
+{
+ switch (circle_type)
+ {
+ case circle::TensorType_UINT8:
+ {
+ return H5::PredType::NATIVE_UINT8;
+ }
+ case circle::TensorType_INT32:
+ {
+ return H5::PredType::NATIVE_INT32;
+ }
+ case circle::TensorType_INT64:
+ {
+ return H5::PredType::NATIVE_INT64;
+ }
+ case circle::TensorType_FLOAT32:
+ {
+ return H5::PredType::NATIVE_FLOAT;
+ }
+ default:
+ throw std::runtime_error("NYI tensor type : " + std::to_string(circle_type));
+ }
+}
+
+/**
+ * In order to create a dataspace, its rank and dimensions are required as hsize_t type.
+ * This function converts flatbuffers::Vector<T> to std::vector<hsize_t>.
+ *
+ * If the "dims" parameter is passed, it will be converted. However, if it is
+ * not passed (nullptr), the data is considered a rank-1 vector.
+ */
+template <typename T>
+std::vector<hsize_t> hdf5_dims_cast(const flatbuffers::Vector<T> *data,
+ const flatbuffers::Vector<int32_t> *dims = nullptr)
+{
+ std::vector<hsize_t> ret;
+ if (data != nullptr)
+ {
+ if (dims == nullptr)
+ {
+ ret.resize(1);
+ ret.at(0) = data->size();
+ }
+ else
+ {
+ const uint32_t rank = dims->size();
+ ret.resize(rank);
+ for (uint32_t d = 0; d < rank; d++)
+ {
+ ret.at(d) = dims->Get(d);
+ }
+ }
+ }
+ return ret;
+}
+
+/**
+ * This function writes data to given hdf5 file like below.
+ *
+ * GROUP "group_name"
+ * ㄴDATATYPE "type"
+ * ㄴDATASET "dataset_name"
+ * ㄴDATASPACE "dims"
+ * ㄴDATA "data"
+ */
+template <typename T>
+void write_data_to_hdf5(H5::H5File &file, std::string &group_name, std::string dataset_name,
+ const H5::PredType &type, const flatbuffers::Vector<T> *data,
+ std::vector<hsize_t> dims)
+{
+ if (data == nullptr)
+ return;
+ auto dataspace = std::make_unique<H5::DataSpace>(dims.size(), dims.data());
+ auto dataset = std::make_unique<H5::DataSet>(
+ file.createDataSet(group_name + "/" + dataset_name, type, *dataspace));
+ dataset->write(data->data(), type);
+}
+
+} // namespace
+
+namespace circletensordump
+{
+
+/**
+ * HDF5 layout is like below
+ *
+ * GROUP "/"
+ * ㄴGROUP "tensor name"
+ * ㄴDATASET "weights" : Shape (x, y, ...), type(uint8, int16)
+ * ㄴDATASET "min" : Shape (n)
+ * ㄴDATASET "max" : Shape (n)
+ * ㄴDATASET "scale" : Shape (m)
+ * ㄴDATASET "zero_point" : Shape (m)
+ *
+ * NOTE All Datasets are optional. It means that if a tensor doesn't have the data, it won't be
+ * created as a Dataset
+ *
+ */
+void DumpTensorsToHdf5::run(std::ostream &os, const circle::Model *model,
+ const std::string &output_path)
+{
+ // loads a circle model
+ circletensordump::Reader reader(model);
+ uint32_t num_subgraph = reader.num_subgraph();
+
+ // create a hdf5 file
+ H5::H5File file{output_path, H5F_ACC_TRUNC};
+
+ for (uint32_t subgraph_idx = 0; subgraph_idx < num_subgraph; subgraph_idx++)
+ {
+ reader.select_subgraph(subgraph_idx);
+
+ auto tensors = reader.tensors();
+ for (const auto &tensor : *tensors)
+ {
+ // create a group for each tensor whose name is its tensor name
+ std::string group_name = ::mangle(tensor->name()->c_str());
+ std::unique_ptr<H5::Group> tensor_group =
+ std::make_unique<H5::Group>(file.createGroup(group_name));
+
+ // write a buffer data
+ uint32_t buff_idx = tensor->buffer();
+ auto buff_data_ptr = reader.buffers()->Get(buff_idx)->data();
+ if (buff_data_ptr)
+ {
+ ::write_data_to_hdf5(file, group_name, "weights", ::hdf5_dtype_cast(tensor->type()),
+ buff_data_ptr, ::hdf5_dims_cast(buff_data_ptr, tensor->shape()));
+ }
+
+ // write quantization parameters
+ auto quant_param = tensor->quantization();
+ if (quant_param)
+ {
+ auto min = quant_param->min();
+ ::write_data_to_hdf5(file, group_name, "min", H5::PredType::NATIVE_FLOAT, min,
+ ::hdf5_dims_cast(min));
+ auto max = quant_param->max();
+ ::write_data_to_hdf5(file, group_name, "max", H5::PredType::NATIVE_FLOAT, max,
+ ::hdf5_dims_cast(max));
+ auto scale = quant_param->scale();
+ ::write_data_to_hdf5(file, group_name, "scale", H5::PredType::NATIVE_FLOAT, scale,
+ ::hdf5_dims_cast(scale));
+ auto zero_point = quant_param->zero_point();
+ ::write_data_to_hdf5(file, group_name, "zero_point", H5::PredType::NATIVE_INT64, zero_point,
+ ::hdf5_dims_cast(zero_point));
+ }
+ }
+ }
+}
+
+} // namespace circletensordump
diff --git a/compiler/circle-tensordump/src/Dump.h b/compiler/circle-tensordump/src/Dump.h
new file mode 100644
index 000000000..5dfa59d44
--- /dev/null
+++ b/compiler/circle-tensordump/src/Dump.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_TENSORDUMP_DUMP_H__
+#define __CIRCLE_TENSORDUMP_DUMP_H__
+
+#include <mio/circle/schema_generated.h>
+
+#include <ostream>
+
+namespace circletensordump
+{
+
+class DumpInterface
+{
+public:
+ virtual ~DumpInterface() = default;
+
+public:
+ virtual void run(std::ostream &os, const circle::Model *model,
+ const std::string &output_path = {}) = 0;
+};
+
+class DumpTensors final : public DumpInterface
+{
+public:
+ DumpTensors() = default;
+
+public:
+ void run(std::ostream &os, const circle::Model *model, const std::string &) override;
+};
+
+class DumpTensorsToHdf5 final : public DumpInterface
+{
+public:
+ DumpTensorsToHdf5() = default;
+
+public:
+ void run(std::ostream &os, const circle::Model *model, const std::string &output_path) override;
+};
+
+} // namespace circletensordump
+
+#endif // __CIRCLE_TENSORDUMP_DUMP_H__
diff --git a/compiler/circle-tensordump/src/Reader.cpp b/compiler/circle-tensordump/src/Reader.cpp
new file mode 100644
index 000000000..429736bfe
--- /dev/null
+++ b/compiler/circle-tensordump/src/Reader.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Reader.h"
+
+#include <sstream>
+#include <string>
+
+namespace circletensordump
+{
+
+bool is_valid(const circle::OperatorCode *opcode)
+{
+ circle::BuiltinOperator code = opcode->builtin_code();
+ return (circle::BuiltinOperator_MIN <= code && code <= circle::BuiltinOperator_MAX);
+}
+
+bool is_custom(const circle::OperatorCode *opcode)
+{
+ circle::BuiltinOperator code = opcode->builtin_code();
+ return (code == circle::BuiltinOperator_CUSTOM);
+}
+
+std::string opcode_name(const circle::OperatorCode *opcode)
+{
+ assert(opcode);
+
+ if (!is_valid(opcode))
+ {
+ std::ostringstream oss;
+ oss << "(invalid)";
+ return oss.str();
+ }
+
+ if (is_custom(opcode))
+ {
+ if (!opcode->custom_code())
+ return "(invalid custom)";
+
+ std::string custom_op = "CUSTOM(";
+ custom_op += opcode->custom_code()->c_str();
+ custom_op += ")";
+ return custom_op;
+ }
+
+ circle::BuiltinOperator code = opcode->builtin_code();
+ return circle::EnumNameBuiltinOperator(code);
+}
+
+const char *tensor_type(const circle::Tensor *tensor)
+{
+ return circle::EnumNameTensorType(tensor->type());
+}
+
+const char *tensor_name(const circle::Tensor *tensor)
+{
+ static const char *kEmptyTensorName = "(noname)";
+
+ auto name = tensor->name();
+ if (name)
+ return name->c_str();
+
+ return kEmptyTensorName;
+}
+
+Reader::Reader(const circle::Model *model)
+{
+ _subgraphs = model->subgraphs();
+ _buffers = model->buffers();
+
+ auto opcodes = model->operator_codes();
+ for (const ::circle::OperatorCode *opcode : *opcodes)
+ {
+ _op_codes.push_back(opcode);
+ }
+}
+
+size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data)
+{
+ if (buff_data != nullptr)
+ {
+ *buff_data = nullptr;
+ }
+
+ if (buf_idx == 0)
+ return 0;
+
+ if (auto *buffer = (*_buffers)[buf_idx])
+ {
+ if (auto *array = buffer->data())
+ {
+ if (size_t size = array->size())
+ {
+ if (buff_data != nullptr)
+ {
+ *buff_data = reinterpret_cast<const uint8_t *>(array->data());
+ }
+ return size;
+ }
+ }
+ }
+
+ return 0;
+}
+
+circle::BuiltinOperator Reader::builtin_code(const circle::Operator *op) const
+{
+ uint32_t index = op->opcode_index();
+ assert(index < _op_codes.size());
+ const circle::OperatorCode *opcode = _op_codes.at(index);
+
+ return opcode->builtin_code();
+}
+
+std::string Reader::opcode_name(const circle::Operator *op) const
+{
+ uint32_t index = op->opcode_index();
+ assert(index < _op_codes.size());
+ const circle::OperatorCode *opcode = _op_codes.at(index);
+
+ if (!is_valid(opcode))
+ {
+ std::ostringstream oss;
+ oss << "(invalid: " << index << ")";
+ return oss.str();
+ }
+
+ return circletensordump::opcode_name(opcode);
+}
+
+bool Reader::select_subgraph(uint32_t sgindex)
+{
+ _tensors = nullptr;
+ _operators = nullptr;
+
+ _inputs.clear();
+ _outputs.clear();
+
+ if (_subgraphs->Length() <= sgindex)
+ {
+ assert(false);
+ return false;
+ }
+
+ const circle::SubGraph *subgraph = (*_subgraphs)[sgindex];
+
+ _tensors = subgraph->tensors();
+ _operators = subgraph->operators();
+
+ _inputs = as_index_vector(subgraph->inputs());
+ _outputs = as_index_vector(subgraph->outputs());
+
+ return true;
+}
+
+} // namespace circletensordump
diff --git a/compiler/circle-tensordump/src/Reader.h b/compiler/circle-tensordump/src/Reader.h
new file mode 100644
index 000000000..bbb039552
--- /dev/null
+++ b/compiler/circle-tensordump/src/Reader.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_TENSORDUMP_READER_H__
+#define __CIRCLE_TENSORDUMP_READER_H__
+
+#include <mio/circle/schema_generated.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+namespace circletensordump
+{
+
+template <typename T> std::vector<T> as_index_vector(const flatbuffers::Vector<T> *flat_array)
+{
+ std::vector<T> ret(flat_array->Length());
+ for (uint32_t i = 0; i < flat_array->Length(); i++)
+ {
+ ret[i] = flat_array->Get(i);
+ }
+ return ret;
+}
+
+bool is_valid(const circle::OperatorCode *opcode);
+bool is_custom(const circle::OperatorCode *opcode);
+std::string opcode_name(const circle::OperatorCode *opcode);
+const char *tensor_type(const circle::Tensor *tensor);
+const char *tensor_name(const circle::Tensor *tensor);
+
+/**
+ * @brief Loads Circle file and provides helpers to access attributes
+ */
+class Reader
+{
+private:
+ using CircleSubGraphs_t = flatbuffers::Vector<flatbuffers::Offset<circle::SubGraph>>;
+ using CircleBuffers_t = flatbuffers::Vector<flatbuffers::Offset<circle::Buffer>>;
+ using CircleTensors_t = flatbuffers::Vector<flatbuffers::Offset<circle::Tensor>>;
+ using CircleOperators_t = flatbuffers::Vector<flatbuffers::Offset<circle::Operator>>;
+
+public:
+ Reader(const circle::Model *model);
+
+ Reader() = delete;
+
+public:
+ const std::vector<const circle::OperatorCode *> &opcodes() { return _op_codes; }
+ const CircleBuffers_t *buffers() { return _buffers; }
+ const CircleTensors_t *tensors() { return _tensors; }
+ const CircleOperators_t *operators() { return _operators; }
+ const std::vector<int32_t> &inputs() const { return _inputs; }
+ const std::vector<int32_t> &outputs() const { return _outputs; }
+
+ uint32_t num_subgraph() const { return _subgraphs->Length(); }
+
+ size_t buffer_info(uint32_t buf_idx, const uint8_t **buff_data);
+ circle::BuiltinOperator builtin_code(const circle::Operator *op) const;
+ std::string opcode_name(const circle::Operator *op) const;
+
+public:
+ bool select_subgraph(uint32_t subgraph);
+
+private:
+ const CircleSubGraphs_t *_subgraphs{nullptr};
+ const CircleBuffers_t *_buffers{nullptr};
+ const CircleTensors_t *_tensors{nullptr};
+ const CircleOperators_t *_operators{nullptr};
+
+ std::vector<const circle::OperatorCode *> _op_codes;
+ std::vector<int32_t> _inputs;
+ std::vector<int32_t> _outputs;
+};
+
+} // namespace circletensordump
+
+#endif // __CIRCLE_TENSORDUMP_READER_H__
diff --git a/compiler/circle-verify/CMakeLists.txt b/compiler/circle-verify/CMakeLists.txt
index 2e19951e1..f22174865 100644
--- a/compiler/circle-verify/CMakeLists.txt
+++ b/compiler/circle-verify/CMakeLists.txt
@@ -6,7 +6,8 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(circle-verify ${SOURCES})
target_include_directories(circle-verify PRIVATE src)
+target_link_libraries(circle-verify arser)
target_link_libraries(circle-verify mio_circle)
target_link_libraries(circle-verify safemain)
target_link_libraries(circle-verify cwrap)
-target_link_libraries(circle-verify stdex)
+target_link_libraries(circle-verify foder)
diff --git a/compiler/circle-verify/requires.cmake b/compiler/circle-verify/requires.cmake
index 2509b6931..e1b7fb212 100644
--- a/compiler/circle-verify/requires.cmake
+++ b/compiler/circle-verify/requires.cmake
@@ -1,4 +1,5 @@
+require("arser")
require("mio-circle")
require("safemain")
require("cwrap")
-require("stdex")
+require("foder")
diff --git a/compiler/circle-verify/src/Driver.cpp b/compiler/circle-verify/src/Driver.cpp
index ad13e504f..1af31d986 100644
--- a/compiler/circle-verify/src/Driver.cpp
+++ b/compiler/circle-verify/src/Driver.cpp
@@ -16,23 +16,31 @@
#include "VerifyFlatBuffers.h"
-#include <stdex/Memory.h>
+#include <arser/arser.h>
#include <iostream>
+#include <memory>
#include <string>
int entry(int argc, char **argv)
{
- if (argc != 2)
+ arser::Arser arser;
+ arser.add_argument("circle").type(arser::DataType::STR).help("Circle file path to verify");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [circle]" << std::endl;
- return 255;
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
}
- auto verifier = stdex::make_unique<VerifyFlatbuffers>();
- std::string model_file = argv[argc - 1];
+ auto verifier = std::make_unique<VerifyFlatbuffers>();
+
+ std::string model_file = arser.get<std::string>("circle");
std::cout << "[ RUN ] Check " << model_file << std::endl;
diff --git a/compiler/circle-verify/src/Model.cpp b/compiler/circle-verify/src/Model.cpp
deleted file mode 100644
index efac1210d..000000000
--- a/compiler/circle-verify/src/Model.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Model.h"
-
-#include <cwrap/Fildes.h>
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-
-namespace
-{
-
-class MemoryMappedModel final : public ModelData
-{
-public:
- /**
- * @require fd and data SHOULD be valid
- */
- explicit MemoryMappedModel(int fd, void *data, size_t size) : _fd{fd}, _data{data}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- ~MemoryMappedModel()
- {
- munmap(_data, _size);
- close(_fd);
- }
-
-public:
- MemoryMappedModel(const MemoryMappedModel &) = delete;
- MemoryMappedModel(MemoryMappedModel &&) = delete;
-
-public:
- const void *data(void) const override { return _data; };
- const size_t size(void) const override { return _size; };
-
-private:
- int _fd = -1;
- void *_data = nullptr;
- size_t _size = 0;
-};
-
-} // namespace
-
-std::unique_ptr<ModelData> load_modeldata(const std::string &path)
-{
- cwrap::Fildes fd(open(path.c_str(), O_RDONLY));
-
- if (fd.get() == -1)
- {
- // Return nullptr on open failure
- return nullptr;
- }
-
- struct stat st;
- if (fstat(fd.get(), &st) == -1)
- {
- // Return nullptr on fstat failure
- return nullptr;
- }
-
- auto size = st.st_size;
- auto data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd.get(), 0);
-
- if (data == MAP_FAILED)
- {
- // Return nullptr on mmap failure
- return nullptr;
- }
-
- return std::unique_ptr<ModelData>{new MemoryMappedModel(fd.release(), data, size)};
-}
diff --git a/compiler/circle-verify/src/Model.h b/compiler/circle-verify/src/Model.h
deleted file mode 100644
index e1bd83971..000000000
--- a/compiler/circle-verify/src/Model.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MODEL_H__
-#define __MODEL_H__
-
-#include <memory>
-#include <string>
-
-struct ModelData
-{
- virtual ~ModelData() = default;
-
- virtual const void *data(void) const = 0;
- virtual const size_t size(void) const = 0;
-};
-
-/**
- * @brief Load Circle model (as a raw data) from a given path
- *
- * @note May return a nullptr
- */
-std::unique_ptr<ModelData> load_modeldata(const std::string &path);
-
-#endif // __MODEL_H__
diff --git a/compiler/circle-verify/src/VerifyFlatBuffers.cpp b/compiler/circle-verify/src/VerifyFlatBuffers.cpp
index 36b16685f..e8557d2ef 100644
--- a/compiler/circle-verify/src/VerifyFlatBuffers.cpp
+++ b/compiler/circle-verify/src/VerifyFlatBuffers.cpp
@@ -16,16 +16,16 @@
#include "VerifyFlatBuffers.h"
-#include "Model.h"
-
+#include <foder/FileLoader.h>
#include <mio/circle/schema_generated.h>
int VerifyFlatbuffers::run(const std::string &model_file)
{
- auto modeldata = load_modeldata(model_file);
+ foder::FileLoader fileLoader{model_file};
+ std::vector<char> modeldata = fileLoader.load();
- const uint8_t *data = reinterpret_cast<const uint8_t *>(modeldata->data());
- flatbuffers::Verifier verifier{data, modeldata->size()};
+ const uint8_t *data = reinterpret_cast<const uint8_t *>(modeldata.data());
+ flatbuffers::Verifier verifier{data, modeldata.size()};
if (!circle::VerifyModelBuffer(verifier))
{
diff --git a/compiler/circle2circle-dredd-recipe-test/CMakeLists.txt b/compiler/circle2circle-dredd-recipe-test/CMakeLists.txt
new file mode 100644
index 000000000..6663cb938
--- /dev/null
+++ b/compiler/circle2circle-dredd-recipe-test/CMakeLists.txt
@@ -0,0 +1,185 @@
+nnas_include(TargetRequire)
+
+unset(REQUIRED_TARGETS)
+list(APPEND REQUIRED_TARGETS circlechef)
+list(APPEND REQUIRED_TARGETS circle-inspect)
+list(APPEND REQUIRED_TARGETS circle-verify)
+list(APPEND REQUIRED_TARGETS circle2circle)
+list(APPEND REQUIRED_TARGETS dredd_rule_lib)
+list(APPEND REQUIRED_TARGETS tflchef)
+list(APPEND REQUIRED_TARGETS tflite2circle)
+TargetRequire_Return(${REQUIRED_TARGETS})
+
+nncc_find_resource(TensorFlowLiteRecipes)
+nncc_find_resource(CircleRecipes)
+
+set(TFLITE_RECIPE_REPO "${TensorFlowLiteRecipes_DIR}")
+set(CIRCLE_RECIPE_REPO "${CircleRecipes_DIR}")
+unset(RECIPE_REPO)
+
+set(TEST_RECIPE_FILENAME "test.recipe")
+set(TEST_RULE_FILENAME "test.rule")
+
+unset(TEST_DEPS)
+unset(TEST_NAMES)
+
+set(options "")
+set(oneValueArgs "")
+set(multiValueArgs PASS)
+
+macro(Add RECIPE)
+ if(NOT EXISTS "${TFLITE_RECIPE_REPO}/${RECIPE}/test.recipe")
+ if(NOT EXISTS "${CIRCLE_RECIPE_REPO}/${RECIPE}/test.recipe")
+ message(FATAL_ERROR "Missing recipe of '${RECIPE}' test")
+ else()
+ set(RECIPE_REPO ${CIRCLE_RECIPE_REPO})
+ endif()
+ else()
+ set(RECIPE_REPO ${TFLITE_RECIPE_REPO})
+ endif()
+
+ if(NOT EXISTS "${RECIPE_REPO}/${RECIPE}/test.rule")
+ message(FATAL_ERROR "Missing rule of '${RECIPE}' test")
+ endif()
+
+ cmake_parse_arguments(ARG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+ unset(OPT_OPTIONS)
+ foreach(src ${ARG_PASS})
+ # option = "--${src}"
+ list(APPEND OPT_OPTIONS "--${src}")
+ endforeach(src ${ARG_PASS})
+
+ set(RECIPE_FILE "${RECIPE}.recipe")
+ set(RECIPE_SOURCE_PATH "${RECIPE_REPO}/${RECIPE}/${TEST_RECIPE_FILENAME}")
+ set(RECIPE_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/${RECIPE_FILE}")
+
+ set(RULE_FILE "${RECIPE}.rule")
+ set(RULE_SOURCE_PATH "${RECIPE_REPO}/${RECIPE}/${TEST_RULE_FILENAME}")
+ set(RULE_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/${RULE_FILE}")
+
+ set(TFLITE_FILE "${RECIPE}.tflite")
+ set(TFLITE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${TFLITE_FILE}")
+
+ set(CIRCLE_FILE "${RECIPE}.circle")
+ set(CIRCLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${CIRCLE_FILE}")
+
+ set(OPT_CIRCLE_FILE "${RECIPE}.opt.circle")
+ set(OPT_CIRCLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${OPT_CIRCLE_FILE}")
+
+ # Copy .recipe
+ add_custom_command(OUTPUT ${RECIPE_BINARY_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${RECIPE_SOURCE_PATH}" "${RECIPE_BINARY_PATH}"
+ DEPENDS ${RECIPE_SOURCE_PATH}
+ COMMENT "Generate ${RECIPE_FILE}"
+ )
+
+ # Copy .rule
+ add_custom_command(OUTPUT ${RULE_BINARY_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${RULE_SOURCE_PATH}" "${RULE_BINARY_PATH}"
+ DEPENDS ${RULE_SOURCE_PATH}
+ COMMENT "Generate ${RULE_FILE}"
+ )
+
+ if(${RECIPE_REPO} STREQUAL ${TFLITE_RECIPE_REPO})
+ # Generate .tflite
+ add_custom_command(OUTPUT ${TFLITE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:tflchef-file> ${RECIPE_BINARY_PATH} ${TFLITE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:tflchef-file> ${RECIPE_BINARY_PATH}
+ COMMENT "Generate ${TFLITE_FILE}"
+ )
+
+ # Generate .circle
+ add_custom_command(OUTPUT ${CIRCLE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:tflite2circle> ${TFLITE_OUTPUT_PATH} ${CIRCLE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:tflite2circle> ${TFLITE_OUTPUT_PATH}
+ COMMENT "Generate ${CIRCLE_FILE}"
+ )
+
+ list(APPEND TEST_DEPS ${TFLITE_OUTPUT_PATH})
+ else()
+ # Generate .circle
+ add_custom_command(OUTPUT ${CIRCLE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:circlechef-file> ${RECIPE_BINARY_PATH} ${CIRCLE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:circlechef-file> ${RECIPE_BINARY_PATH}
+ COMMENT "Generate ${CIRCLE_FILE}"
+ )
+ endif()
+
+ # Generate optimized .circle
+ add_custom_command(OUTPUT ${OPT_CIRCLE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:circle2circle> ${OPT_OPTIONS} ${CIRCLE_OUTPUT_PATH} ${OPT_CIRCLE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:circle2circle> ${CIRCLE_OUTPUT_PATH}
+ COMMENT "Generate ${OPT_CIRCLE_FILE}"
+ )
+
+ list(APPEND TEST_DEPS ${RECIPE_BINARY_PATH} ${RULE_BINARY_PATH}
+ ${CIRCLE_OUTPUT_PATH} ${OPT_CIRCLE_OUTPUT_PATH})
+ list(APPEND TEST_NAMES ${RECIPE})
+endmacro(Add)
+
+# Read "test.lst"
+include("test.lst")
+
+##
+## Copy testall
+##
+set(TEST_RUNNER "${CMAKE_CURRENT_BINARY_DIR}/testall.sh")
+set(TEST_RUNNER_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/testall.sh")
+
+add_custom_command(
+ OUTPUT ${TEST_RUNNER}
+ COMMAND ${CMAKE_COMMAND} -E copy "${TEST_RUNNER_SOURCE}" "${TEST_RUNNER}"
+ DEPENDS ${TEST_RUNNER_SOURCE}
+ COMMENT "Generate test runner"
+)
+
+list(APPEND TEST_DEPS "${TEST_RUNNER}")
+
+###
+### Generate test.config
+###
+set(TEST_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/test.config")
+
+add_custom_command(
+ OUTPUT ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E remove -f ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'CIRCLE_INSPECT_PATH=\"$<TARGET_FILE:circle-inspect>\"' >> ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'CIRCLE_VERIFY_PATH=\"$<TARGET_FILE:circle-verify>\"' >> ${TEST_CONFIG}
+ DEPENDS
+ circle-inspect
+ circle-verify
+ COMMENT "Generate test configuration"
+)
+
+list(APPEND TEST_DEPS "${TEST_CONFIG}")
+
+#
+# copy rule-lib.sh (a library of shell script functions)
+#
+
+# getting path for rule-lib.sh in dredd-rule-lib
+get_target_property(DREDD_RULE_LIB_DIR dredd_rule_lib BINARY_DIR)
+
+set(RULE_LIB_SOURCE_PATH "${DREDD_RULE_LIB_DIR}/rule-lib.sh")
+set(RULE_LIB_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/rule-lib.sh")
+
+add_custom_command(
+ OUTPUT ${RULE_LIB_BINARY_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${RULE_LIB_SOURCE_PATH}" "${RULE_LIB_BINARY_PATH}"
+ DEPENDS ${RULE_LIB_SOURCE_PATH}
+ COMMENT "Generate rule lib"
+)
+
+list(APPEND TEST_DEPS "${RULE_LIB_BINARY_PATH}")
+
+# Generate dependencies
+add_custom_target(circle2circle_dredd_recipe_test ALL DEPENDS ${TEST_DEPS})
+
+# Run tests
+add_test(
+ NAME circle2circle_dredd_recipe_test
+ COMMAND "${TEST_RUNNER}"
+ "${TEST_CONFIG}"
+ "${CMAKE_CURRENT_BINARY_DIR}"
+ ${TEST_NAMES}
+)
diff --git a/compiler/circle2circle-dredd-recipe-test/README.md b/compiler/circle2circle-dredd-recipe-test/README.md
new file mode 100644
index 000000000..85140a8d1
--- /dev/null
+++ b/compiler/circle2circle-dredd-recipe-test/README.md
@@ -0,0 +1,21 @@
+# circle2circle-dredd-recipe-test
+
+It tests the non-functional conditions of the optimized circle binary resulting from circle2circle.
+
+This test basically refers to the _TensorFlowLiteRecipes_ resource. So you should add what you want to test to both of the resource and `test.lst`.
+
+## Example
+
+```
+# TensorFlowLiteRecipes
+res/TensorFlowLiteRecipes/BatchMatMulV2_000
+├── test.recipe # What you want to test
+└── test.rule # Non-functional conditions to be satisfied
+
+# test.lst
+...
+Add(BatchMatMulV2_000 PASS resolve_customop_batchmatmul)
+...
+```
+
+For more information on the rules, see _dredd-rule-lib_ module.
diff --git a/compiler/circle2circle-dredd-recipe-test/requires.cmake b/compiler/circle2circle-dredd-recipe-test/requires.cmake
new file mode 100644
index 000000000..e4a5b71a7
--- /dev/null
+++ b/compiler/circle2circle-dredd-recipe-test/requires.cmake
@@ -0,0 +1,7 @@
+require("circlechef")
+require("circle2circle")
+require("circle-inspect")
+require("circle-verify")
+require("dredd-rule-lib")
+require("tflchef")
+require("tflite2circle")
diff --git a/compiler/circle2circle-dredd-recipe-test/test.lst b/compiler/circle2circle-dredd-recipe-test/test.lst
new file mode 100644
index 000000000..202f66938
--- /dev/null
+++ b/compiler/circle2circle-dredd-recipe-test/test.lst
@@ -0,0 +1,20 @@
+## EXAMPLE
+#
+# Add(RECIPE_REPO PASS pass1 pass2 ...)
+#
+## SUPPORTED PASS
+#
+# fuse_instnorm
+# resolve_customop_batchmatmul
+# resolve_customop_matmul
+
+## TFLITE RECIPE
+
+Add(Net_InstanceNorm_001 PASS fuse_instnorm)
+# Add(Net_InstanceNorm_002 PASS fuse_instnorm)
+Add(BatchMatMulV2_000 PASS resolve_customop_batchmatmul)
+Add(MatMul_000 PASS resolve_customop_matmul)
+
+## CIRCLE RECIPE
+
+Add(CircleBatchMatMul_000)
diff --git a/compiler/circle2circle-dredd-recipe-test/testall.sh b/compiler/circle2circle-dredd-recipe-test/testall.sh
new file mode 100755
index 000000000..33a2036bb
--- /dev/null
+++ b/compiler/circle2circle-dredd-recipe-test/testall.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# Need at least 2 arguments
+if [[ $# -lt 2 ]]; then
+ echo "USAGE: $0 ..."
+ echo
+ echo "ARGUMENTS:"
+ echo " [test.config path]"
+ echo " [WORKDIR]"
+ echo " [Prefix1]"
+ echo " [Prefix2]"
+ echo " ..."
+ exit 255
+fi
+
+CONFIG_PATH="$1"; shift
+WORKDIR="$1"; shift
+
+source "${CONFIG_PATH}"
+
+echo "-- Found circle-inspect: ${CIRCLE_INSPECT_PATH}"
+echo "-- Found circle-verify: ${CIRCLE_VERIFY_PATH}"
+echo "-- Found circle2circle: ${CIRCLE2CIRCLE_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+while [[ $# -ne 0 ]]; do
+ PREFIX="$1"; shift
+
+ TESTED+=("${PREFIX}")
+
+ PASSED_TAG="${PREFIX}.passed"
+
+ rm -f "${PASSED_TAG}"
+
+ cat > "${PREFIX}.log" <(
+ exec 2>&1
+
+ echo "-- Found tflite: ${PREFIX}.tflite"
+
+ # Exit immediately if any command fails
+ set -e
+ # Show commands
+ set -x
+
+ #
+ # Check if rule is satisfied
+ #
+
+ # Note: turn off 'command printing'. Otherwise printing will be so messy
+ set +x
+
+ # (COMPILED_FILE, INSPECT_PROG_PATH, VERIFY_PROG_PATH, ERROR_LOG) must be set for rule-lib.sh
+ COMPILED_FILE="${WORKDIR}/${PREFIX}.opt.circle"
+ INSPECT_PROG_PATH=${CIRCLE_INSPECT_PATH}
+ VERIFY_PROG_PATH=${CIRCLE_VERIFY_PATH}
+ ERROR_LOG="${PREFIX}.error"
+
+ rm -f "${ERROR_LOG}"
+
+ # in case error while running rule-lib.sh, prints error msg
+ trap 'echo "** ERROR **" ; cat "${ERROR_LOG}"' ERR
+
+ source rule-lib.sh
+ source "${PREFIX}.rule"
+
+ # unset
+ trap - ERR
+ set -x
+
+ # At this point, the exit code of all commands is 0
+ # If not 0, execution of this script ends because of "set -e"
+ touch "${PASSED_TAG}"
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$PREFIX")
+ else
+ FAILED+=("$PREFIX")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/circle2circle/CMakeLists.txt b/compiler/circle2circle/CMakeLists.txt
index 644179941..7b2bf9b02 100644
--- a/compiler/circle2circle/CMakeLists.txt
+++ b/compiler/circle2circle/CMakeLists.txt
@@ -5,18 +5,20 @@ list(REMOVE_ITEM SOURCES ${TESTS})
add_executable(circle2circle "${SOURCES}")
target_include_directories(circle2circle PRIVATE include)
target_include_directories(circle2circle PRIVATE src)
+target_link_libraries(circle2circle foder)
target_link_libraries(circle2circle nncc_common)
target_link_libraries(circle2circle safemain)
-target_link_libraries(circle2circle stdex)
target_link_libraries(circle2circle oops)
target_link_libraries(circle2circle hermes)
target_link_libraries(circle2circle hermes_std)
target_link_libraries(circle2circle loco)
target_link_libraries(circle2circle mio_circle)
+target_link_libraries(circle2circle luci_env)
target_link_libraries(circle2circle luci_import)
target_link_libraries(circle2circle luci_service)
target_link_libraries(circle2circle luci_pass)
target_link_libraries(circle2circle luci_export)
+target_link_libraries(circle2circle arser)
install(TARGETS circle2circle DESTINATION bin)
@@ -29,14 +31,16 @@ nnas_find_package(GTest REQUIRED)
GTest_AddTest(circle2circle_test ${TESTS} ${SOURCES})
target_include_directories(circle2circle_test PRIVATE include)
target_include_directories(circle2circle_test PRIVATE src)
+target_link_libraries(circle2circle_test foder)
target_link_libraries(circle2circle_test nncc_common)
-target_link_libraries(circle2circle_test stdex)
target_link_libraries(circle2circle_test oops)
target_link_libraries(circle2circle_test hermes)
target_link_libraries(circle2circle_test hermes_std)
target_link_libraries(circle2circle_test loco)
target_link_libraries(circle2circle_test mio_circle)
+target_link_libraries(circle2circle_test luci_env)
target_link_libraries(circle2circle_test luci_import)
target_link_libraries(circle2circle_test luci_service)
target_link_libraries(circle2circle_test luci_pass)
target_link_libraries(circle2circle_test luci_export)
+target_link_libraries(circle2circle_test arser)
diff --git a/compiler/circle2circle/README.md b/compiler/circle2circle/README.md
index 7bc1b7f59..3e94d2540 100644
--- a/compiler/circle2circle/README.md
+++ b/compiler/circle2circle/README.md
@@ -1,3 +1,3 @@
# circle2circle
-_circle2circle_ provides Circle optimizations and quantizations as executable tool
+_circle2circle_ provides Circle optimizations as executable tool
diff --git a/compiler/circle2circle/requires.cmake b/compiler/circle2circle/requires.cmake
index 5b1e657ca..8cbb90dbf 100644
--- a/compiler/circle2circle/requires.cmake
+++ b/compiler/circle2circle/requires.cmake
@@ -1,10 +1,11 @@
+require("foder")
require("loco")
require("locop")
require("logo-core")
-require("stdex")
require("safemain")
require("mio-circle")
require("oops")
require("hermes")
require("hermes-std")
require("luci")
+require("arser")
diff --git a/compiler/circle2circle/src/Circle2Circle.cpp b/compiler/circle2circle/src/Circle2Circle.cpp
index 781825fdd..6888d26e3 100644
--- a/compiler/circle2circle/src/Circle2Circle.cpp
+++ b/compiler/circle2circle/src/Circle2Circle.cpp
@@ -14,91 +14,145 @@
* limitations under the License.
*/
-#include "Model.h"
#include "CircleExpContract.h"
+#include <foder/FileLoader.h>
+
#include <luci/Importer.h>
#include <luci/CircleOptimizer.h>
#include <luci/Service/Validate.h>
#include <luci/CircleExporter.h>
+#include <luci/UserSettings.h>
-#include <stdex/Memory.h>
#include <oops/InternalExn.h>
+#include <arser/arser.h>
#include <functional>
#include <iostream>
-#include <map>
#include <string>
-using OptionHook = std::function<int(const char **)>;
-
using Algorithms = luci::CircleOptimizer::Options::Algorithm;
-
-void print_help(const char *progname)
-{
- std::cerr << "USAGE: " << progname << " [options] input output" << std::endl;
- std::cerr << " --fuse_instnorm : Enable FuseInstanceNormalization Pass" << std::endl;
- std::cerr << std::endl;
-}
+using AlgorithmParameters = luci::CircleOptimizer::Options::AlgorithmParameters;
int entry(int argc, char **argv)
{
- if (argc < 3)
- {
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- print_help(argv[0]);
- return 255;
- }
-
// Simple argument parser (based on map)
- std::map<std::string, OptionHook> argparse;
luci::CircleOptimizer optimizer;
auto options = optimizer.options();
-
- // TODO merge this with help message
- argparse["--fuse_instnorm"] = [&options](const char **) {
- options->enable(Algorithms::FuseInstanceNorm);
- return 0;
- };
-
- for (int n = 1; n < argc - 2; ++n)
+ auto settings = luci::UserSettings::settings();
+
+ arser::Arser arser("circle2circle provides circle model optimization and transformations");
+
+ arser.add_argument("--all").nargs(0).required(false).default_value(false).help(
+ "Enable all optimize options");
+
+ arser.add_argument("--fuse_bcq")
+ .nargs(0)
+ .required(false)
+ .default_value(false)
+ .help("This will fuse operators and apply Binary Coded Quantization");
+
+ arser.add_argument("--fuse_instnorm")
+ .nargs(0)
+ .required(false)
+ .default_value(false)
+ .help("This will fuse operators to InstanceNorm operator");
+
+ arser.add_argument("--resolve_customop_add")
+ .nargs(0)
+ .required(false)
+ .default_value(false)
+ .help("This will convert Custom(Add) to Add operator");
+
+ arser.add_argument("--resolve_customop_batchmatmul")
+ .nargs(0)
+ .required(false)
+ .default_value(false)
+ .help("This will convert Custom(BatchMatmul) to BatchMatmul operator");
+
+ arser.add_argument("--resolve_customop_matmul")
+ .nargs(0)
+ .required(false)
+ .default_value(false)
+ .help("This will convert Custom(Matmul) to Matmul operator");
+
+ arser.add_argument("--mute_warnings")
+ .nargs(0)
+ .required(false)
+ .default_value(false)
+ .help("This will turn off warning messages");
+
+ arser.add_argument("--disable_validation")
+ .nargs(0)
+ .required(false)
+ .default_value(false)
+    .help("This will turn off operator validations. May help input model investigation.");
+
+ arser.add_argument("input").nargs(1).type(arser::DataType::STR).help("Input circle model");
+ arser.add_argument("output").nargs(1).type(arser::DataType::STR).help("Output circle model");
+
+ try
{
- const std::string tag{argv[n]};
- auto it = argparse.find(tag);
- if (it == argparse.end())
- {
- std::cerr << "Option '" << tag << "' is not supported" << std::endl;
- std::cerr << std::endl;
- print_help(argv[0]);
- return 255;
- }
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 255;
+ }
- n += it->second((const char **)&argv[n + 1]);
+ if (arser.get<bool>("--all"))
+ {
+ options->enable(Algorithms::FuseBCQ);
+ options->enable(Algorithms::FuseInstanceNorm);
+ options->enable(Algorithms::ResolveCustomOpAdd);
+ options->enable(Algorithms::ResolveCustomOpBatchMatMul);
+ options->enable(Algorithms::ResolveCustomOpMatMul);
}
+ if (arser.get<bool>("--fuse_bcq"))
+ options->enable(Algorithms::FuseBCQ);
+ if (arser.get<bool>("--fuse_instnorm"))
+ options->enable(Algorithms::FuseInstanceNorm);
+ if (arser.get<bool>("--resolve_customop_add"))
+ options->enable(Algorithms::ResolveCustomOpAdd);
+ if (arser.get<bool>("--resolve_customop_batchmatmul"))
+ options->enable(Algorithms::ResolveCustomOpBatchMatMul);
+ if (arser.get<bool>("--resolve_customop_matmul"))
+ options->enable(Algorithms::ResolveCustomOpMatMul);
+
+ if (arser.get<bool>("--mute_warnings"))
+ settings->set(luci::UserSettings::Key::MuteWarnings, true);
+ if (arser.get<bool>("--disable_validation"))
+ settings->set(luci::UserSettings::Key::DisableValidation, true);
- std::string input_path = argv[argc - 2];
- std::string output_path = argv[argc - 1];
+ std::string input_path = arser.get<std::string>("input");
+ std::string output_path = arser.get<std::string>("output");
// Load model from the file
- std::unique_ptr<luci::Model> model = luci::load_model(input_path);
- if (model == nullptr)
+ foder::FileLoader file_loader{input_path};
+ std::vector<char> model_data;
+
+ try
{
- std::cerr << "ERROR: Failed to load '" << input_path << "'" << std::endl;
- return 255;
+ model_data = file_loader.load();
}
-
- const circle::Model *input_model = model->model();
- if (input_model == nullptr)
+ catch (const std::runtime_error &err)
{
- std::cerr << "ERROR: Failed to read '" << input_path << "'" << std::endl;
- return 255;
+ std::cerr << err.what() << std::endl;
+ return EXIT_FAILURE;
+ }
+ const circle::Model *circle_model = circle::GetModel(model_data.data());
+ if (circle_model == nullptr)
+ {
+ std::cerr << "ERROR: Failed to load circle '" << input_path << "'" << std::endl;
+ return EXIT_FAILURE;
}
// Import from input Circle file
luci::Importer importer;
- auto module = importer.importModule(input_model);
+ auto module = importer.importModule(circle_model);
for (size_t idx = 0; idx < module->size(); ++idx)
{
@@ -108,7 +162,10 @@ int entry(int argc, char **argv)
optimizer.optimize(graph);
if (!luci::validate(graph))
+ {
+ std::cerr << "ERROR: Optimized graph is invalid" << std::endl;
return 255;
+ }
}
// Export to output Circle file
@@ -116,5 +173,11 @@ int entry(int argc, char **argv)
CircleExpContract contract(module.get(), output_path);
- return exporter.invoke(&contract) ? 0 : 255;
+ if (!exporter.invoke(&contract))
+ {
+ std::cerr << "ERROR: Failed to export '" << output_path << "'" << std::endl;
+ return 255;
+ }
+
+ return 0;
}
diff --git a/compiler/circle2circle/src/Circle2Circle.test.cpp b/compiler/circle2circle/src/Circle2Circle.test.cpp
index 015358ae7..d8a3ed030 100644
--- a/compiler/circle2circle/src/Circle2Circle.test.cpp
+++ b/compiler/circle2circle/src/Circle2Circle.test.cpp
@@ -25,5 +25,5 @@ TEST(Circle2CircleTest, NoArg_NEG)
::testing::internal::CaptureStdout();
int result = entry(1, argv.argv());
- ASSERT_EQ(result, 255);
+ ASSERT_EQ(255, result);
}
diff --git a/compiler/circle2circle/src/Model.cpp b/compiler/circle2circle/src/Model.cpp
deleted file mode 100644
index 20d55a131..000000000
--- a/compiler/circle2circle/src/Model.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Model.h"
-
-#include <fstream>
-#include <vector>
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-
-namespace
-{
-
-class FileModel final : public luci::Model
-{
-public:
- explicit FileModel(const std::string &filename) : _filename(filename) {}
-
-public:
- FileModel(const FileModel &) = delete;
- FileModel(FileModel &&) = delete;
-
-public:
- const ::circle::Model *model(void) override
- {
- std::ifstream file(_filename, std::ios::binary | std::ios::in);
- if (!file.good())
- return nullptr;
-
- file.unsetf(std::ios::skipws);
-
- std::streampos fileSize;
- file.seekg(0, std::ios::end);
- fileSize = file.tellg();
- file.seekg(0, std::ios::beg);
-
- // reserve capacity
- _data.reserve(fileSize);
-
- // read the data
- file.read(_data.data(), fileSize);
- if (file.fail())
- return nullptr;
-
- return ::circle::GetModel(_data.data());
- }
-
-private:
- const std::string _filename;
- std::vector<char> _data;
-};
-
-} // namespace
-
-namespace luci
-{
-
-std::unique_ptr<Model> load_model(const std::string &path)
-{
- return std::unique_ptr<Model>{new FileModel(path)};
-}
-
-} // namespace luci
diff --git a/compiler/circlechef/CMakeLists.txt b/compiler/circlechef/CMakeLists.txt
new file mode 100644
index 000000000..cba7d0a4e
--- /dev/null
+++ b/compiler/circlechef/CMakeLists.txt
@@ -0,0 +1,21 @@
+nnas_find_package(Protobuf QUIET)
+
+if(NOT Protobuf_FOUND)
+ return()
+endif(NOT Protobuf_FOUND)
+
+if(NOT TARGET mio_circle)
+ return()
+endif(NOT TARGET mio_circle)
+
+# Recipe Parser
+add_subdirectory(proto)
+# Log
+add_subdirectory(log)
+# Core Library
+add_subdirectory(core)
+# Circle Library
+add_subdirectory(circle)
+# Tools
+add_subdirectory(tools)
+add_subdirectory(tests)
diff --git a/compiler/circlechef/README.md b/compiler/circlechef/README.md
new file mode 100644
index 000000000..1871a0660
--- /dev/null
+++ b/compiler/circlechef/README.md
@@ -0,0 +1,8 @@
+# circlechef
+
+## What is circlechef?
+
+Do you need a circle model for testing? Ask it to _circlechef_.
+Given a recipe, _circlechef_ will cook a circle model for you.
+
+**NOTE** _circlechef_ covers only what _tflchef_ does not cover. This is to support ops that exist only in the circle schema; other things can be made using _tflchef_ and _tflite2circle_.
diff --git a/compiler/circlechef/circle/CMakeLists.txt b/compiler/circlechef/circle/CMakeLists.txt
new file mode 100644
index 000000000..75165ada3
--- /dev/null
+++ b/compiler/circlechef/circle/CMakeLists.txt
@@ -0,0 +1,9 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(circlechef_circle STATIC ${SOURCES})
+target_include_directories(circlechef_circle PUBLIC include)
+target_include_directories(circlechef_circle PRIVATE src)
+target_link_libraries(circlechef_circle circlechef_proto)
+target_link_libraries(circlechef_circle mio_circle)
+target_link_libraries(circlechef_circle stdex)
+target_link_libraries(circlechef_circle cwrap)
diff --git a/compiler/circlechef/circle/include/circlechef/RecipeChef.h b/compiler/circlechef/circle/include/circlechef/RecipeChef.h
new file mode 100644
index 000000000..d3cafa282
--- /dev/null
+++ b/compiler/circlechef/circle/include/circlechef/RecipeChef.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RECIPE_CHEF_H__
+#define __RECIPE_CHEF_H__
+
+#include <mio/circle/schema_generated.h>
+#include <circlechef.pb.h>
+
+#include <memory>
+#include <string>
+
+namespace circlechef
+{
+
+/**
+ * @brief Create ModelRecipe from circle::Model
+ */
+std::unique_ptr<ModelRecipe> generate_recipe(const circle::Model *model);
+
+/**
+ * @brief Write ModelRecipe to file with given name
+ */
+bool write_recipe(const std::string &filename, std::unique_ptr<ModelRecipe> &recipe);
+
+} // namespace circlechef
+
+#endif // __RECIPE_CHEF_H__
diff --git a/compiler/circlechef/circle/src/CircleImport.cpp b/compiler/circlechef/circle/src/CircleImport.cpp
new file mode 100644
index 000000000..e970fbce3
--- /dev/null
+++ b/compiler/circlechef/circle/src/CircleImport.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CircleImport.h"
+
+#include "Convert.h"
+
+#include <sstream>
+
+namespace circlechef
+{
+
+const char *kEmptyTensorName = "(noname)";
+
+const char *tensor_type(const circle::Tensor *tensor)
+{
+ return circle::EnumNameTensorType(tensor->type());
+}
+
+const char *tensor_name(const circle::Tensor *tensor)
+{
+ auto name = tensor->name();
+ if (name)
+ return name->c_str();
+ return kEmptyTensorName;
+}
+
+bool is_valid(const circle::OperatorCode *opcode)
+{
+ circle::BuiltinOperator code = opcode->builtin_code();
+ return (circle::BuiltinOperator_MIN <= code && code <= circle::BuiltinOperator_MAX);
+}
+
+bool is_custom(const circle::OperatorCode *opcode)
+{
+ circle::BuiltinOperator code = opcode->builtin_code();
+ return (code == circle::BuiltinOperator_CUSTOM);
+}
+
+CircleImport::CircleImport(const circle::Model *model)
+{
+ _subgraphs = model->subgraphs();
+ _buffers = model->buffers();
+
+ auto opcodes = model->operator_codes();
+ for (const ::circle::OperatorCode *opcode : *opcodes)
+ {
+ _op_codes.push_back(opcode);
+ }
+}
+
+bool CircleImport::select_sub_graph(uint32_t sgindex)
+{
+ _tensors = nullptr;
+ _operators = nullptr;
+ _inputs.clear();
+ _outputs.clear();
+
+ if (_subgraphs->Length() <= sgindex)
+ {
+ assert(false);
+ return false;
+ }
+
+ const circle::SubGraph *subgraph = (*_subgraphs)[sgindex];
+
+ _tensors = subgraph->tensors();
+ _operators = subgraph->operators();
+
+ _inputs = as_index_vector(subgraph->inputs());
+ _outputs = as_index_vector(subgraph->outputs());
+
+ return true;
+}
+
+circle::BuiltinOperator CircleImport::builtin_code(const circle::Operator *op) const
+{
+ uint32_t index = op->opcode_index();
+ assert(index < _op_codes.size());
+ const circle::OperatorCode *opcode = _op_codes.at(index);
+
+ return opcode->builtin_code();
+}
+
+std::string CircleImport::opcode_name(const circle::Operator *op) const
+{
+ uint32_t index = op->opcode_index();
+ assert(index < _op_codes.size());
+ const circle::OperatorCode *opcode = _op_codes.at(index);
+
+ if (!is_valid(opcode))
+ {
+ std::ostringstream oss;
+ oss << "(invalid: " << index << ")";
+ return oss.str();
+ }
+
+ if (is_custom(opcode))
+ {
+ if (!opcode->custom_code())
+ return "(invalid custom)";
+
+ return opcode->custom_code()->c_str();
+ }
+
+ circle::BuiltinOperator code = opcode->builtin_code();
+ return EnumNameBuiltinOperator(code);
+}
+
+size_t CircleImport::buffer_info(const circle::Tensor *tensor, const uint8_t **buff_data)
+{
+ *buff_data = nullptr;
+
+ if (tensor->buffer() == 0)
+ return 0;
+
+ if (auto *buffer = (*_buffers)[tensor->buffer()])
+ {
+ if (auto *array = buffer->data())
+ {
+ if (size_t size = array->size())
+ {
+ *buff_data = reinterpret_cast<const uint8_t *>(array->data());
+ return size;
+ }
+ }
+ }
+
+ return 0;
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/circle/src/CircleImport.h b/compiler/circlechef/circle/src/CircleImport.h
new file mode 100644
index 000000000..a8ef3ee44
--- /dev/null
+++ b/compiler/circlechef/circle/src/CircleImport.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_IMPORT_H__
+#define __CIRCLE_IMPORT_H__
+
+#include <mio/circle/schema_generated.h>
+
+#include <circlechef.pb.h>
+
+#include <map>
+#include <vector>
+
+namespace circlechef
+{
+
+using CircleSubGraphs_t = flatbuffers::Vector<flatbuffers::Offset<circle::SubGraph>>;
+using CircleTensors_t = flatbuffers::Vector<flatbuffers::Offset<circle::Tensor>>;
+using CircleBuffers_t = flatbuffers::Vector<flatbuffers::Offset<circle::Buffer>>;
+using CircleOperators_t = flatbuffers::Vector<flatbuffers::Offset<circle::Operator>>;
+
+const char *tensor_type(const circle::Tensor *tensor);
+const char *tensor_name(const circle::Tensor *tensor);
+bool is_valid(const circle::OperatorCode *opcode);
+bool is_custom(const circle::OperatorCode *opcode);
+
+/**
+ * @brief Loads a Circle file and provides helpers to access attributes
+ */
+class CircleImport
+{
+public:
+ CircleImport(const circle::Model *model);
+
+ CircleImport() = delete;
+
+public:
+ bool select_sub_graph(uint32_t subgraph);
+
+public:
+ const CircleBuffers_t *buffers() { return _buffers; }
+ const CircleTensors_t *tensors() { return _tensors; }
+ const CircleOperators_t *operators() { return _operators; }
+ const std::vector<int32_t> &inputs() const { return _inputs; }
+ const std::vector<int32_t> &outputs() const { return _outputs; }
+
+ uint32_t num_subgraph() const { return _subgraphs->Length(); }
+
+ circle::BuiltinOperator builtin_code(const circle::Operator *op) const;
+ std::string opcode_name(const circle::Operator *op) const;
+ size_t buffer_info(const circle::Tensor *tensor, const uint8_t **buff_data);
+
+ /**
+ * @brief This will record the tensor by index, if it needs filler option,
+ * such as kernel, bias.
+ */
+ void set_tensor_filler(uint32_t tensor_index) { _tensor_filler[tensor_index] = true; }
+
+ /**
+ * @brief This will store int32 filler values such as reshape information for the tensor
+ */
+ void set_tensor_filler(uint32_t tensor_index, std::vector<int32_t> &expvalues)
+ {
+ _tensor_filler_vint32[tensor_index] = expvalues;
+ }
+
+ void set_tensor_filler(uint32_t tensor_index, std::vector<float> &expvalues)
+ {
+ _tensor_filler_vfloat[tensor_index] = expvalues;
+ }
+
+ /**
+ * @brief This will return true if the tensor by index, needs a filler option.
+ */
+ bool get_tensor_filler(uint32_t tensor_index)
+ {
+ auto it = _tensor_filler.find(tensor_index);
+ if (it != _tensor_filler.end())
+ {
+ return it->second;
+ }
+ return false;
+ }
+
+ /**
+ * @brief This will return true if the tensor by index, needs a int array filler option.
+ */
+ bool get_tensor_filler(uint32_t tensor_index, std::vector<int32_t> &expvalues)
+ {
+ auto it = _tensor_filler_vint32.find(tensor_index);
+ if (it != _tensor_filler_vint32.end())
+ {
+ expvalues = it->second;
+ return true;
+ }
+ return false;
+ }
+
+ bool get_tensor_filler(uint32_t tensor_index, std::vector<float> &expvalues)
+ {
+ auto it = _tensor_filler_vfloat.find(tensor_index);
+ if (it != _tensor_filler_vfloat.end())
+ {
+ expvalues = it->second;
+ return true;
+ }
+ return false;
+ }
+
+private:
+ const CircleSubGraphs_t *_subgraphs{nullptr};
+ const CircleBuffers_t *_buffers{nullptr};
+ const CircleTensors_t *_tensors{nullptr};
+ const CircleOperators_t *_operators{nullptr};
+
+ std::vector<const circle::OperatorCode *> _op_codes{};
+ std::vector<int32_t> _inputs{};
+ std::vector<int32_t> _outputs{};
+
+ std::map<uint32_t, bool> _tensor_filler{};
+ std::map<uint32_t, std::vector<int32_t>> _tensor_filler_vint32{};
+ std::map<uint32_t, std::vector<float>> _tensor_filler_vfloat{};
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLE_IMPORT_H__
diff --git a/compiler/circlechef/circle/src/CircleOpChef.h b/compiler/circlechef/circle/src/CircleOpChef.h
new file mode 100644
index 000000000..a3bcd97d4
--- /dev/null
+++ b/compiler/circlechef/circle/src/CircleOpChef.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_OP_CHEF_H__
+#define __CIRCLE_OP_CHEF_H__
+
+#include <mio/circle/schema_generated.h>
+
+#include <circlechef.pb.h>
+
+#include "CircleImport.h"
+
+namespace circlechef
+{
+
+/**
+ * @brief Interface for each operators to build circlechef
+ */
+class CircleOpChef
+{
+public:
+ virtual void filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const = 0;
+ virtual ::circlechef::Operation *build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const = 0;
+ virtual ~CircleOpChef() {}
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLE_OP_CHEF_H__
diff --git a/compiler/circlechef/circle/src/CircleOpChefs.h b/compiler/circlechef/circle/src/CircleOpChefs.h
new file mode 100644
index 000000000..6a0ce5dc3
--- /dev/null
+++ b/compiler/circlechef/circle/src/CircleOpChefs.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_OP_CHEFS_H__
+#define __CIRCLE_OP_CHEFS_H__
+
+// In alphabet order
+#include "Op/BatchMatMul.h"
+#include "Op/BCQFullyConnected.h"
+#include "Op/BCQGather.h"
+#include "Op/InstanceNorm.h"
+
+#endif // __CIRCLE_OP_CHEFS_H__
diff --git a/compiler/circlechef/circle/src/CircleOpRegistry.h b/compiler/circlechef/circle/src/CircleOpRegistry.h
new file mode 100644
index 000000000..2bf1e19ed
--- /dev/null
+++ b/compiler/circlechef/circle/src/CircleOpRegistry.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_OP_REGISTRY_H__
+#define __CIRCLE_OP_REGISTRY_H__
+
+#include "CircleOpChef.h"
+#include "CircleOpChefs.h"
+
+#include <memory>
+
+namespace circlechef
+{
+
+/**
+ * @brief circlechef operator registry
+ */
+class CircleOpRegistry
+{
+public:
+ /**
+ * @brief Returns registered CircleOpChef pointer for BuiltinOperator or
+ * nullptr if not registered
+ */
+ const CircleOpChef *lookup(circle::BuiltinOperator op) const
+ {
+ if (_circleop_map.find(op) == _circleop_map.end())
+ return nullptr;
+
+ return _circleop_map.at(op).get();
+ }
+
+ static CircleOpRegistry &get()
+ {
+ static CircleOpRegistry me;
+ return me;
+ }
+
+private:
+ CircleOpRegistry()
+ {
+#define REG_TFL_OP(OPCODE, CLASS) \
+ _circleop_map[circle::BuiltinOperator_##OPCODE] = std::make_unique<CLASS>()
+
+ REG_TFL_OP(BATCH_MATMUL, CircleOpBatchMatMul);
+ REG_TFL_OP(BCQ_FULLY_CONNECTED, CircleOpBCQFullyConnected);
+ REG_TFL_OP(BCQ_GATHER, CircleOpBCQGather);
+ REG_TFL_OP(INSTANCE_NORM, CircleOpInstanceNorm);
+#undef REG_TFL_OP
+ }
+
+private:
+ std::map<circle::BuiltinOperator, std::unique_ptr<CircleOpChef>> _circleop_map;
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLE_OP_REGISTRY_H__
diff --git a/compiler/circlechef/circle/src/Convert.cpp b/compiler/circlechef/circle/src/Convert.cpp
new file mode 100644
index 000000000..77614d9b5
--- /dev/null
+++ b/compiler/circlechef/circle/src/Convert.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convert.h"
+
+namespace circlechef
+{
+
+circlechef::TensorType as_circlechef_type(const circle::TensorType type)
+{
+ switch (type)
+ {
+ case circle::TensorType_FLOAT32:
+ return circlechef::FLOAT32;
+ case circle::TensorType_INT32:
+ return circlechef::INT32;
+ case circle::TensorType_INT64:
+ return circlechef::INT64;
+ case circle::TensorType_UINT8:
+ return circlechef::UINT8;
+ case circle::TensorType_BOOL:
+ return circlechef::BOOL;
+ // TODO handle other types
+ // TensorType_FLOAT16
+ // TensorType_STRING
+ // TensorType_INT16
+ // TensorType_COMPLEX64
+ default:
+ throw std::runtime_error{"unsupported tensor type"};
+ }
+}
+
+circlechef::Activation as_circlechef_activation(const circle::ActivationFunctionType type)
+{
+ switch (type)
+ {
+ case circle::ActivationFunctionType_NONE:
+ return circlechef::NONE;
+ case circle::ActivationFunctionType_RELU:
+ return circlechef::RELU;
+ case circle::ActivationFunctionType_RELU6:
+ return circlechef::RELU6;
+ // TODO handle other types
+ // ActivationFunctionType_RELU_N1_TO_1
+ // ActivationFunctionType_TANH
+ // ActivationFunctionType_SIGN_BIT
+ default:
+ throw std::runtime_error{"unsupported activation type"};
+ }
+}
+
+circlechef::Padding as_circlechef_padding(const circle::Padding padding)
+{
+ switch (padding)
+ {
+ case circle::Padding_SAME:
+ return circlechef::SAME;
+ case circle::Padding_VALID:
+ return circlechef::VALID;
+ default:
+ throw std::runtime_error{"unsupported padding"};
+ }
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/circle/src/Convert.h b/compiler/circlechef/circle/src/Convert.h
new file mode 100644
index 000000000..7842c4b01
--- /dev/null
+++ b/compiler/circlechef/circle/src/Convert.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVERT_H__
+#define __CONVERT_H__
+
+#include <mio/circle/schema_generated.h>
+
+#include <circlechef.pb.h>
+
+namespace circlechef
+{
+
+circlechef::TensorType as_circlechef_type(const circle::TensorType type);
+circlechef::Activation as_circlechef_activation(const circle::ActivationFunctionType type);
+circlechef::Padding as_circlechef_padding(const circle::Padding padding);
+
+/**
+ * @brief extract buffer data to std::vector<DT>
+ */
+template <typename DT> std::vector<DT> extract_buffer(const circle::Buffer *buffer)
+{
+ auto buffer_length = buffer->data()->size();
+ auto num_elements = buffer_length / sizeof(DT);
+ std::vector<DT> result(num_elements);
+ std::memcpy(result.data(), buffer->data()->data(), buffer_length);
+ return result;
+}
+
+template <typename T> std::vector<T> as_index_vector(const flatbuffers::Vector<T> *flat_array)
+{
+ if (flat_array == nullptr)
+ throw std::runtime_error("flat_array is nullptr");
+
+ std::vector<T> ret(flat_array->Length());
+ for (uint32_t i = 0; i < flat_array->Length(); i++)
+ {
+ ret[i] = flat_array->Get(i);
+ }
+ return ret;
+}
+
+} // namespace circlechef
+
+#endif // __CONVERT_H__
diff --git a/compiler/circlechef/circle/src/Op/BCQFullyConnected.cpp b/compiler/circlechef/circle/src/Op/BCQFullyConnected.cpp
new file mode 100644
index 000000000..0e85f3969
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/BCQFullyConnected.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BCQFullyConnected.h"
+
+#include "Convert.h"
+
+namespace circlechef
+{
+
+void CircleOpBCQFullyConnected::filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+
+ import->set_tensor_filler(inputs[1]);
+ import->set_tensor_filler(inputs[3]);
+
+ const circle::Tensor *tensor2 = import->tensors()->Get(inputs[2]);
+ assert(tensor2->type() == circle::TensorType::TensorType_INT32);
+ const circle::Buffer *buffer2 = import->buffers()->Get(tensor2->buffer());
+ auto vec2 = extract_buffer<int32_t>(buffer2);
+ import->set_tensor_filler(inputs[2], vec2);
+
+ const circle::Tensor *tensor4 = import->tensors()->Get(inputs[4]);
+ assert(tensor4->type() == circle::TensorType::TensorType_INT32);
+ const circle::Buffer *buffer4 = import->buffers()->Get(tensor4->buffer());
+ auto vec4 = extract_buffer<int32_t>(buffer4);
+ import->set_tensor_filler(inputs[4], vec4);
+}
+
+circlechef::Operation *CircleOpBCQFullyConnected::build(const circle::Operator *op,
+ CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_BCQFullyConnectedOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("BCQFullyConnected");
+
+ auto op_options = operation->mutable_bcq_fully_connected_options();
+
+ op_options->set_weights_hidden_size(op_params->weights_hidden_size());
+ op_options->set_activation(as_circlechef_activation(op_params->fused_activation_function()));
+
+ return operation;
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/circle/src/Op/BCQFullyConnected.h b/compiler/circlechef/circle/src/Op/BCQFullyConnected.h
new file mode 100644
index 000000000..c0ea581d9
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/BCQFullyConnected.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_OP_BCQFULLYCONNECTED_H__
+#define __CIRCLE_OP_BCQFULLYCONNECTED_H__
+
+#include "CircleOpChef.h"
+
+namespace circlechef
+{
+
+/**
+ * @brief circlechef operator builder for BCQFullyConnected
+ */
+class CircleOpBCQFullyConnected : public CircleOpChef
+{
+public:
+ void filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+ circlechef::Operation *build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLE_OP_BCQFULLYCONNECTED_H__
diff --git a/compiler/circlechef/circle/src/Op/BCQGather.cpp b/compiler/circlechef/circle/src/Op/BCQGather.cpp
new file mode 100644
index 000000000..cde345a34
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/BCQGather.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BCQGather.h"
+
+#include "Convert.h"
+
+namespace circlechef
+{
+
+void CircleOpBCQGather::filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+
+ import->set_tensor_filler(inputs[0]);
+
+ const circle::Tensor *tensor1 = import->tensors()->Get(inputs[1]);
+ assert(tensor1->type() == circle::TensorType::TensorType_INT32);
+ const circle::Buffer *buffer1 = import->buffers()->Get(tensor1->buffer());
+ auto vec1 = extract_buffer<int32_t>(buffer1);
+ import->set_tensor_filler(inputs[1], vec1);
+
+ const circle::Tensor *tensor2 = import->tensors()->Get(inputs[2]);
+ assert(tensor2->type() == circle::TensorType::TensorType_INT32);
+ const circle::Buffer *buffer2 = import->buffers()->Get(tensor2->buffer());
+ auto vec2 = extract_buffer<int32_t>(buffer2);
+ import->set_tensor_filler(inputs[2], vec2);
+
+ const circle::Tensor *tensor3 = import->tensors()->Get(inputs[3]);
+ assert(tensor3->type() == circle::TensorType::TensorType_INT32);
+ const circle::Buffer *buffer3 = import->buffers()->Get(tensor3->buffer());
+ auto vec3 = extract_buffer<int32_t>(buffer3);
+ import->set_tensor_filler(inputs[3], vec3);
+}
+
+circlechef::Operation *CircleOpBCQGather::build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_BCQGatherOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("BCQGather");
+
+ auto op_options = operation->mutable_bcq_gather_options();
+
+ op_options->set_input_hidden_size(op_params->input_hidden_size());
+ op_options->set_axis(op_params->axis());
+
+ return operation;
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/circle/src/Op/BCQGather.h b/compiler/circlechef/circle/src/Op/BCQGather.h
new file mode 100644
index 000000000..0ff040551
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/BCQGather.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_OP_BCQGATHER_H__
+#define __CIRCLE_OP_BCQGATHER_H__
+
+#include "CircleOpChef.h"
+
+namespace circlechef
+{
+
+/**
+ * @brief circlechef operator builder for BCQGather
+ */
+class CircleOpBCQGather : public CircleOpChef
+{
+public:
+ void filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+ circlechef::Operation *build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLE_OP_BCQGATHER_H__
diff --git a/compiler/circlechef/circle/src/Op/BatchMatMul.cpp b/compiler/circlechef/circle/src/Op/BatchMatMul.cpp
new file mode 100644
index 000000000..bcf218865
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/BatchMatMul.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchMatMul.h"
+
+#include "Convert.h"
+
+namespace circlechef
+{
+
+void CircleOpBatchMatMul::filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+circlechef::Operation *CircleOpBatchMatMul::build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_BatchMatMulOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("BatchMatMul");
+
+ auto op_options = operation->mutable_batch_matmul_options();
+
+ op_options->set_adjoint_lhs(op_params->adjoint_lhs());
+ op_options->set_adjoint_rhs(op_params->adjoint_rhs());
+
+ return operation;
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/circle/src/Op/BatchMatMul.h b/compiler/circlechef/circle/src/Op/BatchMatMul.h
new file mode 100644
index 000000000..3d4036877
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/BatchMatMul.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_OP_BATCHMATMUL_H__
+#define __CIRCLE_OP_BATCHMATMUL_H__
+
+#include "CircleOpChef.h"
+
+namespace circlechef
+{
+
+/**
+ * @brief circlechef operator builder for batchmatmul
+ */
+class CircleOpBatchMatMul : public CircleOpChef
+{
+public:
+ void filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+ circlechef::Operation *build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLE_OP_BATCHMATMUL_H__
diff --git a/compiler/circlechef/circle/src/Op/InstanceNorm.cpp b/compiler/circlechef/circle/src/Op/InstanceNorm.cpp
new file mode 100644
index 000000000..a1395a578
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/InstanceNorm.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InstanceNorm.h"
+
+#include "Convert.h"
+
+namespace circlechef
+{
+
+void CircleOpInstanceNorm::filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ // index 1 and 2 maybe constant
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+ assert(inputs.size() == 3);
+
+ import->set_tensor_filler(inputs[1]); // set gaussian filler
+ import->set_tensor_filler(inputs[2]);
+}
+
+circlechef::Operation *CircleOpInstanceNorm::build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("InstanceNorm");
+
+ auto op_options = operation->mutable_instance_norm_options();
+
+ auto op_params = op->builtin_options_as_InstanceNormOptions();
+ assert(op_params != nullptr);
+
+ op_options->set_epsilon(op_params->epsilon());
+ op_options->set_activation(as_circlechef_activation(op_params->fused_activation_function()));
+
+ return operation;
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/circle/src/Op/InstanceNorm.h b/compiler/circlechef/circle/src/Op/InstanceNorm.h
new file mode 100644
index 000000000..9cb48e184
--- /dev/null
+++ b/compiler/circlechef/circle/src/Op/InstanceNorm.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLE_OP_INSTANCE_NORM_H__
+#define __CIRCLE_OP_INSTANCE_NORM_H__
+
+#include "CircleOpChef.h"
+
+namespace circlechef
+{
+
+/**
+ * @brief circlechef operator builder for INSTANCE_NORM
+ */
+class CircleOpInstanceNorm : public CircleOpChef
+{
+public:
+ void filler(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+ circlechef::Operation *build(const circle::Operator *op, CircleImport *import,
+ circlechef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLE_OP_INSTANCE_NORM_H__
diff --git a/compiler/circlechef/circle/src/RecipeChef.cpp b/compiler/circlechef/circle/src/RecipeChef.cpp
new file mode 100644
index 000000000..17ef1be6e
--- /dev/null
+++ b/compiler/circlechef/circle/src/RecipeChef.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <circlechef/RecipeChef.h>
+
+#include "Convert.h"
+#include "CircleImport.h"
+#include "CircleOpChef.h"
+#include "CircleOpChefs.h"
+#include "CircleOpRegistry.h"
+
+#include <fstream>
+#include <sstream>
+
+namespace circlechef
+{
+
+void set_inputs(CircleImport *import, circlechef::Operation *operation, const circle::Operator *op)
+{
+ auto tensors = import->tensors();
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+
+ for (auto input : inputs)
+ {
+ if (input == -1)
+ {
+ operation->add_input("");
+ }
+ else
+ {
+ auto tensor = tensors->Get(input);
+ std::string name = tensor_name(tensor);
+ operation->add_input(name);
+ }
+ }
+}
+
+void set_outputs(CircleImport *import, circlechef::Operation *operation, const circle::Operator *op)
+{
+ auto tensors = import->tensors();
+ const std::vector<int32_t> &outputs = as_index_vector(op->outputs());
+
+ for (auto output : outputs)
+ {
+ auto tensor = tensors->Get(output);
+ std::string name = tensor_name(tensor);
+ operation->add_output(name);
+ }
+}
+
+/**
+ * @brief This will build ModelRecipe from circle::Model
+ * First to check operand filler options by scanning all operators,
+ * then translate all operands and operators.
+ * Last will set network inputs and outputs.
+ */
+std::unique_ptr<ModelRecipe> generate_recipe(const circle::Model *model)
+{
+ std::unique_ptr<ModelRecipe> model_recipe{new ModelRecipe()};
+
+ CircleImport circle_import(model);
+
+ assert(circle_import.num_subgraph() == 1);
+ circle_import.select_sub_graph(0);
+
+ auto tensors = circle_import.tensors();
+ auto buffers = circle_import.buffers();
+ auto operators = circle_import.operators();
+
+ // operand fillers for adding all operators
+ for (uint32_t i = 0; i < operators->Length(); ++i)
+ {
+ const auto *op = operators->Get(i);
+ circle::BuiltinOperator builtincode = circle_import.builtin_code(op);
+
+ if (const auto *graph_builder = CircleOpRegistry::get().lookup(builtincode))
+ {
+ graph_builder->filler(op, &circle_import, model_recipe.get());
+ }
+ else
+ {
+ std::string opcodename = circle_import.opcode_name(op);
+ throw std::runtime_error{"Not supported: " + opcodename};
+ }
+ }
+
+ // add all operands(tensors)
+ for (uint32_t i = 0; i < tensors->Length(); ++i)
+ {
+ auto tensor = tensors->Get(i);
+
+ // check buffer
+ if (tensor->buffer() >= buffers->size())
+ throw std::runtime_error{"file load failed"};
+
+ ::circlechef::Operand *operand = model_recipe->add_operand();
+
+ operand->set_name(tensor_name(tensor));
+ operand->set_type(as_circlechef_type(tensor->type()));
+
+ std::vector<int32_t> dims = as_index_vector(tensor->shape());
+ ::circlechef::TensorShape *shape = operand->mutable_shape();
+ for (auto dim : dims)
+ {
+ shape->add_dim(dim);
+ }
+
+ // filler for weights, bias and so on
+ std::vector<int32_t> expvalues;
+ std::vector<float> expfvalues;
+ if (circle_import.get_tensor_filler(i))
+ {
+ circlechef::TensorFiller *filler = operand->mutable_filler();
+ // Note: it is OK to use random weights for functionality validation
+ filler->set_tag("gaussian");
+ filler->add_arg("0.0"); // average
+ filler->add_arg("0.1"); // standard deviation
+ }
+ else if (circle_import.get_tensor_filler(i, expvalues))
+ {
+ circlechef::TensorFiller *filler = operand->mutable_filler();
+ filler->set_tag("explicit");
+ for (auto value : expvalues)
+ {
+ std::ostringstream ss;
+ ss << value;
+ filler->add_arg(ss.str());
+ }
+ }
+ else if (circle_import.get_tensor_filler(i, expfvalues))
+ {
+ circlechef::TensorFiller *filler = operand->mutable_filler();
+ filler->set_tag("explicit");
+ for (auto value : expfvalues)
+ {
+ std::ostringstream ss;
+ ss << value;
+ filler->add_arg(ss.str());
+ }
+ }
+
+ auto quant = tensor->quantization();
+ if (quant != nullptr)
+ {
+ // Note: Calling 'operand->mutable_quant()' will create empty 'quant' node
+ // in the recipe file. We want this only when valid parameter exist.
+ if (quant->min() != nullptr && quant->min()->size() > 0)
+ {
+ circlechef::TensorQuantization *chef_quant = operand->mutable_quant();
+ for (uint32_t idx = 0; idx < quant->min()->size(); ++idx)
+ chef_quant->add_min(quant->min()->Get(idx));
+ }
+ if (quant->max() != nullptr && quant->max()->size() > 0)
+ {
+ circlechef::TensorQuantization *chef_quant = operand->mutable_quant();
+ for (uint32_t idx = 0; idx < quant->max()->size(); idx++)
+ chef_quant->add_max(quant->max()->Get(idx));
+ }
+ if (quant->scale() != nullptr && quant->scale()->size() > 0)
+ {
+ circlechef::TensorQuantization *chef_quant = operand->mutable_quant();
+ for (uint32_t idx = 0; idx < quant->scale()->size(); ++idx)
+ chef_quant->add_scale(quant->scale()->Get(idx));
+ }
+ if (quant->zero_point() != nullptr && quant->zero_point()->size() > 0)
+ {
+ circlechef::TensorQuantization *chef_quant = operand->mutable_quant();
+ for (uint32_t idx = 0; idx < quant->zero_point()->size(); ++idx)
+ chef_quant->add_zero_point(quant->zero_point()->Get(idx));
+ }
+ }
+ }
+
+ // add all operators
+ for (uint32_t i = 0; i < operators->Length(); ++i)
+ {
+ const auto *op = operators->Get(i);
+ circle::BuiltinOperator builtincode = circle_import.builtin_code(op);
+
+ if (const auto *graph_builder = CircleOpRegistry::get().lookup(builtincode))
+ {
+ auto operation = graph_builder->build(op, &circle_import, model_recipe.get());
+
+ // common for all operators: inputs, outputs
+ set_inputs(&circle_import, operation, op);
+ set_outputs(&circle_import, operation, op);
+ }
+ else
+ {
+ std::string opcodename = circle_import.opcode_name(op);
+ throw std::runtime_error{"Not supported: " + opcodename};
+ }
+ }
+
+ // network inputs/outputs
+ const std::vector<int32_t> &inputs = circle_import.inputs();
+ const std::vector<int32_t> &outputs = circle_import.outputs();
+
+ for (const auto input : inputs)
+ {
+ auto tensor = tensors->Get(input);
+ std::string name = tensor_name(tensor);
+
+ model_recipe->add_input(name);
+ }
+ for (const auto output : outputs)
+ {
+ auto tensor = tensors->Get(output);
+ std::string name = tensor_name(tensor);
+
+ model_recipe->add_output(name);
+ }
+
+ return std::move(model_recipe);
+}
+
+bool write_recipe(const std::string &filename, std::unique_ptr<ModelRecipe> &recipe)
+{
+ std::fstream fo(filename, std::ios::binary | std::ios::out);
+
+ if (!fo.is_open())
+ {
+ throw std::runtime_error{"file store failed"};
+ }
+
+ // Note: SerializeToString() or SerializeToOstream() writes in binary mode
+ // DebugString() and Utf8DebugString() will print as a human readable text
+ fo << recipe->Utf8DebugString();
+
+ fo.close();
+
+ return true;
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/core/CMakeLists.txt b/compiler/circlechef/core/CMakeLists.txt
new file mode 100644
index 000000000..54b3ea53d
--- /dev/null
+++ b/compiler/circlechef/core/CMakeLists.txt
@@ -0,0 +1,9 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(circlechef_core STATIC ${SOURCES})
+target_include_directories(circlechef_core PUBLIC include)
+target_include_directories(circlechef_core PRIVATE src)
+target_link_libraries(circlechef_core circlechef_proto)
+target_link_libraries(circlechef_core circlechef_log)
+target_link_libraries(circlechef_core mio_circle)
+target_link_libraries(circlechef_core souschef)
diff --git a/compiler/circlechef/core/include/circlechef/ModelChef.h b/compiler/circlechef/core/include/circlechef/ModelChef.h
new file mode 100644
index 000000000..64326179c
--- /dev/null
+++ b/compiler/circlechef/core/include/circlechef/ModelChef.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MODEL_CHEF_H__
+#define __MODEL_CHEF_H__
+
+#include <circlechef.pb.h>
+
+#include <memory>
+
+namespace circlechef
+{
+
+class GeneratedModel final
+{
+public:
+ struct Impl
+ {
+ virtual ~Impl() = default;
+
+ virtual const char *base(void) const = 0;
+ virtual size_t size(void) const = 0;
+ };
+
+public:
+ GeneratedModel(std::unique_ptr<Impl> &&impl) : _impl{std::move(impl)}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const char *base(void) const { return _impl->base(); }
+ size_t size(void) const { return _impl->size(); }
+
+private:
+ std::unique_ptr<Impl> _impl;
+};
+
+GeneratedModel cook(const ModelRecipe &model_recipe);
+
+} // namespace circlechef
+
+#endif // __MODEL_CHEF_H__
diff --git a/compiler/circlechef/core/src/Arguments.h b/compiler/circlechef/core/src/Arguments.h
new file mode 100644
index 000000000..9fe7bbb77
--- /dev/null
+++ b/compiler/circlechef/core/src/Arguments.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ARGUMENTS_H__
+#define __ARGUMENTS_H__
+
+#include <cstdint>
+#include <string>
+
+/**
+ * @brief Read-only string sequence view
+ */
+struct Arguments
+{
+ virtual ~Arguments() = default;
+
+ virtual uint32_t count(void) const = 0;
+ virtual const std::string &value(uint32_t n) const = 0;
+};
+
+#endif // __ARGUMENTS_H__
diff --git a/compiler/circlechef/core/src/Convert.cpp b/compiler/circlechef/core/src/Convert.cpp
new file mode 100644
index 000000000..2db0a6212
--- /dev/null
+++ b/compiler/circlechef/core/src/Convert.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convert.h"
+
+#include <stdexcept>
+
+circle::Padding as_circle_padding(const circlechef::Padding &value)
+{
+ switch (value)
+ {
+ case circlechef::SAME:
+ return circle::Padding_SAME;
+ case circlechef::VALID:
+ return circle::Padding_VALID;
+ default:
+ break;
+ }
+
+ throw std::runtime_error{"Unknown padding value"};
+}
+
+circle::ActivationFunctionType as_circle_activation(const circlechef::Activation &value)
+{
+ switch (value)
+ {
+ case circlechef::NONE:
+ return circle::ActivationFunctionType_NONE;
+ case circlechef::RELU:
+ return circle::ActivationFunctionType_RELU;
+ case circlechef::RELU6:
+ return circle::ActivationFunctionType_RELU6;
+ default:
+ break;
+ }
+
+ throw std::runtime_error{"Unknown activation"};
+}
+
+circle::TensorType as_circle_tensortype(const circlechef::TensorType &value)
+{
+ switch (value)
+ {
+ case circlechef::FLOAT32:
+ return circle::TensorType_FLOAT32;
+ case circlechef::INT32:
+ return circle::TensorType_INT32;
+ case circlechef::UINT8:
+ return circle::TensorType_UINT8;
+ case circlechef::INT64:
+ return circle::TensorType_INT64;
+ case circlechef::BOOL:
+ return circle::TensorType_BOOL;
+ default:
+ break;
+ }
+
+ throw std::runtime_error{"Unknown tensor type"};
+}
diff --git a/compiler/circlechef/core/src/Convert.h b/compiler/circlechef/core/src/Convert.h
new file mode 100644
index 000000000..766098da2
--- /dev/null
+++ b/compiler/circlechef/core/src/Convert.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Convert.h
+ * @brief This header declares various as_circle_TYPE functions
+ */
+#ifndef __CONVERT_H__
+#define __CONVERT_H__
+
+#include <circlechef.pb.h>
+#include <mio/circle/schema_generated.h>
+
+circle::Padding as_circle_padding(const circlechef::Padding &value);
+circle::ActivationFunctionType as_circle_activation(const circlechef::Activation &value);
+circle::TensorType as_circle_tensortype(const circlechef::TensorType &value);
+
+#endif // __CONVERT_H__
diff --git a/compiler/circlechef/core/src/ModelChef.cpp b/compiler/circlechef/core/src/ModelChef.cpp
new file mode 100644
index 000000000..76aeacdd9
--- /dev/null
+++ b/compiler/circlechef/core/src/ModelChef.cpp
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "circlechef/ModelChef.h"
+#include <souschef/RangedArguments.h>
+#include <souschef/Registry.h>
+
+#include "Convert.h"
+
+#include <souschef/DataChefs.h>
+
+#include "OpChef.h"
+#include "OpChefs.h"
+
+#include <souschef/Dataset.h>
+
+#include "Log.h"
+
+#include <iterator>
+#include <map>
+#include <string>
+#include <vector>
+
+#include <cassert>
+#include <fstream>
+#include <iostream>
+#include <numeric>
+#include <sstream>
+#include <stdexcept>
+
+namespace
+{
+
+using namespace souschef;
+
+template <typename T> std::vector<T> as_vector(const ::google::protobuf::RepeatedPtrField<T> &field)
+{
+ std::vector<T> res;
+ for (const auto &elem : field)
+ {
+ res.emplace_back(elem);
+ }
+ return res;
+}
+
+template <typename T> Dataset<T> as_dataset(const ::google::protobuf::RepeatedPtrField<T> &field)
+{
+ return Dataset<T>(as_vector<T>(field));
+}
+
+} // namespace
+
+namespace
+{
+
+template <typename T> using Dims = std::vector<T>;
+
+Dims<int32_t> as_dims(const circlechef::TensorShape &shape)
+{
+ std::vector<int32_t> res;
+
+ for (auto &dim : shape.dim())
+ {
+ res.emplace_back(static_cast<int32_t>(dim));
+ }
+
+ return res;
+}
+
+int32_t element_count(const Dims<int32_t> &dims)
+{
+ return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int32_t>());
+}
+
+} // namespace
+
+namespace
+{
+
+class GeneratedModelImpl final : public circlechef::GeneratedModel::Impl
+{
+public:
+ GeneratedModelImpl(std::unique_ptr<flatbuffers::FlatBufferBuilder> &&builder)
+ : _builder{std::move(builder)}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const char *base(void) const override
+ {
+ // Return the base address of generated flatbuffer model
+ return reinterpret_cast<const char *>(_builder->GetBufferPointer());
+ }
+
+public:
+ size_t size(void) const override
+ {
+ // Return the size of generated flatbuffer model
+ return _builder->GetSize();
+ }
+
+private:
+ std::unique_ptr<flatbuffers::FlatBufferBuilder> _builder;
+};
+
+} // namespace
+
+namespace
+{
+
+struct DataChefRegistry final : public Registry<DataChefFactory>
+{
+};
+
+DataChefRegistry &data_chef_registry(const circlechef::TensorType &type)
+{
+ static DataChefRegistry s32;
+ static DataChefRegistry s64;
+ static DataChefRegistry fp32;
+ static DataChefRegistry u8;
+ static DataChefRegistry boolean;
+
+ switch (type)
+ {
+ case circlechef::INT32:
+ return s32;
+ case circlechef::INT64:
+ return s64;
+ case circlechef::FLOAT32:
+ return fp32;
+ case circlechef::UINT8:
+ return u8;
+ case circlechef::BOOL:
+ return boolean;
+ default:
+ break;
+ }
+
+ throw std::runtime_error{"Unknown tensor type"};
+}
+
+struct OpChefRegistry final : public Registry<OpChefFactory>
+{
+};
+
+OpChefRegistry &op_chef_registry(void)
+{
+ static OpChefRegistry registry;
+ return registry;
+}
+
+/// @brief This will prepare a map of unique builtin codes in the model recipe
+std::map<circle::BuiltinOperator, int32_t>
+gather_builtincode_map(const ::circlechef::ModelRecipe &model_recipe)
+{
+ // Key and value of the map are BuiltinOperator and operator version
+ std::map<circle::BuiltinOperator, int32_t> builtin_map;
+
+ for (const auto &operation : model_recipe.operation())
+ {
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+ if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
+ continue;
+
+ // Various operation version is unified as the highest version among them
+ if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
+ builtin_map[op_chef->code()] < operation.version())
+ builtin_map[op_chef->code()] = operation.version();
+ }
+
+ // Add ops used in Graphs(subgraphs)
+ for (int g = 0; g < model_recipe.graph_size(); ++g)
+ {
+ const auto &graph = model_recipe.graph(g);
+ for (const auto &operation : graph.operation())
+ {
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+ if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
+ continue;
+
+ // Various operation version is unified as the highest version among them
+ if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
+ builtin_map[op_chef->code()] < operation.version())
+ builtin_map[op_chef->code()] = operation.version();
+ }
+ }
+
+ return builtin_map;
+}
+
+/// @brief This will prepare a set of unique custom codes in the model recipe
+std::set<std::string> gather_customcode_set(const ::circlechef::ModelRecipe &model_recipe)
+{
+ std::set<std::string> customcode_set;
+ for (const auto &operation : model_recipe.operation())
+ {
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+ if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
+ customcode_set.insert(operation.type());
+ }
+
+ // Add ops used in Graphs(subgraphs)
+ for (int g = 0; g < model_recipe.graph_size(); ++g)
+ {
+ const auto &graph = model_recipe.graph(g);
+ for (const auto &operation : graph.operation())
+ {
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+ if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
+ customcode_set.insert(operation.type());
+ }
+ }
+
+ return customcode_set;
+}
+
+} // namespace
+
+namespace
+{
+
+struct CookParams
+{
+ std::vector<flatbuffers::Offset<::circle::Buffer>> &buffer_vec;
+ std::vector<flatbuffers::Offset<::circle::OperatorCode>> &code_vec;
+ std::vector<flatbuffers::Offset<::circle::SubGraph>> &subgraph_vec;
+ std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder;
+ std::map<circle::BuiltinOperator, int32_t> &builtin_code_map;
+ std::string noname;
+};
+
+template <typename T> void cook_graph(const T &graph, CookParams &cp)
+{
+ LOGGER(l);
+
+ std::vector<flatbuffers::Offset<::circle::Buffer>> &buffer_vec = cp.buffer_vec;
+ std::vector<flatbuffers::Offset<::circle::OperatorCode>> &code_vec = cp.code_vec;
+ std::vector<flatbuffers::Offset<::circle::SubGraph>> &subgraph_vec = cp.subgraph_vec;
+ std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder = cp.flatbuffer_builder;
+ std::map<circle::BuiltinOperator, int32_t> &builtin_code_map = cp.builtin_code_map;
+
+ // Operand-related
+ std::vector<flatbuffers::Offset<::circle::Tensor>> tensor_vec;
+
+ // Operation-related
+ std::vector<flatbuffers::Offset<::circle::Operator>> operator_vec;
+
+ // default name for graph
+ std::string graph_name = cp.noname;
+ if (graph.has_name())
+ graph_name = graph.name();
+
+ // Tensor Name -> Tensor ID mapping (per Graph)
+ std::map<std::string, int32_t> symbol_table;
+
+ auto lookup = [&symbol_table, &graph_name](const std::string &name) {
+ if (symbol_table.find(name) != symbol_table.end())
+ return symbol_table.at(name);
+ else if (name == "")
+ return -1; // -1 in circle means that optional input tensor is empty.
+ else
+ {
+ std::string msg = "circlechef : input not found in " + graph_name + " graph";
+ throw std::runtime_error(msg.c_str());
+ }
+ };
+
+ int32_t buffer_start = buffer_vec.size();
+ int32_t buffer_index = 0;
+
+ // Create buffer(s) 1~n(I) for input(s)
+ const auto size_input = graph.input_size();
+ for (int ci = 0; ci < size_input; ++ci)
+ {
+ circle::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_vec.emplace_back(buffer_builder.Finish());
+ }
+ // Create buffer(s) n(I)+1~n(I)+n(O) for output(s)
+ const auto size_output = graph.output_size();
+ for (int co = 0; co < size_output; ++co)
+ {
+ circle::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_vec.emplace_back(buffer_builder.Finish());
+ }
+
+ auto input_names = as_dataset(graph.input()).vectorize();
+ auto output_names = as_dataset(graph.output()).vectorize();
+
+ for (const auto &operand : graph.operand())
+ {
+ assert(operand.has_name());
+
+ assert(operand.has_type());
+
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape;
+ std::vector<int32_t> dims;
+ if (operand.has_shape())
+ {
+ dims = as_dims(operand.shape());
+ shape = flatbuffer_builder->CreateVector(dims);
+ }
+
+ auto name = flatbuffer_builder->CreateString(operand.name());
+
+ buffer_index = 0;
+
+ // Create Buffer if filler is specified
+ if (operand.has_filler())
+ {
+ const auto &filler = operand.filler();
+
+ assert(filler.has_tag());
+
+ auto args = ranged_arguments(filler.arg().begin(), filler.arg().end());
+ auto chef = data_chef_registry(operand.type()).lookup(filler.tag()).create(args);
+
+ assert(chef != nullptr);
+
+ // Create Data
+ int32_t count = (element_count(dims) > 0) ? element_count(dims) : filler.arg_size();
+ auto data_vec = chef->generate(count);
+ auto data = flatbuffer_builder->CreateVector(data_vec);
+
+ // Create Buffer
+ circle::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_builder.add_data(data);
+ auto buffer = buffer_builder.Finish();
+
+ // Update Buffer Index & Vector
+ buffer_index = buffer_vec.size();
+ buffer_vec.emplace_back(buffer);
+ }
+ else
+ {
+ // if this is input or output, assign to that buffer_index
+ int idx = 0;
+ for (auto it = input_names.begin(); it != input_names.end(); ++it, ++idx)
+ {
+ if (*it == operand.name())
+ {
+ buffer_index = buffer_start + idx;
+ break;
+ }
+ }
+ if (buffer_index == 0)
+ {
+ idx = 0;
+ for (auto it = output_names.begin(); it != output_names.end(); ++it, ++idx)
+ {
+ if (*it == operand.name())
+ {
+ buffer_index = buffer_start + size_input + idx;
+ break;
+ }
+ }
+ }
+ if (buffer_index == 0)
+ {
+ // we couldn't find the buffer; create an empty buffer for this tensor
+ buffer_index = buffer_vec.size();
+
+ circle::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_vec.emplace_back(buffer_builder.Finish());
+ }
+ }
+ assert(buffer_index != 0);
+
+ flatbuffers::Offset<circle::QuantizationParameters> quant_index;
+
+ // Create QuantizationParameters if quant is specified
+ if (operand.has_quant())
+ {
+ const auto &quant = operand.quant();
+
+ // Create each parameters
+ // NOTE if some parameters are not given, those will be set to default value
+ std::vector<float> quant_max_vec(quant.max_size());
+ std::vector<float> quant_min_vec(quant.min_size());
+ std::vector<float> quant_scale_vec(quant.scale_size());
+ std::vector<int64_t> quant_zero_point_vec(quant.zero_point_size());
+
+ for (uint32_t i = 0; i < quant.max_size(); ++i)
+ quant_max_vec.at(i) = quant.max(i);
+ for (uint32_t i = 0; i < quant.min_size(); ++i)
+ quant_min_vec.at(i) = quant.min(i);
+ for (uint32_t i = 0; i < quant.scale_size(); ++i)
+ quant_scale_vec.at(i) = quant.scale(i);
+ for (uint32_t i = 0; i < quant.zero_point_size(); ++i)
+ quant_zero_point_vec.at(i) = quant.zero_point(i);
+
+ auto quant_max = flatbuffer_builder->CreateVector(quant_max_vec);
+ auto quant_min = flatbuffer_builder->CreateVector(quant_min_vec);
+ auto quant_scale = flatbuffer_builder->CreateVector(quant_scale_vec);
+ auto quant_zero_point = flatbuffer_builder->CreateVector(quant_zero_point_vec);
+
+ // Create QuantizationParameters
+ circle::QuantizationParametersBuilder quant_builder{*flatbuffer_builder};
+ quant_builder.add_max(quant_max);
+ quant_builder.add_min(quant_min);
+ quant_builder.add_scale(quant_scale);
+ quant_builder.add_zero_point(quant_zero_point);
+
+ // Update QuantizationParameters Index
+ quant_index = quant_builder.Finish();
+ }
+
+ // Create Tensor
+ circle::TensorBuilder tensor_builder{*flatbuffer_builder};
+
+ tensor_builder.add_shape(shape);
+ tensor_builder.add_type(as_circle_tensortype(operand.type()));
+ tensor_builder.add_buffer(buffer_index);
+ tensor_builder.add_name(name);
+ if (operand.has_quant())
+ tensor_builder.add_quantization(quant_index);
+
+ // Append!
+ tensor_vec.emplace_back(tensor_builder.Finish());
+
+ // Update Tensor Name -> Tensor Index Map
+ int32_t tensor_index = symbol_table.size();
+ const auto &tensor_name = operand.name();
+
+ INFO(l) << "Symbol [" << tensor_name << "] = Tensor " << tensor_index << std::endl;
+
+ symbol_table[tensor_name] = tensor_index;
+ }
+
+ // Create Operator
+ for (const auto &operation : graph.operation())
+ {
+ assert(operation.has_type());
+
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+
+ // Create 'inputs'
+ std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
+ auto inputs = flatbuffer_builder->CreateVector(input_vec);
+
+ // Create 'outputs'
+ std::vector<int32_t> output_vec = as_dataset(operation.output()).map(lookup).vectorize();
+ auto outputs = flatbuffer_builder->CreateVector(output_vec);
+
+ // Create Option
+ auto options = op_chef->value(*flatbuffer_builder);
+
+ // Create Custom option
+ auto circle_custom_options = op_chef->custom_value(*flatbuffer_builder);
+
+ // Create Operator
+ circle::OperatorBuilder op_builder{*flatbuffer_builder};
+
+ // Get operator code index from builtin_code_map with assumption, order of
+ // builtin_code_map is same as that of code_vec
+ auto op_it = builtin_code_map.find(op_chef->code());
+ assert(op_it != builtin_code_map.end());
+ uint32_t opcode_index = std::distance(builtin_code_map.begin(), op_it);
+
+ op_builder.add_opcode_index(opcode_index);
+ op_builder.add_inputs(inputs);
+ op_builder.add_outputs(outputs);
+ op_builder.add_builtin_options_type(op_chef->type());
+ op_builder.add_builtin_options(options);
+ op_builder.add_custom_options(circle_custom_options);
+ op_builder.add_custom_options_format(circle::CustomOptionsFormat_FLEXBUFFERS);
+ // Append Operator
+ operator_vec.emplace_back(op_builder.Finish());
+ }
+
+ // Create network input/output vector
+ std::vector<int32_t> input_vec = as_dataset(graph.input()).map(lookup).vectorize();
+ std::vector<int32_t> output_vec = as_dataset(graph.output()).map(lookup).vectorize();
+
+ // Create "SubGraph" arguments
+ auto tensors = flatbuffer_builder->CreateVector(tensor_vec);
+ auto inputs = flatbuffer_builder->CreateVector(input_vec);
+ auto outputs = flatbuffer_builder->CreateVector(output_vec);
+ auto operators = flatbuffer_builder->CreateVector(operator_vec);
+ auto name = flatbuffer_builder->CreateString(graph_name);
+
+ circle::SubGraphBuilder subgraph_builder{*flatbuffer_builder};
+
+ subgraph_builder.add_tensors(tensors);
+ subgraph_builder.add_inputs(inputs);
+ subgraph_builder.add_outputs(outputs);
+ subgraph_builder.add_operators(operators);
+ subgraph_builder.add_name(name);
+
+ subgraph_vec.emplace_back(subgraph_builder.Finish());
+}
+
+} // namespace
+
+namespace circlechef
+{
+
+/**
+ * @brief Generate an (in-memory) circle model from a given model recipe
+ */
+GeneratedModel cook(const ::circlechef::ModelRecipe &model_recipe)
+{
+// Initialize Op Chef Registry
+#define OP_CHEF(NAME, FACTORY_CLASS) \
+ op_chef_registry().add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
+#include "OpChef.def"
+#undef OP_CHEF
+
+// Initialize Data Chef Registry
+#define DATA_CHEF(TYPE, NAME, FACTORY_CLASS) \
+ data_chef_registry(::circlechef::TYPE) \
+ .add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
+#include <souschef/DataChef.def>
+#undef DATA_CHEF
+
+ //
+ // Create FlatBufferBuilder
+ //
+ auto flatbuffer_builder =
+ std::unique_ptr<flatbuffers::FlatBufferBuilder>(new flatbuffers::FlatBufferBuilder(1024));
+
+ // Operand-related
+ std::vector<flatbuffers::Offset<::circle::Buffer>> buffer_vec;
+
+ // Operation-related
+ std::vector<flatbuffers::Offset<::circle::OperatorCode>> code_vec;
+
+ // Graphs-related
+ std::vector<flatbuffers::Offset<::circle::SubGraph>> subgraph_vec;
+
+ // Create OperatorCode with Builtin Operator
+ std::map<circle::BuiltinOperator, int32_t> builtin_code_map =
+ gather_builtincode_map(model_recipe);
+ for (auto const &opcode : builtin_code_map)
+ {
+ circle::OperatorCodeBuilder code_builder{*flatbuffer_builder};
+ code_builder.add_builtin_code(opcode.first);
+ code_builder.add_version(opcode.second);
+ auto code = code_builder.Finish();
+ // Update OperatorCode vector
+ code_vec.emplace_back(code);
+ }
+
+ // Create OperatorCode with Custom Operator
+ std::set<std::string> custom_code_set = gather_customcode_set(model_recipe);
+ if (custom_code_set.size() &&
+ builtin_code_map.find(circle::BuiltinOperator_CUSTOM) == builtin_code_map.end())
+ builtin_code_map[circle::BuiltinOperator_CUSTOM] = 1;
+
+ for (auto opcode : custom_code_set)
+ {
+ auto custom_code = flatbuffer_builder->CreateString(opcode);
+ circle::OperatorCodeBuilder code_builder{*flatbuffer_builder};
+ code_builder.add_builtin_code(circle::BuiltinOperator_CUSTOM);
+ code_builder.add_custom_code(custom_code);
+ auto code = code_builder.Finish();
+ // Update OperatorCode vector
+ code_vec.emplace_back(code);
+ }
+
+ // Create an Empty Buffer
+ //
+  // Buffer 0 SHOULD be an empty buffer in circle model file
+ // (Please refer to the comment for Tensor.buffer field in schema)
+ {
+ circle::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_vec.emplace_back(buffer_builder.Finish());
+ }
+
+ //
+ // Create Main graph
+ //
+ CookParams cp{buffer_vec, code_vec, subgraph_vec, flatbuffer_builder, builtin_code_map, "main"};
+
+ cook_graph<::circlechef::ModelRecipe>(model_recipe, cp);
+
+ //
+ // Create subgraphs if exist
+ //
+ for (int g = 0; g < model_recipe.graph_size(); ++g)
+ {
+ const auto &graph = model_recipe.graph(g);
+
+ std::ostringstream stringStream;
+ stringStream << "sub_" << (g + 1);
+
+ CookParams cp{buffer_vec, code_vec, subgraph_vec,
+ flatbuffer_builder, builtin_code_map, stringStream.str()};
+
+ cook_graph<::circlechef::Graph>(graph, cp);
+ }
+
+ // Create "Model" arguments
+ auto buffers = flatbuffer_builder->CreateVector(buffer_vec);
+ auto operator_codes = flatbuffer_builder->CreateVector(code_vec);
+ auto subgraphs = flatbuffer_builder->CreateVector(subgraph_vec);
+ auto description = flatbuffer_builder->CreateString("Generated by circlechef");
+
+ // Create "Model"
+ circle::ModelBuilder model_builder{*flatbuffer_builder};
+
+ model_builder.add_version(3);
+ model_builder.add_operator_codes(operator_codes);
+ model_builder.add_subgraphs(subgraphs);
+ model_builder.add_description(description);
+ model_builder.add_buffers(buffers);
+
+ auto model = model_builder.Finish();
+
+ // Finalize
+ ::circle::FinishModelBuffer(*flatbuffer_builder, model);
+
+  // Return "GeneratedModel"
+ return GeneratedModel{
+ std::unique_ptr<GeneratedModelImpl>(new GeneratedModelImpl(std::move(flatbuffer_builder)))};
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/core/src/Op/BCQFullyConnected.cpp b/compiler/circlechef/core/src/Op/BCQFullyConnected.cpp
new file mode 100644
index 000000000..4c82c52cc
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/BCQFullyConnected.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BCQFullyConnected.h"
+
+#include "Convert.h"
+
+flatbuffers::Offset<void> BCQFullyConnectedChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_bcq_fully_connected_options());
+
+ circle::BCQFullyConnectedOptionsBuilder bcq_fully_connected_options_builder{fbb};
+ bcq_fully_connected_options_builder.add_weights_hidden_size(
+ operation.bcq_fully_connected_options().weights_hidden_size());
+ bcq_fully_connected_options_builder.add_fused_activation_function(
+ as_circle_activation(operation.bcq_fully_connected_options().activation()));
+
+ return bcq_fully_connected_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef>
+BCQFullyConnectedChefFactory::create(const circlechef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new BCQFullyConnectedChef{operation}};
+}
diff --git a/compiler/circlechef/core/src/Op/BCQFullyConnected.h b/compiler/circlechef/core/src/Op/BCQFullyConnected.h
new file mode 100644
index 000000000..41e6b53d5
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/BCQFullyConnected.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_BCQFULLYCONNECTED_H__
+#define __OP_BCQFULLYCONNECTED_H__
+
+#include "OpChef.h"
+
+class BCQFullyConnectedChef final : public OpChef
+{
+public:
+ explicit BCQFullyConnectedChef(const circlechef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ circle::BuiltinOperator code(void) const override
+ {
+ return circle::BuiltinOperator_BCQ_FULLY_CONNECTED;
+ }
+
+ circle::BuiltinOptions type(void) const override
+ {
+ return circle::BuiltinOptions_BCQFullyConnectedOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const circlechef::Operation *_operation;
+};
+
+struct BCQFullyConnectedChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const circlechef::Operation *operation) const override;
+};
+
+#endif // __OP_BCQFULLYCONNECTED_H__
diff --git a/compiler/circlechef/core/src/Op/BCQGather.cpp b/compiler/circlechef/core/src/Op/BCQGather.cpp
new file mode 100644
index 000000000..08f6f611f
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/BCQGather.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BCQGather.h"
+
+flatbuffers::Offset<void> BCQGatherChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_bcq_gather_options());
+
+ circle::BCQGatherOptionsBuilder bcq_gather_options_builder{fbb};
+ bcq_gather_options_builder.add_input_hidden_size(
+ operation.bcq_gather_options().input_hidden_size());
+ bcq_gather_options_builder.add_axis(operation.bcq_gather_options().axis());
+
+ return bcq_gather_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> BCQGatherChefFactory::create(const circlechef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new BCQGatherChef{operation}};
+}
diff --git a/compiler/circlechef/core/src/Op/BCQGather.h b/compiler/circlechef/core/src/Op/BCQGather.h
new file mode 100644
index 000000000..24a797a41
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/BCQGather.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_BCQGATHER_H__
+#define __OP_BCQGATHER_H__
+
+#include "OpChef.h"
+
+class BCQGatherChef final : public OpChef
+{
+public:
+ explicit BCQGatherChef(const circlechef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ circle::BuiltinOperator code(void) const override { return circle::BuiltinOperator_BCQ_GATHER; }
+
+ circle::BuiltinOptions type(void) const override
+ {
+ return circle::BuiltinOptions_BCQGatherOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const circlechef::Operation *_operation;
+};
+
+struct BCQGatherChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const circlechef::Operation *operation) const override;
+};
+
+#endif // __OP_BCQGATHER_H__
diff --git a/compiler/circlechef/core/src/Op/BatchMatMul.cpp b/compiler/circlechef/core/src/Op/BatchMatMul.cpp
new file mode 100644
index 000000000..d98c0801a
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/BatchMatMul.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchMatMul.h"
+
+flatbuffers::Offset<void> BatchMatMulChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_batch_matmul_options());
+
+ circle::BatchMatMulOptionsBuilder batch_matmul_options_options_builder{fbb};
+ batch_matmul_options_options_builder.add_adjoint_lhs(
+ operation.batch_matmul_options().adjoint_lhs());
+ batch_matmul_options_options_builder.add_adjoint_rhs(
+ operation.batch_matmul_options().adjoint_rhs());
+
+ return batch_matmul_options_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> BatchMatMulChefFactory::create(const circlechef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new BatchMatMulChef{operation}};
+}
diff --git a/compiler/circlechef/core/src/Op/BatchMatMul.h b/compiler/circlechef/core/src/Op/BatchMatMul.h
new file mode 100644
index 000000000..fbb411eff
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/BatchMatMul.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_BATCH_MATMUL_H__
+#define __OP_BATCH_MATMUL_H__
+
+#include "OpChef.h"
+
+class BatchMatMulChef final : public OpChef
+{
+public:
+ explicit BatchMatMulChef(const circlechef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ circle::BuiltinOperator code(void) const override { return circle::BuiltinOperator_BATCH_MATMUL; }
+
+ circle::BuiltinOptions type(void) const override
+ {
+ return circle::BuiltinOptions_BatchMatMulOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const circlechef::Operation *_operation;
+};
+
+struct BatchMatMulChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const circlechef::Operation *operation) const override;
+};
+
+#endif // __OP_BATCH_MATMUL_H__
diff --git a/compiler/circlechef/core/src/Op/InstanceNorm.cpp b/compiler/circlechef/core/src/Op/InstanceNorm.cpp
new file mode 100644
index 000000000..115eceffc
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/InstanceNorm.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InstanceNorm.h"
+
+#include "Convert.h"
+
+flatbuffers::Offset<void> InstanceNormChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_instance_norm_options());
+ auto circle_activation = as_circle_activation(operation.instance_norm_options().activation());
+
+ circle::InstanceNormOptionsBuilder options_builder{fbb};
+ options_builder.add_epsilon(operation.instance_norm_options().epsilon());
+ options_builder.add_fused_activation_function(circle_activation);
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef>
+InstanceNormChefFactory::create(const circlechef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new InstanceNormChef{operation}};
+}
diff --git a/compiler/circlechef/core/src/Op/InstanceNorm.h b/compiler/circlechef/core/src/Op/InstanceNorm.h
new file mode 100644
index 000000000..f36b5d7b9
--- /dev/null
+++ b/compiler/circlechef/core/src/Op/InstanceNorm.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_INSTANCE_NORM_H__
+#define __OP_INSTANCE_NORM_H__
+
+#include "OpChef.h"
+
+class InstanceNormChef final : public OpChef
+{
+public:
+ explicit InstanceNormChef(const circlechef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ circle::BuiltinOperator code(void) const override
+ {
+ return circle::BuiltinOperator_INSTANCE_NORM;
+ }
+
+ circle::BuiltinOptions type(void) const override
+ {
+ return circle::BuiltinOptions_InstanceNormOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const circlechef::Operation *_operation;
+};
+
+struct InstanceNormChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const circlechef::Operation *operation) const override;
+};
+
+#endif // __OP_INSTANCE_NORM_H__
diff --git a/compiler/circlechef/core/src/OpChef.def b/compiler/circlechef/core/src/OpChef.def
new file mode 100644
index 000000000..3128d3ba2
--- /dev/null
+++ b/compiler/circlechef/core/src/OpChef.def
@@ -0,0 +1,10 @@
+#ifndef OP_CHEF
+#error "Define OP first"
+#endif // OP_CHEF
+
+// Please keep the list in alphabetical order
+// OP_CHEF(NAME, FACTORY_CLASS)
+OP_CHEF(BatchMatMul, BatchMatMulChefFactory)
+OP_CHEF(BCQFullyConnected, BCQFullyConnectedChefFactory)
+OP_CHEF(BCQGather, BCQGatherChefFactory)
+OP_CHEF(InstanceNorm, InstanceNormChefFactory)
diff --git a/compiler/circlechef/core/src/OpChef.h b/compiler/circlechef/core/src/OpChef.h
new file mode 100644
index 000000000..3479e51ef
--- /dev/null
+++ b/compiler/circlechef/core/src/OpChef.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CHEF_H__
+#define __OP_CHEF_H__
+
+#include <circlechef.pb.h>
+#include <mio/circle/schema_generated.h>
+
+#include <memory>
+
+struct OpChef
+{
+ virtual ~OpChef() = default;
+
+ virtual circle::BuiltinOperator code(void) const = 0;
+ virtual circle::BuiltinOptions type(void) const = 0;
+ virtual flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const = 0;
+
+ // TODO Find a way to place this method in a better place
+ virtual flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+ custom_value(flatbuffers::FlatBufferBuilder &fbb) const
+ {
+ return flatbuffers::Offset<flatbuffers::Vector<uint8_t>>();
+ }
+};
+
+struct OpChefFactory
+{
+ virtual ~OpChefFactory() = default;
+
+ virtual std::unique_ptr<OpChef> create(const circlechef::Operation *operation) const = 0;
+};
+
+#endif // __OP_CHEF_H__
diff --git a/compiler/circlechef/core/src/OpChefs.h b/compiler/circlechef/core/src/OpChefs.h
new file mode 100644
index 000000000..e13c5e0c6
--- /dev/null
+++ b/compiler/circlechef/core/src/OpChefs.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CHEFS_H__
+#define __OP_CHEFS_H__
+
+#include "Op/BatchMatMul.h"
+#include "Op/BCQFullyConnected.h"
+#include "Op/BCQGather.h"
+#include "Op/InstanceNorm.h"
+
+#endif // __OP_CHEFS_H__
diff --git a/compiler/circlechef/log/CMakeLists.txt b/compiler/circlechef/log/CMakeLists.txt
new file mode 100644
index 000000000..6527ca7d8
--- /dev/null
+++ b/compiler/circlechef/log/CMakeLists.txt
@@ -0,0 +1,7 @@
+# TODO Find how to test logging framework
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(circlechef_log STATIC ${SOURCES})
+target_include_directories(circlechef_log PUBLIC include)
+target_link_libraries(circlechef_log PUBLIC hermes)
+target_link_libraries(circlechef_log PRIVATE hermes_std)
diff --git a/compiler/circlechef/log/include/Log.h b/compiler/circlechef/log/include/Log.h
new file mode 100644
index 000000000..ef00b26cc
--- /dev/null
+++ b/compiler/circlechef/log/include/Log.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLECHEF_LOG_H__
+#define __CIRCLECHEF_LOG_H__
+
+#include <hermes.h>
+
+namespace circlechef
+{
+
+/**
+ * @brief Logger Implementation
+ */
+class Logger final : public hermes::Source
+{
+public:
+ Logger(hermes::Context *ctx);
+ ~Logger();
+};
+
+/**
+ * @brief Logger Configuration
+ *
+ * Users are able to turn logging on/off via CIRCLECHEF_LOG environment variable.
+ */
+class LoggerConfig final : public hermes::Config
+{
+public:
+ LoggerConfig();
+
+public:
+ void configure(const hermes::Source *, hermes::Source::Setting &) const final;
+ void configure(const Logger *, hermes::Source::Setting &) const;
+
+private:
+ bool _enabled;
+};
+
+} // namespace circlechef
+
+#include "LoggingContext.h"
+
+/**
+ * HOW TO USE:
+ *
+ * LOGGER(l);
+ *
+ * INFO(l) << "Hello, World" << std::endl;
+ *
+ */
+#define LOGGER(name) ::circlechef::Logger name{::circlechef::LoggingContext::get()};
+
+// TODO Support FATAL, ERROR, WARN, and VERBOSE
+#define INFO(name) HERMES_INFO(name)
+
+// WARNING!
+//
+// THE CURRENT IMPLEMENTATION IS NOT THREAD SAFE.
+//
+
+#endif // __CIRCLECHEF_LOG_H__
diff --git a/compiler/circlechef/log/include/LoggingContext.h b/compiler/circlechef/log/include/LoggingContext.h
new file mode 100644
index 000000000..1282cfd45
--- /dev/null
+++ b/compiler/circlechef/log/include/LoggingContext.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CIRCLECHEF_LOGGING_CONTEXT_H__
+#define __CIRCLECHEF_LOGGING_CONTEXT_H__
+
+#include <hermes.h>
+
+namespace circlechef
+{
+
+/**
+ * @brief Global logging context
+ */
+struct LoggingContext
+{
+ static hermes::Context *get(void);
+};
+
+} // namespace circlechef
+
+#endif // __CIRCLECHEF_LOGGING_CONTEXT_H__
diff --git a/compiler/circlechef/log/src/Log.cpp b/compiler/circlechef/log/src/Log.cpp
new file mode 100644
index 000000000..11a60fb8d
--- /dev/null
+++ b/compiler/circlechef/log/src/Log.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Log.h"
+
+#include <cassert>
+#include <cstdlib>
+#include <iostream>
+
+// TODO Extract these lexical conversion routines as a library
+namespace
+{
+
+/**
+ * @brief Convert C-string as a value of type T
+ *
+ * safecast(s, v) returns v if s is nullptr.
+ */
+template <typename T> T safecast(const char *, const T &);
+
+template <> bool safecast<bool>(const char *s, const bool &value)
+{
+ return (s == nullptr) ? value : (std::stoi(s) != 0);
+}
+
+} // namespace
+
+//
+// Logger
+//
+namespace circlechef
+{
+
+Logger::Logger(hermes::Context *ctx) { activate(ctx->sources(), ctx->bus()); }
+Logger::~Logger() { deactivate(); }
+
+} // namespace circlechef
+
+//
+// LoggerConfig
+//
+namespace circlechef
+{
+
+LoggerConfig::LoggerConfig()
+{
+ // Turn on logging if CIRCLECHEF_LOG is set as non-zero value
+ _enabled = safecast<bool>(std::getenv("CIRCLECHEF_LOG"), false);
+}
+
+void LoggerConfig::configure(const hermes::Source *source, hermes::Source::Setting &setting) const
+{
+ // Let's ignore hermes::Sources if that is not a moco logger
+ if (auto logger = dynamic_cast<const Logger *>(source))
+ {
+ configure(logger, setting);
+ }
+}
+
+void LoggerConfig::configure(const Logger *, hermes::Source::Setting &setting) const
+{
+ if (_enabled)
+ {
+ // Enable all catagories
+ setting.accept_all();
+ }
+ else
+ {
+ // Disable all catagories
+ setting.reject_all();
+ }
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/log/src/LoggingContext.cpp b/compiler/circlechef/log/src/LoggingContext.cpp
new file mode 100644
index 000000000..b64bd3f3d
--- /dev/null
+++ b/compiler/circlechef/log/src/LoggingContext.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LoggingContext.h"
+#include "Log.h"
+
+#include <hermes/ConsoleReporter.h>
+
+#include <memory>
+
+namespace circlechef
+{
+
+hermes::Context *LoggingContext::get(void)
+{
+ static hermes::Context *ctx = nullptr;
+
+ if (ctx == nullptr)
+ {
+ ctx = new hermes::Context;
+ ctx->sinks()->append(std::make_unique<hermes::ConsoleReporter>());
+ ctx->config(std::make_unique<LoggerConfig>());
+ }
+
+ return ctx;
+}
+
+} // namespace circlechef
diff --git a/compiler/circlechef/proto/CMakeLists.txt b/compiler/circlechef/proto/CMakeLists.txt
new file mode 100644
index 000000000..3cc26de84
--- /dev/null
+++ b/compiler/circlechef/proto/CMakeLists.txt
@@ -0,0 +1,5 @@
+Protobuf_Generate(CIRCLECHEF_PROTO "${CMAKE_CURRENT_BINARY_DIR}/generated" "${CMAKE_CURRENT_SOURCE_DIR}" "circlechef.proto")
+
+add_library(circlechef_proto STATIC ${CIRCLECHEF_PROTO_SOURCES})
+target_include_directories(circlechef_proto PUBLIC ${CIRCLECHEF_PROTO_INCLUDE_DIRS})
+target_link_libraries(circlechef_proto libprotobuf)
diff --git a/compiler/circlechef/proto/circlechef.proto b/compiler/circlechef/proto/circlechef.proto
new file mode 100644
index 000000000..b8c009b38
--- /dev/null
+++ b/compiler/circlechef/proto/circlechef.proto
@@ -0,0 +1,110 @@
+syntax = "proto2";
+
+package circlechef;
+
+//
+// Initial version
+// - Our initial version
+//
+// Version 1
+// - Backward compatible with Initial version
+// - Added Graph to represent sub graphs
+// - Added name, version(default as 1), graph in ModelRecipe
+//
+
+// This enum value corresponds to TensorType in TensorFlow Lite schema
+enum TensorType {
+ FLOAT32 = 0;
+ INT32 = 2;
+ UINT8 = 3;
+ INT64 = 4;
+ BOOL = 6;
+}
+
+message TensorShape {
+ repeated uint32 dim = 3;
+}
+
+message TensorFiller {
+ optional string tag = 1;
+ repeated string arg = 2;
+}
+
+message TensorQuantization {
+ repeated float min = 1;
+ repeated float max = 2;
+ repeated float scale = 3;
+ repeated int64 zero_point = 4;
+}
+
+message Operand {
+ optional string name = 1;
+ optional TensorType type = 2;
+ optional TensorShape shape = 3;
+ optional TensorFiller filler = 4;
+ optional TensorQuantization quant = 5;
+}
+
+// This enum value corresponds to Padding in TensorFlow Lite schema
+enum Padding {
+ SAME = 0;
+ VALID = 1;
+}
+
+// This enum value corresponds to ActivationFunctionType in TensorFlow Lite schema
+enum Activation {
+ NONE = 0;
+ RELU = 1;
+ RELU6 = 3;
+}
+
+message BatchMatMulOptions {
+ optional bool adjoint_lhs = 1 [default = false];
+ optional bool adjoint_rhs = 2 [default = false];
+}
+
+message InstanceNormOptions {
+ optional float epsilon = 1 [default = 1e-05];
+ optional Activation activation = 2 [default = NONE];
+}
+
+message BCQFullyConnectedOptions {
+ optional int32 weights_hidden_size = 1 [default = 0];
+ optional Activation activation = 2 [default = NONE];
+}
+
+message BCQGatherOptions {
+ optional int32 input_hidden_size = 1 [default = 0];
+ optional int32 axis = 2 [default = 0];
+}
+
+message Operation {
+ optional string type = 1;
+ repeated string input = 2;
+ repeated string output = 3;
+ optional int32 version = 4 [default = 1];
+
+ optional BatchMatMulOptions batch_matmul_options = 100;
+ optional InstanceNormOptions instance_norm_options = 101;
+ optional BCQFullyConnectedOptions bcq_fully_connected_options = 102;
+ optional BCQGatherOptions bcq_gather_options = 103;
+}
+
+// For additional subgraphs
+message Graph {
+ repeated Operand operand = 1;
+ repeated Operation operation = 2;
+ repeated string input = 3;
+ repeated string output = 4;
+ optional string name = 5;
+}
+
+message ModelRecipe {
+ repeated Operand operand = 1;
+ repeated Operation operation = 2;
+ repeated string input = 3;
+ repeated string output = 4;
+ optional string name = 5;
+ optional uint32 version = 6 [default = 1];
+ repeated Graph graph = 7;
+}
diff --git a/compiler/circlechef/requires.cmake b/compiler/circlechef/requires.cmake
new file mode 100644
index 000000000..2106146d7
--- /dev/null
+++ b/compiler/circlechef/requires.cmake
@@ -0,0 +1,9 @@
+require("arser")
+require("nnkit")
+require("cwrap")
+require("mio-circle")
+require("safemain")
+require("hermes")
+require("hermes-std")
+require("foder")
+require("souschef")
diff --git a/compiler/circlechef/tests/CMakeLists.txt b/compiler/circlechef/tests/CMakeLists.txt
new file mode 100644
index 000000000..4dc58addf
--- /dev/null
+++ b/compiler/circlechef/tests/CMakeLists.txt
@@ -0,0 +1,70 @@
+nncc_find_resource(CircleRecipes)
+set(CIRCLERECIPES_DIR "${CircleRecipes_DIR}")
+
+file(GLOB RECIPES RELATIVE ${CIRCLERECIPES_DIR} "${CIRCLERECIPES_DIR}/*/test.recipe")
+
+foreach(RECIPE IN ITEMS ${RECIPES})
+ get_filename_component(RECIPE_PREFIX ${RECIPE} DIRECTORY)
+
+ set(RECIPE_SOURCE_FILE "${RECIPE_PREFIX}.recipe")
+ set(RECIPE_OUTPUT_FILE "${RECIPE_PREFIX}.circle")
+
+ # Copy .recipe
+ add_custom_command(OUTPUT ${RECIPE_SOURCE_FILE}
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ "${CIRCLERECIPES_DIR}/${RECIPE}" ${RECIPE_SOURCE_FILE}
+ DEPENDS "${CIRCLERECIPES_DIR}/${RECIPE}"
+ COMMENT "Generating ${RECIPE_SOURCE_FILE}")
+
+ # Generate .circle
+ add_custom_command(OUTPUT ${RECIPE_OUTPUT_FILE}
+ COMMAND circlechef-file ${RECIPE_SOURCE_FILE} ${RECIPE_OUTPUT_FILE}
+ DEPENDS circlechef-file ${RECIPE_SOURCE_FILE}
+ COMMENT "Generating ${RECIPE_OUTPUT_FILE}")
+
+ list(APPEND TESTS ${RECIPE_PREFIX})
+ list(APPEND TESTFILES ${RECIPE_OUTPUT_FILE})
+endforeach(RECIPE)
+
+#Test circlechef-reverse
+file(GLOB GEN_CIRCLEFILES RELATIVE ${CIRCLERECIPES_DIR} "${CIRCLERECIPES_DIR}/*/test.reverse")
+# Note: While in development, circlechef-reverse may not handle the operator.
+# To separate this linkage scan empty test.reverse for test targets for circlechef-reverse.
+
+foreach(CIRCLEFILE IN ITEMS ${GEN_CIRCLEFILES})
+ get_filename_component(CIRCLE_PREFIX ${CIRCLEFILE} DIRECTORY)
+
+ # file from above circlechef-file block
+ # use circle file as input of circlechef-reverse generated from circlechef-file
+ set(RECIPE_OUTPUT_FILE "${CIRCLE_PREFIX}.circle")
+ set(RECIPE_GEN_OUTPUT_FILE "${CIRCLE_PREFIX}.gen.recipe")
+ set(RECIPE_GEN_OUTPUT_FILE2 "${CIRCLE_PREFIX}.gen.circle")
+
+ # Generate .gen.recipe from generated .circle
+ add_custom_command(OUTPUT ${RECIPE_GEN_OUTPUT_FILE}
+ COMMAND circlechef-reverse ${RECIPE_OUTPUT_FILE} ${RECIPE_GEN_OUTPUT_FILE}
+ DEPENDS circlechef-reverse ${RECIPE_OUTPUT_FILE}
+ COMMENT "Generating ${RECIPE_GEN_OUTPUT_FILE}")
+
+ # now we are going to generate .gen.circle from .gen.recipe
+ # to check generated .gen.recipe file is correct by using it.
+ # as weight values may be different, binary comparision is not acceptable.
+ add_custom_command(OUTPUT ${RECIPE_GEN_OUTPUT_FILE2}
+ COMMAND circlechef-file ${RECIPE_GEN_OUTPUT_FILE} ${RECIPE_GEN_OUTPUT_FILE2}
+ DEPENDS circlechef-file ${RECIPE_GEN_OUTPUT_FILE}
+ COMMENT "Generating ${RECIPE_GEN_OUTPUT_FILE2}")
+
+ list(APPEND TESTS ${CIRCLE_PREFIX}.gen)
+ list(APPEND TESTFILES ${RECIPE_GEN_OUTPUT_FILE2})
+endforeach(CIRCLEFILE)
+
+# Add a dummy target to create a target-level dependency.
+# TODO Find a way to create a dependency between circlechef_test and generated testfiles.
+add_custom_target(circlechef_testfiles ALL DEPENDS ${TESTFILES})
+
+# Using circle_verify for temporary as it only calls flatbuffer validate
+# TODO do testing with running the model with runtime/interpreter
+add_test(NAME circlechef_test
+ COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/runvalidate.sh"
+ $<TARGET_FILE:circle-verify>
+ ${TESTS})
diff --git a/compiler/circlechef/tests/runvalidate.sh b/compiler/circlechef/tests/runvalidate.sh
new file mode 100755
index 000000000..46ad125ae
--- /dev/null
+++ b/compiler/circlechef/tests/runvalidate.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+if [[ $# -le 2 ]]; then
+ echo "USAGE: $0 [circle-verify path] [prefix 0] "
+ exit 255
+fi
+
+CIRCLE_VERIFY_PATH="$1"; shift
+
+echo "-- Found circle-verify: ${CIRCLE_VERIFY_PATH}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+while [[ $# -ne 0 ]]; do
+ PREFIX="$1"; shift
+
+ TESTED+=("${PREFIX}")
+
+ PASSED_TAG="${PREFIX}.passed"
+
+ rm -f "${PASSED_TAG}"
+
+ cat > "${PREFIX}.log" <(
+ exec 2>&1
+
+ echo "'${CIRCLE_VERIFY_PATH}' '${PREFIX}.circle'"
+ "${CIRCLE_VERIFY_PATH}" "${PREFIX}.circle"
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$PREFIX")
+ else
+ FAILED+=("$PREFIX")
+ fi
+done
+popd
+
+echo "SUMMARY: ${#PASSED[@]} PASS AND ${#FAILED[@]} FAIL AMONG ${#TESTED[@]} TESTS"
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+exit 0
diff --git a/compiler/circlechef/tools/CMakeLists.txt b/compiler/circlechef/tools/CMakeLists.txt
new file mode 100644
index 000000000..c958614b2
--- /dev/null
+++ b/compiler/circlechef/tools/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Console-based tool (circlechef)
+add_subdirectory(console)
+# File-based tool (circlechef-file)
+add_subdirectory(file)
+# Reverse tool to generate recipe from circle (circlechef-reverse)
+add_subdirectory(reverse)
diff --git a/compiler/circlechef/tools/console/CMakeLists.txt b/compiler/circlechef/tools/console/CMakeLists.txt
new file mode 100644
index 000000000..10168fca3
--- /dev/null
+++ b/compiler/circlechef/tools/console/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_executable(circlechef Driver.cpp)
+target_link_libraries(circlechef circlechef_core)
+target_link_libraries(circlechef safemain)
diff --git a/compiler/circlechef/tools/console/Driver.cpp b/compiler/circlechef/tools/console/Driver.cpp
new file mode 100644
index 000000000..0909f5927
--- /dev/null
+++ b/compiler/circlechef/tools/console/Driver.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "circlechef/ModelChef.h"
+
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/text_format.h>
+
+#include <iostream>
+
+int entry(int argc, char **argv)
+{
+ int32_t model_version = 1;
+
+ ::circlechef::ModelRecipe model_recipe;
+
+ // Read a model recipe from standard input
+ {
+ google::protobuf::io::IstreamInputStream iis{&std::cin};
+ if (!google::protobuf::TextFormat::Parse(&iis, &model_recipe))
+ {
+ std::cerr << "ERROR: Failed to parse recipe" << std::endl;
+ return 255;
+ }
+
+ if (model_recipe.has_version())
+ {
+ model_version = model_recipe.version();
+ }
+ }
+
+ if (model_version > 1)
+ {
+ std::cerr << "ERROR: Unsupported recipe version: " << model_version << std::endl;
+ return 255;
+ }
+
+ auto generated_model = circlechef::cook(model_recipe);
+
+ // Write a generated model into standard output
+ std::cout.write(generated_model.base(), generated_model.size());
+
+ return 0;
+}
diff --git a/compiler/circlechef/tools/file/CMakeLists.txt b/compiler/circlechef/tools/file/CMakeLists.txt
new file mode 100644
index 000000000..2524a657c
--- /dev/null
+++ b/compiler/circlechef/tools/file/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_executable(circlechef-file Driver.cpp)
+target_link_libraries(circlechef-file arser)
+target_link_libraries(circlechef-file circlechef_core)
+target_link_libraries(circlechef-file safemain)
diff --git a/compiler/circlechef/tools/file/Driver.cpp b/compiler/circlechef/tools/file/Driver.cpp
new file mode 100644
index 000000000..a15da4002
--- /dev/null
+++ b/compiler/circlechef/tools/file/Driver.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "circlechef/ModelChef.h"
+
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/text_format.h>
+
+#include <arser/arser.h>
+
+#include <fstream>
+#include <iostream>
+
+int entry(int argc, char **argv)
+{
+ arser::Arser arser;
+ arser.add_argument("recipe")
+ .type(arser::DataType::STR)
+ .help("Source recipe file path to convert");
+ arser.add_argument("circle").type(arser::DataType::STR).help("Target circle file path");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
+ }
+
+ int32_t model_version = 1;
+
+ ::circlechef::ModelRecipe model_recipe;
+
+ std::string recipe_path = arser.get<std::string>("recipe");
+ // Load model recipe from a file
+ {
+ std::ifstream is{recipe_path};
+ google::protobuf::io::IstreamInputStream iis{&is};
+ if (!google::protobuf::TextFormat::Parse(&iis, &model_recipe))
+ {
+ std::cerr << "ERROR: Failed to parse recipe '" << recipe_path << "'" << std::endl;
+ return 255;
+ }
+
+ if (model_recipe.has_version())
+ {
+ model_version = model_recipe.version();
+ }
+ }
+
+ if (model_version > 1)
+ {
+ std::cerr << "ERROR: Unsupported recipe version: " << model_version << ", '" << recipe_path
+ << "'" << std::endl;
+ return 255;
+ }
+
+ auto generated_model = circlechef::cook(model_recipe);
+
+ std::string circle_path = arser.get<std::string>("circle");
+ // Dump generated model into a file
+ {
+ std::ofstream os{circle_path, std::ios::binary};
+ os.write(generated_model.base(), generated_model.size());
+ }
+
+ return 0;
+}
diff --git a/compiler/circlechef/tools/reverse/CMakeLists.txt b/compiler/circlechef/tools/reverse/CMakeLists.txt
new file mode 100644
index 000000000..a1606c94e
--- /dev/null
+++ b/compiler/circlechef/tools/reverse/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_executable(circlechef-reverse Driver.cpp)
+target_link_libraries(circlechef-reverse arser)
+target_link_libraries(circlechef-reverse circlechef_circle)
+target_link_libraries(circlechef-reverse safemain)
+target_link_libraries(circlechef-reverse foder)
diff --git a/compiler/circlechef/tools/reverse/Driver.cpp b/compiler/circlechef/tools/reverse/Driver.cpp
new file mode 100644
index 000000000..9c0b9ea24
--- /dev/null
+++ b/compiler/circlechef/tools/reverse/Driver.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <circlechef/RecipeChef.h>
+
+#include <arser/arser.h>
+#include <foder/FileLoader.h>
+
+#include <memory>
+#include <iostream>
+
+int entry(int argc, char **argv)
+{
+ arser::Arser arser;
+ arser.add_argument("circle")
+ .type(arser::DataType::STR)
+ .help("Source circle file path to convert");
+ arser.add_argument("recipe").type(arser::DataType::STR).help("Target recipe file path");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
+ }
+
+ std::string circle_path = arser.get<std::string>("circle");
+ // Load TF lite model from a circle file
+ const foder::FileLoader fileLoader{circle_path};
+ std::vector<char> modelData = fileLoader.load();
+ const circle::Model *circlemodel = circle::GetModel(modelData.data());
+ if (circlemodel == nullptr)
+ {
+ std::cerr << "ERROR: Failed to load circle '" << circle_path << "'" << std::endl;
+ return 255;
+ }
+
+ // Generate ModelRecipe recipe
+ std::unique_ptr<circlechef::ModelRecipe> recipe = circlechef::generate_recipe(circlemodel);
+ if (recipe.get() == nullptr)
+ {
+ std::cerr << "ERROR: Failed to generate recipe" << std::endl;
+ return 255;
+ }
+
+ std::string recipe_path = arser.get<std::string>("recipe");
+ // Save to a file
+ bool result = circlechef::write_recipe(recipe_path, recipe);
+ if (!result)
+ {
+ std::cerr << "ERROR: Failed to write to recipe '" << recipe_path << "'" << std::endl;
+ return 255;
+ }
+ return 0;
+}
diff --git a/compiler/circledump/CMakeLists.txt b/compiler/circledump/CMakeLists.txt
index a117e7285..fb72b1d66 100644
--- a/compiler/circledump/CMakeLists.txt
+++ b/compiler/circledump/CMakeLists.txt
@@ -8,7 +8,7 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(circledump ${DRIVER} ${SOURCES})
target_include_directories(circledump PRIVATE include)
+target_link_libraries(circledump arser)
target_link_libraries(circledump mio_circle)
target_link_libraries(circledump safemain)
-target_link_libraries(circledump stdex)
target_link_libraries(circledump flatbuffers)
diff --git a/compiler/circledump/driver/Driver.cpp b/compiler/circledump/driver/Driver.cpp
index 8ed88e1d8..b8f561fee 100644
--- a/compiler/circledump/driver/Driver.cpp
+++ b/compiler/circledump/driver/Driver.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <arser/arser.h>
#include <circleread/Model.h>
#include <circledump/Dump.h>
@@ -21,30 +22,37 @@
int entry(int argc, char **argv)
{
- if (argc != 2)
+ arser::Arser arser;
+ arser.add_argument("circle").type(arser::DataType::STR).help("Circle file path to dump");
+
+ try
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [circle]" << std::endl;
- return 255;
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << '\n';
+ std::cout << arser;
+ return 0;
}
+ std::string circle_path = arser.get<std::string>("circle");
// Load Circle model from a circle file
- std::unique_ptr<circleread::Model> model = circleread::load_circle(argv[1]);
+ std::unique_ptr<circleread::Model> model = circleread::load_circle(circle_path);
if (model == nullptr)
{
- std::cerr << "ERROR: Failed to load circle '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to load circle '" << circle_path << "'" << std::endl;
return 255;
}
const circle::Model *circlemodel = model->model();
if (circlemodel == nullptr)
{
- std::cerr << "ERROR: Failed to load circle '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to load circle '" << circle_path << "'" << std::endl;
return 255;
}
- std::cout << "Dump: " << argv[1] << std::endl << std::endl;
+ std::cout << "Dump: " << circle_path << std::endl << std::endl;
std::cout << circlemodel << std::endl;
diff --git a/compiler/circledump/requires.cmake b/compiler/circledump/requires.cmake
index b090dbd4d..81e0f0dbd 100644
--- a/compiler/circledump/requires.cmake
+++ b/compiler/circledump/requires.cmake
@@ -1,3 +1,3 @@
+require("arser")
require("mio-circle")
require("safemain")
-require("stdex")
diff --git a/compiler/circledump/src/Dump.cpp b/compiler/circledump/src/Dump.cpp
index 3d99189f9..c695b0721 100644
--- a/compiler/circledump/src/Dump.cpp
+++ b/compiler/circledump/src/Dump.cpp
@@ -123,8 +123,8 @@ void dump_sub_graph(std::ostream &os, circleread::Reader &reader)
os << std::endl;
// dump operands(tensors)
- os << "Operands: T(subgraph index : tensor index) TYPE (shape) B(buffer index) OperandName"
- << std::endl;
+ os << "Operands: T(subgraph index : tensor index) TYPE (shape) (shape_signature) "
+ << "B(buffer index) OperandName" << std::endl;
for (uint32_t i = 0; i < tensors->Length(); ++i)
{
// TODO refactor to some better structure
@@ -137,6 +137,11 @@ void dump_sub_graph(std::ostream &os, circleread::Reader &reader)
os << "T(" << reader.subgraph_index() << ":" << i << ") " << circleread::tensor_type(tensor)
<< " ";
os << "(" << dims << ") ";
+ if (tensor->shape_signature())
+ {
+ std::vector<int32_t> dims_sig = circleread::as_index_vector(tensor->shape_signature());
+ os << "(" << dims_sig << ") ";
+ }
os << "B(" << tensor->buffer() << ") ";
os << circleread::tensor_name(tensor) << std::endl;
@@ -167,7 +172,12 @@ void dump_sub_graph(std::ostream &os, circleread::Reader &reader)
os << std::endl << strqindent;
}
if (q_params->zero_point())
+ {
os << "zeropt(" << q_params->zero_point() << ") ";
+ if (q_params->zero_point()->size() > 1)
+ os << std::endl << strqindent;
+ }
+ os << "quantized_dimension(" << q_params->quantized_dimension() << ")";
os << std::endl;
}
@@ -199,7 +209,7 @@ void dump_sub_graph(std::ostream &os, circleread::Reader &reader)
for (auto input : inputs)
{
- os << " I T(" << input << ") ";
+ os << " I T(" << reader.subgraph_index() << ":" << input << ") ";
if (input >= 0)
{
auto tensor = tensors->Get(input);
@@ -209,7 +219,7 @@ void dump_sub_graph(std::ostream &os, circleread::Reader &reader)
}
for (auto output : outputs)
{
- os << " O T(" << output << ") ";
+ os << " O T(" << reader.subgraph_index() << ":" << output << ") ";
if (output >= 0)
{
auto tensor = tensors->Get(output);
@@ -227,14 +237,14 @@ void dump_sub_graph(std::ostream &os, circleread::Reader &reader)
{
auto tensor = tensors->Get(input);
std::string name = circleread::tensor_name(tensor);
- os << "I T(" << input << ") " << name << std::endl;
+ os << "I T(" << reader.subgraph_index() << ":" << input << ") " << name << std::endl;
}
for (const auto output : reader.outputs())
{
auto tensor = tensors->Get(output);
std::string name = circleread::tensor_name(tensor);
- os << "O T(" << output << ") " << name << std::endl;
+ os << "O T(" << reader.subgraph_index() << ":" << output << ") " << name << std::endl;
}
os << std::endl;
diff --git a/compiler/circledump/src/OpPrinter.cpp b/compiler/circledump/src/OpPrinter.cpp
index f9daab494..2c0320396 100644
--- a/compiler/circledump/src/OpPrinter.cpp
+++ b/compiler/circledump/src/OpPrinter.cpp
@@ -17,11 +17,11 @@
#include "OpPrinter.h"
#include "Read.h"
-#include <stdex/Memory.h>
+#include <memory>
#include <flatbuffers/flexbuffers.h>
-using stdex::make_unique;
+using std::make_unique;
namespace circledump
{
@@ -60,6 +60,51 @@ public:
}
};
+class ArgMinPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_ArgMinOptions())
+ {
+ os << " ";
+ os << "OutputType(" << EnumNameTensorType(params->output_type()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class BatchMatMulPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_BatchMatMulOptions())
+ {
+ os << " ";
+ os << std::boolalpha;
+ os << "adjoint_lhs(" << params->adjoint_lhs() << ") ";
+ os << "adjoint_rhs(" << params->adjoint_rhs() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class CastPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto cast_params = op->builtin_options_as_CastOptions())
+ {
+ os << " ";
+ os << "in_data_type(" << circle::EnumNameTensorType(cast_params->in_data_type()) << ") ";
+ os << "out_data_type(" << circle::EnumNameTensorType(cast_params->out_data_type()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class Conv2DPrinter : public OpPrinter
{
public:
@@ -71,6 +116,8 @@ public:
os << "Padding(" << conv_params->padding() << ") ";
os << "Stride.W(" << conv_params->stride_w() << ") ";
os << "Stride.H(" << conv_params->stride_h() << ") ";
+ os << "Dilation.W(" << conv_params->dilation_w_factor() << ") ";
+ os << "Dilation.H(" << conv_params->dilation_h_factor() << ") ";
os << "Activation("
<< EnumNameActivationFunctionType(conv_params->fused_activation_function()) << ")";
os << std::endl;
@@ -78,6 +125,20 @@ public:
}
};
+class DepthToSpacePrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *std_params = op->builtin_options_as_DepthToSpaceOptions())
+ {
+ os << " ";
+ os << "BlockSize(" << std_params->block_size() << ")";
+ os << std::endl;
+ }
+ }
+};
+
class DivPrinter : public OpPrinter
{
public:
@@ -130,6 +191,20 @@ public:
}
};
+class ReducerPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto reducer_params = op->builtin_options_as_ReducerOptions())
+ {
+ os << " ";
+ os << "keep_dims(" << reducer_params->keep_dims() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class ReshapePrinter : public OpPrinter
{
public:
@@ -145,6 +220,52 @@ public:
}
};
+class ResizeBilinearPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *resize_params = op->builtin_options_as_ResizeBilinearOptions())
+ {
+ os << " ";
+ os << std::boolalpha;
+ os << "align_corners(" << resize_params->align_corners() << ")";
+ os << "half_pixel_centers(" << resize_params->half_pixel_centers() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class ResizeNearestNeighborPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *resize_params = op->builtin_options_as_ResizeNearestNeighborOptions())
+ {
+ os << " ";
+ os << std::boolalpha;
+ os << "align_corners(" << resize_params->align_corners() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class ReverseSequencePrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_ReverseSequenceOptions())
+ {
+ os << " ";
+ os << "seq_dim(" << params->seq_dim() << ") ";
+ os << "batch_dim(" << params->batch_dim() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class DepthwiseConv2DPrinter : public OpPrinter
{
public:
@@ -184,6 +305,95 @@ public:
}
};
+class GatherPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_GatherOptions())
+ {
+ os << " ";
+ os << "Axis(" << params->axis() << ") ";
+
+ os << std::endl;
+ }
+ }
+};
+
+class IfPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_IfOptions())
+ {
+ os << " ";
+ os << "then_subgraph_index(" << params->then_subgraph_index() << ") ";
+ os << "else_subgraph_index(" << params->else_subgraph_index() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class L2NormPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_L2NormOptions())
+ {
+ os << " ";
+ os << "Activation(" << EnumNameActivationFunctionType(params->fused_activation_function())
+ << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class LeakyReluPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_LeakyReluOptions())
+ {
+ os << " ";
+ os << "alpha(" << params->alpha() << ") ";
+ }
+ }
+};
+
+class LocalResponseNormalizationPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_LocalResponseNormalizationOptions())
+ {
+ os << " ";
+ os << "radius(" << params->radius() << ") ";
+ os << "bias(" << params->bias() << ") ";
+ os << "alpha(" << params->alpha() << ") ";
+ os << "beta(" << params->beta() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class MirrorPadPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_MirrorPadOptions())
+ {
+ os << " ";
+ os << "mode(" << EnumNameMirrorPadMode(params->mode()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class MulPrinter : public OpPrinter
{
public:
@@ -199,6 +409,21 @@ public:
}
};
+class OneHotPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_OneHotOptions())
+ {
+ os << " ";
+ os << "Axis(" << params->axis() << ") ";
+
+ os << std::endl;
+ }
+ }
+};
+
class PackPrinter : public OpPrinter
{
public:
@@ -214,6 +439,20 @@ public:
}
};
+class ShapePrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_ShapeOptions())
+ {
+ os << " ";
+ os << "out_type(" << EnumNameTensorType(params->out_type()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class SoftmaxPrinter : public OpPrinter
{
public:
@@ -228,6 +467,101 @@ public:
}
};
+class SpaceToDepthPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *std_params = op->builtin_options_as_SpaceToDepthOptions())
+ {
+ os << " ";
+ os << "BlockSize(" << std_params->block_size() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class SparseToDensePrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *std_params = op->builtin_options_as_SparseToDenseOptions())
+ {
+ os << " ";
+ os << "ValidateIndices(" << std_params->validate_indices() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class SplitPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_SplitOptions())
+ {
+ os << " ";
+ os << "num_splits(" << params->num_splits() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class SplitVPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_SplitVOptions())
+ {
+ os << " ";
+ os << "num_splits(" << params->num_splits() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class SqueezePrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_SqueezeOptions())
+ {
+ os << " ";
+ os << "SqueezeDims(";
+ for (int i = 0; i < params->squeeze_dims()->size(); ++i)
+ {
+ if (i != 0)
+ os << ", ";
+ os << params->squeeze_dims()->Get(i);
+ }
+ os << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class StridedSlicePrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *strided_slice_params = op->builtin_options_as_StridedSliceOptions())
+ {
+ os << " ";
+ os << "begin_mask(" << strided_slice_params->begin_mask() << ") ";
+ os << "end_mask(" << strided_slice_params->end_mask() << ") ";
+ os << "ellipsis_mask(" << strided_slice_params->ellipsis_mask() << ") ";
+ os << "new_axis_mask(" << strided_slice_params->new_axis_mask() << ") ";
+ os << "shrink_axis_mask(" << strided_slice_params->shrink_axis_mask() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class SubPrinter : public OpPrinter
{
public:
@@ -243,6 +577,37 @@ public:
}
};
+class TransposeConvPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto conv_params = op->builtin_options_as_TransposeConvOptions())
+ {
+ os << " ";
+ os << "Padding(" << conv_params->padding() << ") ";
+ os << "Stride.W(" << conv_params->stride_w() << ") ";
+ os << "Stride.H(" << conv_params->stride_h() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class WhilePrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_WhileOptions())
+ {
+ os << " ";
+ os << "cond_subgraph_index(" << params->cond_subgraph_index() << ") ";
+ os << "body_subgraph_index(" << params->body_subgraph_index() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class CustomOpPrinter : public OpPrinter
{
public:
@@ -283,25 +648,108 @@ public:
}
};
+class BCQFullyConnectedPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_BCQFullyConnectedOptions())
+ {
+ os << " ";
+ os << "Activation(" << EnumNameActivationFunctionType(params->fused_activation_function())
+ << ") ";
+ os << "weights_hidden_size(" << params->weights_hidden_size() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class BCQGatherPrinter : public OpPrinter
+{
+public:
+ void options(const circle::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_BCQGatherOptions())
+ {
+ os << " ";
+ os << "axis(" << params->axis() << ") ";
+ os << "weights_hidden_size(" << params->input_hidden_size() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
OpPrinterRegistry::OpPrinterRegistry()
{
_op_map[circle::BuiltinOperator_ADD] = make_unique<AddPrinter>();
+ // There is no Option for ADD_N
_op_map[circle::BuiltinOperator_ARG_MAX] = make_unique<ArgMaxPrinter>();
+ _op_map[circle::BuiltinOperator_ARG_MIN] = make_unique<ArgMinPrinter>();
_op_map[circle::BuiltinOperator_AVERAGE_POOL_2D] = make_unique<Pool2DPrinter>();
+ _op_map[circle::BuiltinOperator_BATCH_MATMUL] = make_unique<BatchMatMulPrinter>();
+ _op_map[circle::BuiltinOperator_CAST] = make_unique<CastPrinter>();
+ // There is no Option for CEIL
_op_map[circle::BuiltinOperator_CONCATENATION] = make_unique<ConcatenationPrinter>();
_op_map[circle::BuiltinOperator_CONV_2D] = make_unique<Conv2DPrinter>();
+ _op_map[circle::BuiltinOperator_DEPTH_TO_SPACE] = make_unique<DepthToSpacePrinter>();
_op_map[circle::BuiltinOperator_DEPTHWISE_CONV_2D] = make_unique<DepthwiseConv2DPrinter>();
_op_map[circle::BuiltinOperator_DIV] = make_unique<DivPrinter>();
+ // There is no Option for FLOOR
+ // There is no Option for FLOOR_MOD
_op_map[circle::BuiltinOperator_FULLY_CONNECTED] = make_unique<FullyConnectedPrinter>();
+ _op_map[circle::BuiltinOperator_GATHER] = make_unique<GatherPrinter>();
+ _op_map[circle::BuiltinOperator_IF] = make_unique<IfPrinter>();
+ _op_map[circle::BuiltinOperator_L2_NORMALIZATION] = make_unique<L2NormPrinter>();
+ _op_map[circle::BuiltinOperator_L2_POOL_2D] = make_unique<Pool2DPrinter>();
+ _op_map[circle::BuiltinOperator_LEAKY_RELU] = make_unique<LeakyReluPrinter>();
+ _op_map[circle::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION] =
+ make_unique<LocalResponseNormalizationPrinter>();
+ // There is no Option for LOG
+ // There is no Option for LOGISTIC
+ // There is no Option for LOG_SOFTMAX
_op_map[circle::BuiltinOperator_MAX_POOL_2D] = make_unique<Pool2DPrinter>();
+ _op_map[circle::BuiltinOperator_MIRROR_PAD] = make_unique<MirrorPadPrinter>();
_op_map[circle::BuiltinOperator_MUL] = make_unique<MulPrinter>();
+ _op_map[circle::BuiltinOperator_ONE_HOT] = make_unique<OneHotPrinter>();
_op_map[circle::BuiltinOperator_PACK] = make_unique<PackPrinter>();
- // There is no Option for Pad
- // There is no Option for ReLU and ReLU6
+ // There is no Option for PAD
+ // There is no Option for PRELU
+ // There is no Option for RELU
+ // There is no Option for RELU6
+ // There is no Option for RELU_N1_TO_1
+ _op_map[circle::BuiltinOperator_REDUCE_ANY] = make_unique<ReducerPrinter>();
+ _op_map[circle::BuiltinOperator_REDUCE_MAX] = make_unique<ReducerPrinter>();
+ _op_map[circle::BuiltinOperator_REDUCE_MIN] = make_unique<ReducerPrinter>();
+ _op_map[circle::BuiltinOperator_REDUCE_PROD] = make_unique<ReducerPrinter>();
_op_map[circle::BuiltinOperator_RESHAPE] = make_unique<ReshapePrinter>();
+ _op_map[circle::BuiltinOperator_RESIZE_BILINEAR] = make_unique<ResizeBilinearPrinter>();
+ _op_map[circle::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] =
+ make_unique<ResizeNearestNeighborPrinter>();
+ _op_map[circle::BuiltinOperator_REVERSE_SEQUENCE] = make_unique<ReverseSequencePrinter>();
+ // There is no Option for ROUND
+ // There is no Option for SELECT
+ // There is no Option for SELECT_V2
+ _op_map[circle::BuiltinOperator_SHAPE] = make_unique<ShapePrinter>();
+ // There is no Option for SIN
+ // There is no Option for SLICE
_op_map[circle::BuiltinOperator_SOFTMAX] = make_unique<SoftmaxPrinter>();
+ _op_map[circle::BuiltinOperator_SPACE_TO_DEPTH] = make_unique<SpaceToDepthPrinter>();
+ // There is no Option for SPACE_TO_BATCH_ND
+ _op_map[circle::BuiltinOperator_SPARSE_TO_DENSE] = make_unique<SparseToDensePrinter>();
+ _op_map[circle::BuiltinOperator_SPLIT] = make_unique<SplitPrinter>();
+ _op_map[circle::BuiltinOperator_SPLIT_V] = make_unique<SplitVPrinter>();
+ _op_map[circle::BuiltinOperator_SQUEEZE] = make_unique<SqueezePrinter>();
+ _op_map[circle::BuiltinOperator_STRIDED_SLICE] = make_unique<StridedSlicePrinter>();
_op_map[circle::BuiltinOperator_SUB] = make_unique<SubPrinter>();
+ _op_map[circle::BuiltinOperator_SUM] = make_unique<ReducerPrinter>();
+ _op_map[circle::BuiltinOperator_TRANSPOSE_CONV] = make_unique<TransposeConvPrinter>();
+ // There is no Option for TOPK_V2
+ _op_map[circle::BuiltinOperator_WHILE] = make_unique<WhilePrinter>();
_op_map[circle::BuiltinOperator_CUSTOM] = make_unique<CustomOpPrinter>();
+
+ // Circle only
+ _op_map[circle::BuiltinOperator_BCQ_FULLY_CONNECTED] = make_unique<BCQFullyConnectedPrinter>();
+ _op_map[circle::BuiltinOperator_BCQ_GATHER] = make_unique<BCQGatherPrinter>();
}
} // namespace circledump
diff --git a/compiler/common-artifacts/CMakeLists.txt b/compiler/common-artifacts/CMakeLists.txt
new file mode 100644
index 000000000..ee4191d73
--- /dev/null
+++ b/compiler/common-artifacts/CMakeLists.txt
@@ -0,0 +1,258 @@
+#[[ Generate common python virtual enviornment ]]
+find_package(PythonInterp 3 QUIET)
+find_package(PythonLibs 3 QUIET)
+
+if(NOT ${PYTHONINTERP_FOUND})
+ message(STATUS "Build common-artifacts: FALSE (Python3 is missing)")
+ return()
+endif()
+
+if(${PYTHON_VERSION_MINOR} LESS 3)
+ message(STATUS "Build common-artifacts: FALSE (You need to install Python version higher than 3.3)")
+ return()
+endif()
+
+# Create python virtual environment with tensorflow 1.13.2
+set(VIRTUALENV_OVERLAY "${NNCC_OVERLAY_DIR}/venv_1_13_2")
+
+add_custom_command(
+ OUTPUT ${VIRTUALENV_OVERLAY}
+ COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUALENV_OVERLAY}
+)
+
+# Create requirements.txt and install required pip packages
+set(REQUIREMENTS_FILE "requirements.txt")
+set(REQUIREMENTS_OVERLAY_PATH "${NNCC_OVERLAY_DIR}/${REQUIREMENTS_FILE}")
+
+add_custom_command(
+ OUTPUT ${REQUIREMENTS_OVERLAY_PATH}
+ COMMAND ${CMAKE_COMMAND} -E echo "tensorflow==1.13.2" > ${REQUIREMENTS_OVERLAY_PATH}
+ COMMAND ${VIRTUALENV_OVERLAY}/bin/python -m pip --default-timeout=1000 install --upgrade pip setuptools
+ COMMAND ${VIRTUALENV_OVERLAY}/bin/python -m pip --default-timeout=1000 install -r ${REQUIREMENTS_OVERLAY_PATH} --upgrade
+ DEPENDS ${VIRTUALENV_OVERLAY} ${REQUIREMENTS_OVERLAY_PATH}
+)
+
+add_custom_target(common_artifacts_python_deps ALL
+ DEPENDS ${VIRTUALENV_OVERLAY} ${REQUIREMENTS_OVERLAY_PATH}
+)
+
+# TODO Create python virtual environment with tensorflow 2.3.0-rc0
+
+#[[ Generate common resources ]]
+# TODO add pbtxt
+nnas_find_package(HDF5 QUIET)
+
+if(NOT HDF5_FOUND)
+ message(STATUS "Build common-artifacts: FAILED (missing HDF5)")
+ return()
+endif(NOT HDF5_FOUND)
+
+set(SOURCES src/TestDataGenerator.cpp)
+
+add_executable(testDataGenerator ${SOURCES})
+target_include_directories(testDataGenerator PRIVATE ${HDF5_INCLUDE_DIRS})
+target_link_libraries(testDataGenerator PRIVATE ${HDF5_CXX_LIBRARIES})
+target_link_libraries(testDataGenerator PRIVATE foder)
+target_link_libraries(testDataGenerator PRIVATE luci_import)
+target_link_libraries(testDataGenerator PRIVATE luci_interpreter)
+target_link_libraries(testDataGenerator PRIVATE mio_circle)
+target_link_libraries(testDataGenerator PRIVATE safemain)
+
+unset(TEST_DEPS)
+
+# Include recipe repo
+nncc_find_resource(TensorFlowLiteRecipes)
+nncc_find_resource(CircleRecipes)
+set(TFLITE_RECIPE_REPO "${TensorFlowLiteRecipes_DIR}")
+set(CIRCLE_RECIPE_REPO "${CircleRecipes_DIR}")
+set(TEST_RECIPE_FILENAME "test.recipe")
+set(TEST_RULE_FILENAME "test.rule")
+
+set(MODEL2NNPKG "${NNAS_PROJECT_SOURCE_DIR}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh")
+# Get test case list
+unset(RECIPES)
+file(GLOB TFLITE_SUBDIR RELATIVE ${TFLITE_RECIPE_REPO} ${TFLITE_RECIPE_REPO}/*)
+foreach(DIR IN ITEMS ${TFLITE_SUBDIR})
+ if(IS_DIRECTORY ${TFLITE_RECIPE_REPO}/${DIR})
+ list(APPEND RECIPES ${DIR})
+ endif()
+endforeach()
+file(GLOB CIRCLE_SUBDIR RELATIVE ${CIRCLE_RECIPE_REPO} ${CIRCLE_RECIPE_REPO}/*)
+foreach(DIR IN ITEMS ${CIRCLE_SUBDIR})
+ if(IS_DIRECTORY ${CIRCLE_RECIPE_REPO}/${DIR})
+ list(APPEND RECIPES ${DIR})
+ endif()
+endforeach()
+
+macro(circlize NAME)
+ set(NO_CIRCLIZE_${NAME} TRUE)
+ set(NO_OPTIMIZE_${NAME} TRUE)
+ set(NO_TCGEN_${NAME} TRUE)
+endmacro()
+macro(optimize NAME)
+ set(NO_OPTIMIZE_${NAME} TRUE)
+endmacro()
+macro(tcgenerate NAME)
+ set(NO_TCGEN_${NAME} TRUE)
+endmacro()
+
+include("exclude.lst")
+
+foreach(RECIPE IN ITEMS ${RECIPES})
+ unset(OPT_FORMAT)
+ unset(MODEL_FORMAT)
+
+ set(RECIPE_FILE "${RECIPE}.recipe")
+ set(RULE_FILE "${RECIPE}.rule")
+ set(TFLITE_RECIPE_SOURCE_PATH "${TFLITE_RECIPE_REPO}/${RECIPE}/${TEST_RECIPE_FILENAME}")
+ set(CIRCLE_RECIPE_SOURCE_PATH "${CIRCLE_RECIPE_REPO}/${RECIPE}/${TEST_RECIPE_FILENAME}")
+
+ if(NOT EXISTS "${TFLITE_RECIPE_SOURCE_PATH}")
+ if(NOT EXISTS "${CIRCLE_RECIPE_SOURCE_PATH}")
+ message(FATAL_ERROR "Missing recipe of '${RECIPE}' test")
+ else()
+ # circle recipe
+ set(MODEL_FORMAT "circle")
+ set(RECIPE_SOURCE_PATH ${CIRCLE_RECIPE_SOURCE_PATH})
+ endif()
+ else()
+ # tflite recipe
+ set(MODEL_FORMAT "tflite")
+ set(RECIPE_SOURCE_PATH ${TFLITE_RECIPE_SOURCE_PATH})
+ endif()
+
+ set(TFLITE_RULE_SOURCE_PATH "${TFLITE_RECIPE_REPO}/${RECIPE}/${TEST_RULE_FILENAME}")
+ set(CIRCLE_RULE_SOURCE_PATH "${CIRCLE_RECIPE_REPO}/${RECIPE}/${TEST_RULE_FILENAME}")
+
+ unset(RULE_SOURCE_PATH)
+ if(EXISTS "${TFLITE_RULE_SOURCE_PATH}")
+ set(RULE_SOURCE_PATH ${TFLITE_RULE_SOURCE_PATH})
+ endif()
+ if(EXISTS "${CIRCLE_RULE_SOURCE_PATH}")
+ set(RULE_SOURCE_PATH ${CIRCLE_RULE_SOURCE_PATH})
+ endif()
+
+ set(RECIPE_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/${RECIPE_FILE}")
+ set(RULE_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/${RULE_FILE}")
+
+ set(TFLITE_FILE "${RECIPE}.tflite")
+ set(TFLITE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${TFLITE_FILE}")
+ set(CIRCLE_FILE "${RECIPE}.circle")
+ set(CIRCLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${CIRCLE_FILE}")
+
+ # Copy .recipe
+ add_custom_command(OUTPUT ${RECIPE_BINARY_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${RECIPE_SOURCE_PATH}" "${RECIPE_BINARY_PATH}"
+ DEPENDS ${RECIPE_SOURCE_PATH}
+ COMMENT "Generate ${RECIPE_FILE}"
+ )
+ list(APPEND TEST_DEPS ${RECIPE_BINARY_PATH})
+
+ if(DEFINED RULE_SOURCE_PATH)
+ # Copy .rule
+ add_custom_command(OUTPUT ${RULE_BINARY_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${RULE_SOURCE_PATH}" "${RULE_BINARY_PATH}"
+ DEPENDS ${RULE_SOURCE_PATH}
+ COMMENT "Generate ${RULE_FILE}"
+ )
+ list(APPEND TEST_DEPS ${RULE_BINARY_PATH})
+ endif()
+
+ if(${MODEL_FORMAT} STREQUAL "tflite")
+ # Generate .tflite
+ add_custom_command(OUTPUT ${TFLITE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:tflchef-file> ${RECIPE_BINARY_PATH} ${TFLITE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:tflchef-file> ${RECIPE_BINARY_PATH}
+ COMMENT "Generate ${TFLITE_FILE}"
+ )
+ list(APPEND TEST_DEPS ${TFLITE_OUTPUT_PATH})
+
+ if(NOT DEFINED NO_CIRCLIZE_${RECIPE})
+ # Generate .circle
+ add_custom_command(OUTPUT ${CIRCLE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:tflite2circle> ${TFLITE_OUTPUT_PATH} ${CIRCLE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:tflite2circle> ${TFLITE_OUTPUT_PATH}
+ COMMENT "Generate ${CIRCLE_FILE}"
+ )
+ set(MODEL_FORMAT "circle")
+ list(APPEND TEST_DEPS ${CIRCLE_OUTPUT_PATH})
+ endif()
+ else()
+ # Generate .circle
+ add_custom_command(OUTPUT ${CIRCLE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:circlechef-file> ${RECIPE_BINARY_PATH} ${CIRCLE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:circlechef-file> ${RECIPE_BINARY_PATH}
+ COMMENT "Generate ${CIRCLE_FILE}"
+ )
+ list(APPEND TEST_DEPS ${CIRCLE_OUTPUT_PATH})
+ endif()
+
+ set(OPT_CIRCLE_FILE "${RECIPE}.opt.circle")
+ set(OPT_CIRCLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${OPT_CIRCLE_FILE}")
+
+ if(NOT DEFINED NO_OPTIMIZE_${RECIPE})
+ # Generate optimized .circle
+ add_custom_command(OUTPUT ${OPT_CIRCLE_OUTPUT_PATH}
+ COMMAND $<TARGET_FILE:circle2circle> --all ${CIRCLE_OUTPUT_PATH} ${OPT_CIRCLE_OUTPUT_PATH}
+ DEPENDS $<TARGET_FILE:circle2circle> ${CIRCLE_OUTPUT_PATH}
+ COMMENT "Generate ${OPT_CIRCLE_FILE}"
+ )
+ set(OPT_FORMAT ".opt")
+ list(APPEND TEST_DEPS ${OPT_CIRCLE_OUTPUT_PATH})
+ endif()
+
+ set(MODEL_FILE "${RECIPE}${OPT_FORMAT}.${MODEL_FORMAT}")
+ set(MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/${MODEL_FILE}")
+ set(NNPKG_FILE "${RECIPE}${OPT_FORMAT}")
+ set(NNPKG_PATH "${CMAKE_CURRENT_BINARY_DIR}/${NNPKG_FILE}")
+
+ add_custom_command(OUTPUT ${NNPKG_PATH}
+ COMMAND ${MODEL2NNPKG} ${MODEL_PATH}
+ DEPENDS ${MODEL2NNPKG} ${MODEL_PATH}
+ COMMENT "Generate ${RECIPE} nnpackage"
+ )
+ list(APPEND TEST_DEPS ${NNPKG_PATH})
+
+ set(INPUT_HDF5_FILE "${RECIPE}${OPT_FORMAT}.input.h5")
+ set(INPUT_BIN_PATH "${CMAKE_CURRENT_BINARY_DIR}/${INPUT_HDF5_FILE}")
+
+ set(EXPECTED_HDF5_FILE "${RECIPE}${OPT_FORMAT}.expected.h5")
+ set(EXPECTED_BIN_PATH "${CMAKE_CURRENT_BINARY_DIR}/${EXPECTED_HDF5_FILE}")
+
+ if(NOT DEFINED NO_TCGEN_${RECIPE})
+ # Generate input.h5, expected.h5
+ add_custom_command(OUTPUT ${INPUT_BIN_PATH} ${EXPECTED_BIN_PATH}
+ COMMAND $<TARGET_FILE:testDataGenerator> ${MODEL_FILE}
+ DEPENDS $<TARGET_FILE:testDataGenerator> ${MODEL_FILE}
+ COMMENT "Generate ${INPUT_BIN_PATH} and ${EXPECTED_BIN_PATH}"
+ )
+
+ # Generate test directory
+ set(TC_DIRECTORY "${NNPKG_PATH}/metadata/tc")
+ add_custom_command(OUTPUT ${TC_DIRECTORY}
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${TC_DIRECTORY}
+ DEPENDS ${NNPKG_PATH}
+ COMMENT "Generate ${RECIPE} nnpackage test directory"
+ )
+
+ # Move input hdf5 file to test directory
+ set(INPUT_NNPKG_PATH "${TC_DIRECTORY}/input.h5")
+ add_custom_command(OUTPUT ${INPUT_NNPKG_PATH}
+ COMMAND ${CMAKE_COMMAND} -E rename ${INPUT_BIN_PATH} ${INPUT_NNPKG_PATH}
+ DEPENDS ${INPUT_BIN_PATH} ${TC_DIRECTORY}
+ COMMENT "Move ${INPUT_HDF5_FILE} to nnpackage"
+ )
+
+ # Move expected hdf5 file to test directory
+ set(EXPECTED_NNPKG_PATH "${TC_DIRECTORY}/expected.h5")
+ add_custom_command(OUTPUT ${EXPECTED_NNPKG_PATH}
+ COMMAND ${CMAKE_COMMAND} -E rename ${EXPECTED_BIN_PATH} ${EXPECTED_NNPKG_PATH}
+ DEPENDS ${EXPECTED_BIN_PATH} ${TC_DIRECTORY}
+ COMMENT "Move ${EXPECTED_HDF5_FILE} to nnpackage"
+ )
+ list(APPEND TEST_DEPS ${TC_DIRECTORY} ${INPUT_BIN_PATH} ${EXPECTED_BIN_PATH}
+ ${INPUT_NNPKG_PATH} ${EXPECTED_NNPKG_PATH})
+ endif()
+endforeach()
+
+add_custom_target(common_artifacts_deps ALL DEPENDS ${TEST_DEPS})
diff --git a/compiler/common-artifacts/README.md b/compiler/common-artifacts/README.md
new file mode 100644
index 000000000..cb6ba402e
--- /dev/null
+++ b/compiler/common-artifacts/README.md
@@ -0,0 +1,37 @@
+# common-artifacts
+
+`common-artifacts` is a module that produces intermediate artifacts that are commonly generated for compiler testing.
+
+There are four modules used here.
+- tflchef : recipe -> tflite
+- tflite2circle : tflite -> circle (circlize)
+- circle2circle : circle -> circle (optimize)
+- TestDataGenerator : generate input.h5 and expected.h5 (tcgenerate)
+
+## List of intermediate artifacts
+- recipe
+- tflite
+- circle
+- circle (applied all optimizations in circle2circle)
+- input data for nnpackage (.h5)
+- expected output data for nnpackage (.h5)
+
+## How to exclude from resource generation
+Sometimes a specific module that generates a resource does not support the generation of the resource, so exclusion is sometimes required.
+
+There is a `exclude.lst` that performs the function. If you enter the name of steps(circlize, optimize, tcgenerate) and operator you want to exclude there, you can omit the module's step.
+
+e.g.
+```
+$ cat exclude.lst
+# circlize : Exclude from tflite-to-circle conversion(tflite2circle)
+
+# optimize : Exclude from circle optimization(circle2circle)
+optimize(ReLU6_000)
+optimize(Where_000)
+optimize(Where_001)
+
+# tcgenerate : Exclude from test data generation(TestDataGenerator)
+tcgenerate(Abs_000)
+tcgenerate(AddN_000)
+```
diff --git a/compiler/common-artifacts/exclude.lst b/compiler/common-artifacts/exclude.lst
new file mode 100644
index 000000000..b614b7182
--- /dev/null
+++ b/compiler/common-artifacts/exclude.lst
@@ -0,0 +1,191 @@
+#[[ circlize : Exclude from tflite-to-circle conversion(tflite2circle) ]]
+## TensorFlowLiteRecipes
+
+## CircleRecipes
+
+#[[ optimize : Exclude from circle optimization(circle2circle) ]]
+## TensorFlowLiteRecipes
+optimize(ReLU6_000)
+optimize(Where_000)
+optimize(Where_001)
+
+## CircleRecipes
+
+#[[ tcgenerate : Exclude from test data generation(TestDataGenerator) ]]
+## TensorFlowLiteRecipes
+tcgenerate(Abs_000)
+tcgenerate(AddN_000)
+tcgenerate(Add_001) # runtime doesn't support
+tcgenerate(Add_U8_000)
+tcgenerate(All_000)
+tcgenerate(ArgMax_U8_000)
+tcgenerate(ArgMax_U8_001)
+tcgenerate(ArgMax_U8_002)
+tcgenerate(ArgMax_U8_003)
+tcgenerate(ArgMin_000)
+tcgenerate(ArgMin_001)
+tcgenerate(ArgMin_002)
+tcgenerate(ArgMin_003)
+tcgenerate(ArgMin_U8_000)
+tcgenerate(ArgMin_U8_001)
+tcgenerate(ArgMin_U8_002)
+tcgenerate(ArgMin_U8_003)
+tcgenerate(BatchMatMul_000)
+tcgenerate(BatchMatMulV2_000)
+tcgenerate(BatchMatMulV2_001)
+tcgenerate(BatchToSpaceND_000)
+tcgenerate(Cast_000)
+tcgenerate(Cast_001)
+tcgenerate(Ceil_000)
+tcgenerate(Concatenation_U8_000)
+tcgenerate(Conv2D_003) # runtime doesn't support dilation
+tcgenerate(Conv2D_U8_000)
+tcgenerate(Conv2D_U8_001)
+tcgenerate(Cos_000)
+tcgenerate(DepthToSpace_000)
+tcgenerate(DepthwiseConv2D_001) # runtime doesn't support dilation
+tcgenerate(DepthwiseConv2D_003) # runtime doesn't support dilation
+tcgenerate(DepthwiseConv2D_U8_000)
+tcgenerate(Div_000)
+tcgenerate(ELU_000)
+tcgenerate(Equal_000)
+tcgenerate(Exp_000)
+tcgenerate(ExpandDims_000)
+tcgenerate(ExpandDims_001)
+tcgenerate(ExpandDims_002)
+tcgenerate(ExpandDims_003)
+tcgenerate(Fill_000)
+tcgenerate(Fill_001)
+tcgenerate(Floor_000)
+tcgenerate(FloorDiv_000)
+tcgenerate(FloorDiv_001)
+tcgenerate(FloorMod_000)
+tcgenerate(FloorMod_001)
+tcgenerate(FullyConnected_002)
+tcgenerate(FullyConnected_U8_000)
+tcgenerate(Gather_000)
+tcgenerate(GatherNd_000)
+tcgenerate(GatherNd_001)
+tcgenerate(Greater_000)
+tcgenerate(GreaterEqual_000)
+tcgenerate(If_000)
+tcgenerate(If_001)
+tcgenerate(L2Normalize_000) # runtime doesn't support
+tcgenerate(L2Pool2D_000) # runtime doesn't support
+tcgenerate(L2Pool2D_U8_000)
+tcgenerate(LeakyRelu_000) # runtime doesn't support
+tcgenerate(Less_000)
+tcgenerate(LessEqual_000)
+tcgenerate(LocalResponseNormalization_000) # runtime doesn't support
+tcgenerate(Log_000)
+tcgenerate(LogicalAnd_000)
+tcgenerate(LogicalNot_000)
+tcgenerate(LogicalOr_000)
+tcgenerate(LogSoftmax_000)
+tcgenerate(MatMul_000)
+tcgenerate(MatrixBandPart_000)
+tcgenerate(MatrixDiag_000)
+tcgenerate(MatrixSetDiag_000)
+tcgenerate(Maximum_000)
+tcgenerate(MaxPool2D_U8_000)
+tcgenerate(Mean_U8_000)
+tcgenerate(Minimum_000)
+tcgenerate(MirrorPad_000)
+tcgenerate(Mul_U8_000)
+tcgenerate(Neg_000)
+tcgenerate(Net_Dangle_001)
+tcgenerate(Net_InstanceNorm_001)
+tcgenerate(Net_InstanceNorm_002)
+tcgenerate(Net_ZeroDim_001) # fix luci
+tcgenerate(NotEqual_000)
+tcgenerate(OneHot_000)
+tcgenerate(OneHot_001)
+tcgenerate(OneHot_002)
+tcgenerate(OneHot_003)
+tcgenerate(Pack_000)
+tcgenerate(Pack_U8_000)
+tcgenerate(Pad_U8_000)
+tcgenerate(Pow_000)
+tcgenerate(PRelu_000)
+tcgenerate(Range_000)
+tcgenerate(Rank_000)
+tcgenerate(ReduceAny_000)
+tcgenerate(ReduceAny_001)
+tcgenerate(ReduceAny_002)
+tcgenerate(ReduceAny_003)
+tcgenerate(ReduceMax_000)
+tcgenerate(ReduceMin_000)
+tcgenerate(ReduceProd_000)
+tcgenerate(ReduceProd_001)
+tcgenerate(ReduceProd_002)
+tcgenerate(ReduceProd_003)
+tcgenerate(ReLU_000)
+tcgenerate(ReLU6_000) # luci NYI
+tcgenerate(ReLUN1To1_000)
+tcgenerate(Reshape_003) # fix luci
+tcgenerate(Reshape_U8_000)
+tcgenerate(ResizeBilinear_000)
+tcgenerate(ResizeNearestNeighbor_000)
+tcgenerate(ReverseSequence_000)
+tcgenerate(ReverseV2_000)
+tcgenerate(Round_000)
+tcgenerate(Rsqrt_000)
+tcgenerate(ScatterNd_000)
+tcgenerate(SegmentSum_000)
+tcgenerate(Select_000)
+tcgenerate(Select_001)
+tcgenerate(Select_002)
+tcgenerate(SelectV2_000)
+tcgenerate(SelectV2_001)
+tcgenerate(SelectV2_002)
+tcgenerate(Shape_000)
+tcgenerate(Sin_000)
+tcgenerate(Slice_000)
+tcgenerate(Softmax_U8_000)
+tcgenerate(SpaceToBatchND_000)
+tcgenerate(SpaceToBatchND_001)
+tcgenerate(SpaceToBatchND_002)
+tcgenerate(SpaceToBatchND_003)
+tcgenerate(SpaceToDepth_000)
+tcgenerate(SparseToDense_000)
+tcgenerate(SplitV_000) # fix luci
+tcgenerate(Sqrt_000)
+tcgenerate(Square_000)
+tcgenerate(SquaredDifference_000)
+tcgenerate(Squeeze_000)
+tcgenerate(StridedSlice_000)
+tcgenerate(StridedSlice_001)
+tcgenerate(StridedSlice_002)
+tcgenerate(Sub_000)
+tcgenerate(Sub_001)
+tcgenerate(Sub_U8_000)
+tcgenerate(Sum_000)
+tcgenerate(Sum_001)
+tcgenerate(Tanh_000)
+tcgenerate(Tile_000)
+tcgenerate(Tile_U8_000)
+tcgenerate(TopKV2_000) # fix luci
+tcgenerate(TopKV2_001) # fix luci
+tcgenerate(TransposeConv_000) # fix interpreter
+tcgenerate(Unique_000)
+tcgenerate(Unique_001)
+tcgenerate(Unique_002)
+tcgenerate(Unique_003)
+tcgenerate(Unique_U8_000)
+tcgenerate(Unique_U8_001)
+tcgenerate(Where_000) # luci NYI
+tcgenerate(Where_001) # luci NYI
+tcgenerate(While_000) # fix luci
+tcgenerate(While_001)
+tcgenerate(While_002)
+tcgenerate(While_003)
+tcgenerate(YUV_TO_RGB_000) # fix luci
+tcgenerate(YUV_TO_RGB_U8_000)
+tcgenerate(ZerosLike_000)
+
+## CircleRecipes
+tcgenerate(BCQFullyConnected_000)
+tcgenerate(BCQFullyConnected_001)
+tcgenerate(BCQGather_000)
+tcgenerate(CircleBatchMatMul_000)
+tcgenerate(InstanceNorm_000)
diff --git a/compiler/common-artifacts/requires.cmake b/compiler/common-artifacts/requires.cmake
new file mode 100644
index 000000000..8c27565cf
--- /dev/null
+++ b/compiler/common-artifacts/requires.cmake
@@ -0,0 +1,8 @@
+require("circle2circle")
+require("circlechef")
+require("foder")
+require("luci")
+require("luci-interpreter")
+require("mio-circle")
+require("safemain")
+require("tflchef")
diff --git a/compiler/common-artifacts/src/TestDataGenerator.cpp b/compiler/common-artifacts/src/TestDataGenerator.cpp
new file mode 100644
index 000000000..739300d18
--- /dev/null
+++ b/compiler/common-artifacts/src/TestDataGenerator.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <foder/FileLoader.h>
+#include <luci/Importer.h>
+#include <luci_interpreter/Interpreter.h>
+#include <mio/circle/schema_generated.h>
+
+#include <H5Cpp.h>
+
+#include <algorithm>
+#include <iostream>
+#include <memory>
+#include <random>
+#include <string>
+
+namespace
+{
+
+uint32_t element_num(std::vector<hsize_t> &vec)
+{
+ return static_cast<uint32_t>(
+ std::accumulate(std::begin(vec), std::end(vec), 1, std::multiplies<uint32_t>()));
+}
+
+H5::PredType hdf5_dtype_cast(const loco::DataType loco_dtype)
+{
+ switch (loco_dtype)
+ {
+ case loco::DataType::U8:
+ return H5::PredType::NATIVE_UINT8;
+ case loco::DataType::S32:
+ return H5::PredType::NATIVE_INT32;
+ case loco::DataType::S64:
+ return H5::PredType::NATIVE_INT64;
+ case loco::DataType::FLOAT32:
+ return H5::PredType::NATIVE_FLOAT;
+ default:
+ throw std::runtime_error("NYI data type.");
+ }
+}
+
+template <typename T> void geneate_random_data(std::mt19937 &gen, void *data, uint32_t size)
+{
+ std::normal_distribution<float> distrib(0, 2); // mean(0), stddev(2)
+ for (uint32_t i = 0; i < size; i++)
+ {
+ static_cast<T *>(data)[i] = static_cast<T>(distrib(gen));
+ }
+}
+
+void fill_random_data(void *data, uint32_t size, loco::DataType dtype)
+{
+ std::random_device rd; // used to obtain a seed for the random number engine
+ std::mt19937 gen(rd()); // standard mersenne_twister_engine seeded with rd()
+
+ switch (dtype)
+ {
+ case loco::DataType::U8:
+ geneate_random_data<uint8_t>(gen, data, size);
+ break;
+ case loco::DataType::S32:
+ geneate_random_data<int32_t>(gen, data, size);
+ break;
+ case loco::DataType::S64:
+ geneate_random_data<int64_t>(gen, data, size);
+ break;
+ case loco::DataType::FLOAT32:
+ geneate_random_data<float>(gen, data, size);
+ break;
+ default:
+ break;
+ }
+}
+
+} // namespace
+
+int entry(int argc, char **argv)
+{
+ std::string circle_file{argv[1]};
+ size_t last_dot_index = circle_file.find_last_of(".");
+ std::string prefix = circle_file.substr(0, last_dot_index);
+
+ // load circle file
+ foder::FileLoader file_loader{circle_file};
+ std::vector<char> model_data = file_loader.load();
+ const circle::Model *circle_model = circle::GetModel(model_data.data());
+ if (circle_model == nullptr)
+ {
+ std::cerr << "ERROR: Failed to load circle '" << circle_file << "'" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // load luci module
+ std::unique_ptr<luci::Module> module = luci::Importer().importModule(circle_model);
+ luci_interpreter::Interpreter interpreter(module.get());
+
+ /**
+ * HDF5 layout is like below
+ *
+ * GROUP "/"
+ *  └ GROUP "name"
+ *    └ ATTRIBUTE "0"
+ *      └ DATA (0): "input_01:0"
+ *    └ ATTRIBUTE "1"
+ *      └ DATA (0): "input_02:0"
+ *  └ GROUP "value"
+ *    └ DATASET "0"
+ *      └ DATA ...
+ *    └ DATASET "1"
+ *      └ DATA ...
+ */
+ // create random data and dump into hdf5 file
+ H5::H5File input_file{prefix + ".input.h5", H5F_ACC_TRUNC};
+ std::unique_ptr<H5::Group> input_name_group =
+ std::make_unique<H5::Group>(input_file.createGroup("name"));
+ std::unique_ptr<H5::Group> input_value_group =
+ std::make_unique<H5::Group>(input_file.createGroup("value"));
+
+ H5::H5File output_file{prefix + ".expected.h5", H5F_ACC_TRUNC};
+ std::unique_ptr<H5::Group> output_name_group =
+ std::make_unique<H5::Group>(output_file.createGroup("name"));
+ std::unique_ptr<H5::Group> output_value_group =
+ std::make_unique<H5::Group>(output_file.createGroup("value"));
+
+ uint32_t input_index = 0;
+ for (uint32_t g = 0; g < circle_model->subgraphs()->size(); g++)
+ {
+ const auto input_nodes = loco::input_nodes(module->graph(g));
+ for (const auto &node : input_nodes)
+ {
+ const auto *input_node = dynamic_cast<const luci::CircleInput *>(node);
+ std::string name = input_node->name();
+ if (name.find(":") == std::string::npos)
+ name += ":0";
+
+ // create attribute
+ H5::DataSpace name_dataspace(H5S_SCALAR);
+ H5::StrType name_datatype(H5::PredType::C_S1, name.size());
+
+ auto name_attr = input_name_group->createAttribute(std::to_string(input_index), name_datatype,
+ name_dataspace);
+
+ name_attr.write(name_datatype, name);
+
+ // create value
+ std::vector<hsize_t> dims(input_node->rank());
+ for (uint32_t d = 0; d < input_node->rank(); d++)
+ {
+ dims.at(d) = input_node->dim(d).value();
+ assert(dims.at(d) >= 0);
+ }
+ auto dataspace = std::make_unique<H5::DataSpace>(dims.size(), dims.data());
+ auto dtype = hdf5_dtype_cast(input_node->dtype());
+ auto dataset = std::make_unique<H5::DataSet>(
+ input_file.createDataSet("value/" + std::to_string(input_index), dtype, *dataspace));
+
+ auto data_size = ::element_num(dims);
+ auto dtype_size = loco::size(input_node->dtype());
+ auto byte_size = dtype_size * data_size;
+ std::vector<int8_t> data(byte_size);
+
+ // generate random data
+ fill_random_data(data.data(), data_size, input_node->dtype());
+
+ dataset->write(data.data(), dtype);
+
+ interpreter.writeInputTensor(input_node, data.data(), byte_size);
+
+ input_index++;
+ }
+ }
+
+ interpreter.interpret();
+
+ // dump output data into hdf5 file
+ uint32_t output_index = 0;
+ for (uint32_t g = 0; g < circle_model->subgraphs()->size(); g++)
+ {
+ const auto output_nodes = loco::output_nodes(module->graph(g));
+ for (const auto &node : output_nodes)
+ {
+ const auto *output_node = dynamic_cast<const luci::CircleOutput *>(node);
+ std::string name = output_node->name();
+ if (name.find(":") == std::string::npos)
+ name += ":0";
+
+ // create attribute
+ H5::DataSpace name_dataspace(H5S_SCALAR);
+ H5::StrType name_datatype(H5::PredType::C_S1, name.size());
+
+ auto name_attr = output_name_group->createAttribute(std::to_string(output_index),
+ name_datatype, name_dataspace);
+
+ name_attr.write(name_datatype, name);
+
+ // create value
+ std::vector<hsize_t> dims(output_node->rank());
+ for (uint32_t d = 0; d < output_node->rank(); d++)
+ {
+ dims.at(d) = output_node->dim(d).value();
+ assert(dims.at(d) >= 0);
+ }
+ auto dataspace = std::make_unique<H5::DataSpace>(dims.size(), dims.data());
+ auto dtype = hdf5_dtype_cast(output_node->dtype());
+ auto dataset = std::make_unique<H5::DataSet>(
+ output_file.createDataSet("value/" + std::to_string(output_index), dtype, *dataspace));
+
+ uint32_t tensor_bytesize = loco::size(output_node->dtype());
+ tensor_bytesize *= ::element_num(dims);
+ std::vector<int8_t> output_data(tensor_bytesize);
+ interpreter.readOutputTensor(output_node, output_data.data(), output_data.size());
+
+ dataset->write(output_data.data(), dtype);
+
+ output_index++;
+ }
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/compiler/cwrap/include/cwrap/Fildes.h b/compiler/cwrap/include/cwrap/Fildes.h
index f1061cc57..22a95ef7c 100644
--- a/compiler/cwrap/include/cwrap/Fildes.h
+++ b/compiler/cwrap/include/cwrap/Fildes.h
@@ -47,7 +47,7 @@ public:
int release(void);
private:
- int _value;
+ int _value{-1};
};
bool valid(const Fildes &);
diff --git a/compiler/cwrap/src/Fildes.test.cpp b/compiler/cwrap/src/Fildes.test.cpp
index 08e1e2a5e..f9fa20f9e 100644
--- a/compiler/cwrap/src/Fildes.test.cpp
+++ b/compiler/cwrap/src/Fildes.test.cpp
@@ -32,7 +32,9 @@ namespace
int make_temp(char *name_template)
{
+ mode_t mask = umask(S_IRWXU);
int fd = mkstemp(name_template);
+ umask(mask);
if (fd == -1)
{
@@ -55,9 +57,13 @@ TEST(FildesTest, value_constructor)
{
DECLARE_TEMPLATE(name_template);
- cwrap::Fildes fildes{make_temp(name_template)};
+ {
+ cwrap::Fildes fildes{make_temp(name_template)};
+
+ ASSERT_TRUE(cwrap::valid(fildes));
+ }
- ASSERT_TRUE(cwrap::valid(fildes));
+ unlink(name_template);
}
TEST(FildesTest, move_constructor)
@@ -68,19 +74,24 @@ TEST(FildesTest, move_constructor)
int src_fd = make_temp(src_template);
int dst_fd = make_temp(dst_template);
- cwrap::Fildes src{src_fd};
- cwrap::Fildes dst{dst_fd};
+ {
+ cwrap::Fildes src{src_fd};
+ cwrap::Fildes dst{dst_fd};
- dst = std::move(src);
+ dst = std::move(src);
- ASSERT_FALSE(cwrap::valid(src));
- ASSERT_TRUE(cwrap::valid(dst));
+ ASSERT_FALSE(cwrap::valid(src));
+ ASSERT_TRUE(cwrap::valid(dst));
- ASSERT_EQ(dst.get(), src_fd);
+ ASSERT_EQ(dst.get(), src_fd);
- // "src_fd" SHOULD be valid, and "dst_fd" SHOULD be closed
- ASSERT_NE(fcntl(src_fd, F_GETFD), -1);
- ASSERT_EQ(fcntl(dst_fd, F_GETFD), -1);
+ // "src_fd" SHOULD be valid, and "dst_fd" SHOULD be closed
+ ASSERT_NE(fcntl(src_fd, F_GETFD), -1);
+ ASSERT_EQ(fcntl(dst_fd, F_GETFD), -1);
+ }
+
+ unlink(src_template);
+ unlink(dst_template);
}
TEST(FildesTest, destructor)
@@ -94,4 +105,6 @@ TEST(FildesTest, destructor)
cwrap::Fildes fildes{fd};
}
ASSERT_EQ(fcntl(fd, F_GETFD), -1);
+
+ unlink(name_template);
}
diff --git a/compiler/dredd-rule-lib/rule-lib.sh b/compiler/dredd-rule-lib/rule-lib.sh
index 8ebe3d7af..9254cc9a7 100755
--- a/compiler/dredd-rule-lib/rule-lib.sh
+++ b/compiler/dredd-rule-lib/rule-lib.sh
@@ -200,4 +200,21 @@ verify_file_format()
echo ${ACTUAL}
}
+op_version()
+{
+ argc_check $# 1
+ file_path_check ${COMPILED_FILE}
+ file_path_check ${INSPECT_PROG_PATH}
+
+ set -o pipefail
+
+ ACTUAL=`init_error_log ; \
+ ${INSPECT_PROG_PATH} --op_version ${COMPILED_FILE} | \
+ awk -F, -v opname="$1" '{ if ($1 == opname) print $2}'`
+
+ check_success_exit_code $? 0
+
+ echo ${ACTUAL}
+}
+
# TODO define more qullity test function
diff --git a/compiler/enco-intf/README.md b/compiler/enco-intf/README.md
new file mode 100644
index 000000000..5f265bce0
--- /dev/null
+++ b/compiler/enco-intf/README.md
@@ -0,0 +1 @@
+# enco-intf
diff --git a/compiler/exo/src/Circle/CircleTensorExporter.cpp b/compiler/exo/src/Circle/CircleTensorExporter.cpp
index efceae55d..6adc31616 100644
--- a/compiler/exo/src/Circle/CircleTensorExporter.cpp
+++ b/compiler/exo/src/Circle/CircleTensorExporter.cpp
@@ -60,8 +60,8 @@ public:
private:
std::string _name;
- circle::TensorType _dtype;
- ShapeDescription _shape;
+ circle::TensorType _dtype{circle::TensorType_FLOAT32};
+ ShapeDescription _shape{};
// TODO Find a better design
loco::ConstGen *_content = nullptr; // TODO deprecate
diff --git a/compiler/exo/src/Conversion/DepthwiseConv2DConverter.cpp b/compiler/exo/src/Conversion/DepthwiseConv2DConverter.cpp
index 5959fcc45..e3884c3cc 100644
--- a/compiler/exo/src/Conversion/DepthwiseConv2DConverter.cpp
+++ b/compiler/exo/src/Conversion/DepthwiseConv2DConverter.cpp
@@ -56,7 +56,7 @@ bool DepthwiseConv2DConverter::convert(loco::DepthwiseConv2D *origin)
tfl_dw_conv2d->fusedActivationFunction(locoex::FusedActFunc::NONE);
uint32_t multiplier = filter_shape.multiplier().value();
- EXO_ASSERT(multiplier < std::numeric_limits<int32_t>::max(),
+ EXO_ASSERT(multiplier < static_cast<uint32_t>(std::numeric_limits<int32_t>::max()),
"Multiplier is too big that casting may occur unintended behavior")
tfl_dw_conv2d->depthMultiplier(static_cast<int32_t>(multiplier));
diff --git a/compiler/exo/src/Conversion/TensorBroadcastConverter.cpp b/compiler/exo/src/Conversion/TensorBroadcastConverter.cpp
index 532332742..daccbe688 100644
--- a/compiler/exo/src/Conversion/TensorBroadcastConverter.cpp
+++ b/compiler/exo/src/Conversion/TensorBroadcastConverter.cpp
@@ -139,7 +139,7 @@ bool TensorBroadcastConverter::run(loco::Graph *graph)
{
if (node->dialect() == locoex::TFLDialect::get())
{
- auto tfl_node = dynamic_cast<locoex::TFLNode *>(node);
+ auto tfl_node = loco::must_cast<locoex::TFLNode *>(node);
tfl_node->accept(&collector);
}
}
diff --git a/compiler/exo/src/Dialect/IR/CircleDialect.test.cpp b/compiler/exo/src/Dialect/IR/CircleDialect.test.cpp
index 6132eb361..6c85b67a1 100644
--- a/compiler/exo/src/Dialect/IR/CircleDialect.test.cpp
+++ b/compiler/exo/src/Dialect/IR/CircleDialect.test.cpp
@@ -27,5 +27,5 @@ TEST(CircleDialectTest, get)
// get() SHOULD return a valid(non-null) pointer
ASSERT_NE(d, nullptr);
// The return value SHOULD be stable across multiple invocations
- ASSERT_EQ(d, CircleDialect::get());
+ ASSERT_EQ(CircleDialect::get(), d);
}
diff --git a/compiler/exo/src/Dialect/IR/CircleNodeImpl.h b/compiler/exo/src/Dialect/IR/CircleNodeImpl.h
index d9f487111..8c8b99b4f 100644
--- a/compiler/exo/src/Dialect/IR/CircleNodeImpl.h
+++ b/compiler/exo/src/Dialect/IR/CircleNodeImpl.h
@@ -18,7 +18,6 @@
#define __LOCOEX_IR_CIRCLENODEIMPL_H__
#include "CircleNodes.h"
-#include "CircleNodeVisitor.h"
#include <oops/InternalExn.h>
diff --git a/compiler/exo/src/Dialect/IR/CircleNodeVisitor.h b/compiler/exo/src/Dialect/IR/CircleNodeVisitor.h
index fc70c9ebc..f5c28a184 100644
--- a/compiler/exo/src/Dialect/IR/CircleNodeVisitor.h
+++ b/compiler/exo/src/Dialect/IR/CircleNodeVisitor.h
@@ -18,7 +18,6 @@
#define __LOCOEX_IR_CIRCLENODE_VISITOR_H__
#include "CircleNode.h"
-#include "CircleNodes.h"
#include <oops/InternalExn.h>
diff --git a/compiler/exo/src/Dialect/IR/CircleNodes.test.cpp b/compiler/exo/src/Dialect/IR/CircleNodes.test.cpp
index b63e7ccae..3011bdc0a 100644
--- a/compiler/exo/src/Dialect/IR/CircleNodes.test.cpp
+++ b/compiler/exo/src/Dialect/IR/CircleNodes.test.cpp
@@ -25,12 +25,12 @@ TEST(CircleInstanceNormTest, constructor)
{
locoex::CircleInstanceNorm instance_norm;
- ASSERT_EQ(instance_norm.dialect(), locoex::CircleDialect::get());
- ASSERT_EQ(instance_norm.opcode(), locoex::CircleOpcode::INSTANCE_NORM);
+ ASSERT_EQ(locoex::CircleDialect::get(), instance_norm.dialect());
+ ASSERT_EQ(locoex::CircleOpcode::INSTANCE_NORM, instance_norm.opcode());
- ASSERT_EQ(instance_norm.input(), nullptr);
- ASSERT_EQ(instance_norm.gamma(), nullptr);
- ASSERT_EQ(instance_norm.beta(), nullptr);
- ASSERT_FLOAT_EQ(instance_norm.epsilon(), 1e-05);
- ASSERT_EQ(instance_norm.fusedActivationFunction(), locoex::FusedActFunc::UNDEFINED);
+ ASSERT_EQ(nullptr, instance_norm.input());
+ ASSERT_EQ(nullptr, instance_norm.gamma());
+ ASSERT_EQ(nullptr, instance_norm.beta());
+ ASSERT_FLOAT_EQ(1e-05, instance_norm.epsilon());
+ ASSERT_EQ(locoex::FusedActFunc::UNDEFINED, instance_norm.fusedActivationFunction());
}
diff --git a/compiler/exo/src/Dialect/IR/NodeMixins.h b/compiler/exo/src/Dialect/IR/NodeMixins.h
index c35daebc6..9490ea1bc 100644
--- a/compiler/exo/src/Dialect/IR/NodeMixins.h
+++ b/compiler/exo/src/Dialect/IR/NodeMixins.h
@@ -58,7 +58,7 @@ protected:
loco::Use *at(unsigned n) const { return _args.at(n).get(); }
private:
- std::array<std::unique_ptr<loco::Use>, N> _args;
+ std::array<std::unique_ptr<loco::Use>, N> _args{};
};
} // namespace locoex
diff --git a/compiler/exo/src/Dialect/IR/TFLDialect.test.cpp b/compiler/exo/src/Dialect/IR/TFLDialect.test.cpp
index 136721e2d..1267540af 100644
--- a/compiler/exo/src/Dialect/IR/TFLDialect.test.cpp
+++ b/compiler/exo/src/Dialect/IR/TFLDialect.test.cpp
@@ -27,5 +27,5 @@ TEST(TFLDialectTest, get)
// get() SHOULD return a valid(non-null) pointer
ASSERT_NE(d, nullptr);
// The return value SHOULD be stable across multiple invocations
- ASSERT_EQ(d, TFLDialect::get());
+ ASSERT_EQ(TFLDialect::get(), d);
}
diff --git a/compiler/exo/src/Dialect/IR/TFLNodeImpl.h b/compiler/exo/src/Dialect/IR/TFLNodeImpl.h
index 63388279a..2ec94a268 100644
--- a/compiler/exo/src/Dialect/IR/TFLNodeImpl.h
+++ b/compiler/exo/src/Dialect/IR/TFLNodeImpl.h
@@ -18,7 +18,6 @@
#define __LOCOEX_IR_TFLNODEIMPL_H__
#include "TFLNodes.h"
-#include "TFLNodeVisitor.h"
#include <oops/InternalExn.h>
diff --git a/compiler/exo/src/Dialect/IR/TFLNodeVisitor.h b/compiler/exo/src/Dialect/IR/TFLNodeVisitor.h
index e1f5959c0..147b67398 100644
--- a/compiler/exo/src/Dialect/IR/TFLNodeVisitor.h
+++ b/compiler/exo/src/Dialect/IR/TFLNodeVisitor.h
@@ -18,7 +18,6 @@
#define __LOCOEX_IR_TFLNODE_VISITOR_H__
#include "TFLNode.h"
-#include "TFLNodes.h"
#include <oops/InternalExn.h>
diff --git a/compiler/exo/src/Dialect/IR/TFLNodes.h b/compiler/exo/src/Dialect/IR/TFLNodes.h
index 5f521a0a6..41a11e7c0 100644
--- a/compiler/exo/src/Dialect/IR/TFLNodes.h
+++ b/compiler/exo/src/Dialect/IR/TFLNodes.h
@@ -183,7 +183,7 @@ public:
void axis(uint32_t axis) { _axis = axis; }
private:
- uint32_t _axis;
+ uint32_t _axis{0};
};
/**
@@ -540,7 +540,7 @@ public:
Stride *stride(void) { return &_stride; }
private:
- Padding _padding;
+ Padding _padding{Padding::UNDEFINED};
Stride _stride;
};
diff --git a/compiler/exo/src/Dialect/IR/TFLNodes.test.cpp b/compiler/exo/src/Dialect/IR/TFLNodes.test.cpp
index 09c5c83a0..f17898b37 100644
--- a/compiler/exo/src/Dialect/IR/TFLNodes.test.cpp
+++ b/compiler/exo/src/Dialect/IR/TFLNodes.test.cpp
@@ -25,11 +25,11 @@ TEST(TFLAddTest, constructor)
{
locoex::TFLAdd add_node;
- ASSERT_EQ(add_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(add_node.opcode(), locoex::TFLOpcode::ADD);
+ ASSERT_EQ(locoex::TFLDialect::get(), add_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::ADD, add_node.opcode());
- ASSERT_EQ(add_node.x(), nullptr);
- ASSERT_EQ(add_node.y(), nullptr);
+ ASSERT_EQ(nullptr, add_node.x());
+ ASSERT_EQ(nullptr, add_node.y());
}
// TODO TFLAveragePool2D
@@ -38,14 +38,14 @@ TEST(TFLConcatTest, constructor)
{
locoex::TFLConcatenation concat_node(3);
- ASSERT_EQ(concat_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(concat_node.opcode(), locoex::TFLOpcode::CONCATENATION);
+ ASSERT_EQ(locoex::TFLDialect::get(), concat_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::CONCATENATION, concat_node.opcode());
- ASSERT_EQ(concat_node.numValues(), 3);
- ASSERT_EQ(concat_node.values(0), nullptr);
- ASSERT_EQ(concat_node.values(1), nullptr);
- ASSERT_EQ(concat_node.values(2), nullptr);
- ASSERT_EQ(concat_node.fusedActivationFunction(), locoex::FusedActFunc::UNDEFINED);
+ ASSERT_EQ(3, concat_node.numValues());
+ ASSERT_EQ(nullptr, concat_node.values(0));
+ ASSERT_EQ(nullptr, concat_node.values(1));
+ ASSERT_EQ(nullptr, concat_node.values(2));
+ ASSERT_EQ(locoex::FusedActFunc::UNDEFINED, concat_node.fusedActivationFunction());
}
// TODO TFLConv2D
@@ -54,28 +54,28 @@ TEST(TFLDepthwiseConv2DTest, constructor)
{
locoex::TFLDepthwiseConv2D dw_conv2d_node;
- ASSERT_EQ(dw_conv2d_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(dw_conv2d_node.opcode(), locoex::TFLOpcode::DEPTHWISE_CONV_2D);
-
- ASSERT_EQ(dw_conv2d_node.input(), nullptr);
- ASSERT_EQ(dw_conv2d_node.filter(), nullptr);
- ASSERT_EQ(dw_conv2d_node.bias(), nullptr);
- ASSERT_EQ(dw_conv2d_node.padding(), locoex::Padding::UNDEFINED);
- ASSERT_EQ(dw_conv2d_node.stride()->h(), 1);
- ASSERT_EQ(dw_conv2d_node.stride()->w(), 1);
- ASSERT_EQ(dw_conv2d_node.depthMultiplier(), 0);
- ASSERT_EQ(dw_conv2d_node.fusedActivationFunction(), locoex::FusedActFunc::UNDEFINED);
+ ASSERT_EQ(locoex::TFLDialect::get(), dw_conv2d_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::DEPTHWISE_CONV_2D, dw_conv2d_node.opcode());
+
+ ASSERT_EQ(nullptr, dw_conv2d_node.input());
+ ASSERT_EQ(nullptr, dw_conv2d_node.filter());
+ ASSERT_EQ(nullptr, dw_conv2d_node.bias());
+ ASSERT_EQ(locoex::Padding::UNDEFINED, dw_conv2d_node.padding());
+ ASSERT_EQ(1, dw_conv2d_node.stride()->h());
+ ASSERT_EQ(1, dw_conv2d_node.stride()->w());
+ ASSERT_EQ(0, dw_conv2d_node.depthMultiplier());
+ ASSERT_EQ(locoex::FusedActFunc::UNDEFINED, dw_conv2d_node.fusedActivationFunction());
}
TEST(TFLDivTest, constructor)
{
locoex::TFLDiv div_node;
- ASSERT_EQ(div_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(div_node.opcode(), locoex::TFLOpcode::DIV);
+ ASSERT_EQ(locoex::TFLDialect::get(), div_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::DIV, div_node.opcode());
- ASSERT_EQ(div_node.x(), nullptr);
- ASSERT_EQ(div_node.y(), nullptr);
+ ASSERT_EQ(nullptr, div_node.x());
+ ASSERT_EQ(nullptr, div_node.y());
}
// TODO TFLMaxPool2D
@@ -84,21 +84,21 @@ TEST(TFLMulTest, constructor)
{
locoex::TFLMul mul_node;
- ASSERT_EQ(mul_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(mul_node.opcode(), locoex::TFLOpcode::MUL);
+ ASSERT_EQ(locoex::TFLDialect::get(), mul_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::MUL, mul_node.opcode());
- ASSERT_EQ(mul_node.x(), nullptr);
- ASSERT_EQ(mul_node.y(), nullptr);
+ ASSERT_EQ(nullptr, mul_node.x());
+ ASSERT_EQ(nullptr, mul_node.y());
}
TEST(TFLReluTest, constructor)
{
locoex::TFLRelu relu_node;
- ASSERT_EQ(relu_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(relu_node.opcode(), locoex::TFLOpcode::RELU);
+ ASSERT_EQ(locoex::TFLDialect::get(), relu_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::RELU, relu_node.opcode());
- ASSERT_EQ(relu_node.features(), nullptr);
+ ASSERT_EQ(nullptr, relu_node.features());
}
// TODO TFLRelu6
@@ -107,12 +107,12 @@ TEST(TFLReshapeTest, constructor)
{
locoex::TFLReshape reshape;
- ASSERT_EQ(reshape.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(reshape.opcode(), locoex::TFLOpcode::RESHAPE);
+ ASSERT_EQ(locoex::TFLDialect::get(), reshape.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::RESHAPE, reshape.opcode());
- ASSERT_EQ(reshape.tensor(), nullptr);
- ASSERT_EQ(reshape.shape(), nullptr);
- ASSERT_EQ(reshape.newShape()->rank(), 0);
+ ASSERT_EQ(nullptr, reshape.tensor());
+ ASSERT_EQ(nullptr, reshape.shape());
+ ASSERT_EQ(0, reshape.newShape()->rank());
}
TEST(TFLReshapeTest, alloc_new_shape)
@@ -120,14 +120,14 @@ TEST(TFLReshapeTest, alloc_new_shape)
locoex::TFLReshape reshape;
reshape.newShape()->rank(2);
- ASSERT_EQ(reshape.newShape()->rank(), 2);
+ ASSERT_EQ(2, reshape.newShape()->rank());
reshape.newShape()->dim(0) = 0;
reshape.newShape()->dim(1) = 1;
auto &const_reshape = const_cast<const locoex::TFLReshape &>(reshape);
- ASSERT_EQ(const_reshape.newShape()->dim(0), 0);
- ASSERT_EQ(const_reshape.newShape()->dim(1), 1);
+ ASSERT_EQ(0, const_reshape.newShape()->dim(0));
+ ASSERT_EQ(1, const_reshape.newShape()->dim(1));
}
// TODO TFLSoftmax
@@ -138,11 +138,11 @@ TEST(TFLSubTest, constructor)
{
locoex::TFLSub sub_node;
- ASSERT_EQ(sub_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(sub_node.opcode(), locoex::TFLOpcode::SUB);
+ ASSERT_EQ(locoex::TFLDialect::get(), sub_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::SUB, sub_node.opcode());
- ASSERT_EQ(sub_node.x(), nullptr);
- ASSERT_EQ(sub_node.y(), nullptr);
+ ASSERT_EQ(nullptr, sub_node.x());
+ ASSERT_EQ(nullptr, sub_node.y());
}
// TODO TFLTanh
@@ -151,9 +151,9 @@ TEST(TFLTransposeTest, constructor)
{
locoex::TFLTranspose tr_node;
- ASSERT_EQ(tr_node.dialect(), locoex::TFLDialect::get());
- ASSERT_EQ(tr_node.opcode(), locoex::TFLOpcode::TRANSPOSE);
+ ASSERT_EQ(locoex::TFLDialect::get(), tr_node.dialect());
+ ASSERT_EQ(locoex::TFLOpcode::TRANSPOSE, tr_node.opcode());
- ASSERT_EQ(tr_node.a(), nullptr);
- ASSERT_EQ(tr_node.perm(), nullptr);
+ ASSERT_EQ(nullptr, tr_node.a());
+ ASSERT_EQ(nullptr, tr_node.perm());
}
diff --git a/compiler/exo/src/Dialect/Service/CircleTypeInferenceRule.cpp b/compiler/exo/src/Dialect/Service/CircleTypeInferenceRule.cpp
index 6bc95a1b5..dd64eacc7 100644
--- a/compiler/exo/src/Dialect/Service/CircleTypeInferenceRule.cpp
+++ b/compiler/exo/src/Dialect/Service/CircleTypeInferenceRule.cpp
@@ -49,7 +49,7 @@ bool CircleTypeInferenceRule::infer(const loco::Node *node, loco::DataType &dtyp
TypeInferenceAlgorithm alg;
- dtype = dynamic_cast<const CircleNode *>(node)->accept(&alg);
+ dtype = loco::must_cast<const CircleNode *>(node)->accept(&alg);
assert(dtype != loco::DataType::Unknown);
return true;
diff --git a/compiler/exo/src/Dialect/Service/TFLShapeInferenceRule.test.cpp b/compiler/exo/src/Dialect/Service/TFLShapeInferenceRule.test.cpp
index 35c8f0b2a..b68728b47 100644
--- a/compiler/exo/src/Dialect/Service/TFLShapeInferenceRule.test.cpp
+++ b/compiler/exo/src/Dialect/Service/TFLShapeInferenceRule.test.cpp
@@ -60,12 +60,12 @@ TEST(TFLShapeInferenceRuleTest, minimal_with_TFLRelu)
// Verify
{
ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(tfl_node).domain());
auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 2);
- ASSERT_EQ(shape.dim(0), 3);
- ASSERT_EQ(shape.dim(1), 4);
+ ASSERT_EQ(2, shape.rank());
+ ASSERT_EQ(3, shape.dim(0));
+ ASSERT_EQ(4, shape.dim(1));
}
}
@@ -105,14 +105,14 @@ TEST(TFLShapeInferenceRuleTest, avgpool2d_valid)
// Verify
{
ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(tfl_node).domain());
auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 4);
- ASSERT_EQ(shape.dim(0).value(), 1);
- ASSERT_EQ(shape.dim(1).value(), 2);
- ASSERT_EQ(shape.dim(2).value(), 1);
- ASSERT_EQ(shape.dim(3).value(), 1);
+ ASSERT_EQ(4, shape.rank());
+ ASSERT_EQ(1, shape.dim(0).value());
+ ASSERT_EQ(2, shape.dim(1).value());
+ ASSERT_EQ(1, shape.dim(2).value());
+ ASSERT_EQ(1, shape.dim(3).value());
}
}
@@ -152,14 +152,14 @@ TEST(TFLShapeInferenceRuleTest, avgpool2d_same)
// Verify
{
ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(tfl_node).domain());
auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 4);
- ASSERT_EQ(shape.dim(0).value(), 1);
- ASSERT_EQ(shape.dim(1).value(), 2);
- ASSERT_EQ(shape.dim(2).value(), 2);
- ASSERT_EQ(shape.dim(3).value(), 1);
+ ASSERT_EQ(4, shape.rank());
+ ASSERT_EQ(1, shape.dim(0).value());
+ ASSERT_EQ(2, shape.dim(1).value());
+ ASSERT_EQ(2, shape.dim(2).value());
+ ASSERT_EQ(1, shape.dim(3).value());
}
}
@@ -227,13 +227,13 @@ TEST(TFLShapeInferenceRuleTest, TFAdd_shapeinf_different)
// Verify
{
ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(tfl_node).domain());
auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 3);
- ASSERT_EQ(shape.dim(0), 2);
- ASSERT_EQ(shape.dim(1), 3);
- ASSERT_EQ(shape.dim(2), 5);
+ ASSERT_EQ(3, shape.rank());
+ ASSERT_EQ(2, shape.dim(0));
+ ASSERT_EQ(3, shape.dim(1));
+ ASSERT_EQ(5, shape.dim(2));
}
}
@@ -268,10 +268,10 @@ TEST(TFLShapeInferenceRuleTest, TFLTranspose_simple)
ASSERT_TRUE(loco::shape_known(g.tfl_transpose));
auto shape = loco::shape_get(g.tfl_transpose).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 4);
- ASSERT_EQ(shape.dim(0), 30);
- ASSERT_EQ(shape.dim(1), 40);
- ASSERT_EQ(shape.dim(2), 10);
- ASSERT_EQ(shape.dim(3), 20);
+ ASSERT_EQ(4, shape.rank());
+ ASSERT_EQ(30, shape.dim(0));
+ ASSERT_EQ(40, shape.dim(1));
+ ASSERT_EQ(10, shape.dim(2));
+ ASSERT_EQ(20, shape.dim(3));
}
}
diff --git a/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.cpp b/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.cpp
index 3f123a6db..b0fecffe5 100644
--- a/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.cpp
+++ b/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.cpp
@@ -132,7 +132,7 @@ bool TFLTypeInferenceRule::infer(const loco::Node *node, loco::DataType &dtype)
TypeInferenceAlgorithm alg;
- dtype = dynamic_cast<const TFLNode *>(node)->accept(&alg);
+ dtype = loco::must_cast<const TFLNode *>(node)->accept(&alg);
assert(dtype != loco::DataType::Unknown);
return true;
diff --git a/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.test.cpp b/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.test.cpp
index dd1f93c4d..9326e5e58 100644
--- a/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.test.cpp
+++ b/compiler/exo/src/Dialect/Service/TFLTypeInferenceRule.test.cpp
@@ -53,5 +53,5 @@ TEST(TFLTypeInferenceRuleTest, minimal_with_TFLRelu)
// Verify
ASSERT_TRUE(loco::dtype_known(tfl_node));
auto type = loco::dtype_get(tfl_node);
- ASSERT_EQ(type, loco::DataType::S32);
+ ASSERT_EQ(loco::DataType::S32, type);
}
diff --git a/compiler/exo/src/Pass/FuseBiasAddPass.cpp b/compiler/exo/src/Pass/FuseBiasAddPass.cpp
index aab820995..6338dff5d 100644
--- a/compiler/exo/src/Pass/FuseBiasAddPass.cpp
+++ b/compiler/exo/src/Pass/FuseBiasAddPass.cpp
@@ -166,7 +166,7 @@ template <class LatterT> locoex::TFLConst *Fuser<LatterT>::create_fused_bias_con
{
// we have to create a new bias const by adding/substracting bias and const node (of TFLAdd or
// TFLSub)
- auto bias = dynamic_cast<locoex::TFLConst *>(_former->bias());
+ auto bias = loco::must_cast<locoex::TFLConst *>(_former->bias());
assert(bias->dtype() == loco::DataType::FLOAT32 &&
_const_node->dtype() == loco::DataType::FLOAT32);
@@ -344,7 +344,7 @@ bool FuseBiasAddPass::run(loco::Graph *g)
{
if (node->dialect() == locoex::TFLDialect::get())
{
- auto tfl_node = dynamic_cast<locoex::TFLNode *>(node);
+ auto tfl_node = loco::must_cast<locoex::TFLNode *>(node);
tfl_node->accept(&collector);
}
}
diff --git a/compiler/exo/src/Pass/FuseBiasAddPass.test.cpp b/compiler/exo/src/Pass/FuseBiasAddPass.test.cpp
index 6ba728de0..c464d765f 100644
--- a/compiler/exo/src/Pass/FuseBiasAddPass.test.cpp
+++ b/compiler/exo/src/Pass/FuseBiasAddPass.test.cpp
@@ -263,8 +263,8 @@ TEST(FuseBiasAddPassTest, Conv2D_Sub_02_fusing_will_not_performed)
ASSERT_TRUE(a_bias != nullptr);
ASSERT_TRUE(a_bias->dim(0) == 2);
- ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(0), 0);
- ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(1), 0);
+ ASSERT_FLOAT_EQ(0, a_bias->at<loco::DataType::FLOAT32>(0));
+ ASSERT_FLOAT_EQ(0, a_bias->at<loco::DataType::FLOAT32>(1));
auto a_sub = exo::test::find_first_node_bytype<locoex::TFLSub>(g.graph());
ASSERT_TRUE(a_sub != nullptr);
diff --git a/compiler/exo/src/Pass/FuseReluPass.cpp b/compiler/exo/src/Pass/FuseReluPass.cpp
index d7af0c506..ae430bc46 100644
--- a/compiler/exo/src/Pass/FuseReluPass.cpp
+++ b/compiler/exo/src/Pass/FuseReluPass.cpp
@@ -97,7 +97,7 @@ bool FuseReluPass::run(loco::Graph *g)
{
if (node->dialect() == locoex::TFLDialect::get())
{
- auto tfl_node = dynamic_cast<locoex::TFLNode *>(node);
+ auto tfl_node = loco::must_cast<locoex::TFLNode *>(node);
tfl_node->accept(&collector);
}
}
diff --git a/compiler/exo/src/TFLite/TFLExporterImpl.test.cpp b/compiler/exo/src/TFLite/TFLExporterImpl.test.cpp
index 7d74223c5..866ede6a2 100644
--- a/compiler/exo/src/TFLite/TFLExporterImpl.test.cpp
+++ b/compiler/exo/src/TFLite/TFLExporterImpl.test.cpp
@@ -221,18 +221,18 @@ TEST(TFLExporterImplTest, Transpose_simple)
auto perm = operators->Get(n)->inputs()->Get(1);
auto perm_tensor = model->subgraphs()->Get(0)->tensors()->Get(perm);
- ASSERT_EQ(perm_tensor->type(), tflite::TensorType::TensorType_INT32);
- ASSERT_EQ(perm_tensor->shape()->size(), 1);
- ASSERT_EQ(perm_tensor->shape()->Get(0), 4);
+ ASSERT_EQ(tflite::TensorType::TensorType_INT32, perm_tensor->type());
+ ASSERT_EQ(1, perm_tensor->shape()->size());
+ ASSERT_EQ(4, perm_tensor->shape()->Get(0));
auto bufs = (model->buffers());
auto *perm_buf =
reinterpret_cast<const int32_t *>(bufs->Get(perm_tensor->buffer())->data()->data());
- ASSERT_EQ(perm_buf[0], 1);
- ASSERT_EQ(perm_buf[1], 2);
- ASSERT_EQ(perm_buf[2], 3);
- ASSERT_EQ(perm_buf[3], 0);
+ ASSERT_EQ(1, perm_buf[0]);
+ ASSERT_EQ(2, perm_buf[1]);
+ ASSERT_EQ(3, perm_buf[2]);
+ ASSERT_EQ(0, perm_buf[3]);
}
}
@@ -279,17 +279,17 @@ TEST(TFLExporterImplTest, Transpose_from_FilterEncode_FilterDecode)
auto perm = operators->Get(n)->inputs()->Get(1);
auto perm_tensor = model->subgraphs()->Get(0)->tensors()->Get(perm);
- ASSERT_EQ(perm_tensor->type(), tflite::TensorType::TensorType_INT32);
- ASSERT_EQ(perm_tensor->shape()->size(), 1);
- ASSERT_EQ(perm_tensor->shape()->Get(0), 4);
+ ASSERT_EQ(tflite::TensorType::TensorType_INT32, perm_tensor->type());
+ ASSERT_EQ(1, perm_tensor->shape()->size());
+ ASSERT_EQ(4, perm_tensor->shape()->Get(0));
auto bufs = (model->buffers());
auto *perm_buf =
reinterpret_cast<const int32_t *>(bufs->Get(perm_tensor->buffer())->data()->data());
- ASSERT_EQ(perm_buf[0], 3);
- ASSERT_EQ(perm_buf[1], 0);
- ASSERT_EQ(perm_buf[2], 1);
- ASSERT_EQ(perm_buf[3], 2);
+ ASSERT_EQ(3, perm_buf[0]);
+ ASSERT_EQ(0, perm_buf[1]);
+ ASSERT_EQ(1, perm_buf[2]);
+ ASSERT_EQ(2, perm_buf[3]);
}
}
@@ -317,8 +317,8 @@ TEST_F(TFLExporterImplTests, Regression_0000)
auto decode = exo::make_feature_decode<exo::FeatureLayout::NHWC>(relu);
auto push = make_node<loco::Push>();
- ASSERT_EQ(maxpool->window()->vertical(), 1);
- ASSERT_EQ(maxpool->window()->horizontal(), 1);
+ ASSERT_EQ(1, maxpool->window()->vertical());
+ ASSERT_EQ(1, maxpool->window()->horizontal());
maxpool->ifm(encode);
relu->input(maxpool);
@@ -350,11 +350,11 @@ TEST_F(TFLExporterImplTests, Regression_0000)
switch (model->operator_codes()->Get(opcode_index)->builtin_code())
{
case tflite::BuiltinOperator_RELU:
- ASSERT_EQ(relu_exeuction_index, -1);
+ ASSERT_EQ(-1, relu_exeuction_index);
relu_exeuction_index = static_cast<int64_t>(n);
break;
case tflite::BuiltinOperator_MAX_POOL_2D:
- ASSERT_EQ(maxpool_execution_index, -1);
+ ASSERT_EQ(-1, maxpool_execution_index);
maxpool_execution_index = static_cast<int64_t>(n);
break;
default:
@@ -397,17 +397,17 @@ TEST_F(TFLExporterImplTests, Regression_0001)
auto buffers = model->buffers();
// 0'th empty buffer + ConstGen data + ConstGen node output
- ASSERT_EQ(buffers->Length(), 3);
+ ASSERT_EQ(3, buffers->Length());
// 0'th should be empty buffer
auto buffer_0 = (*buffers)[0];
auto array_0 = buffer_0->data();
- ASSERT_EQ(array_0, nullptr);
+ ASSERT_EQ(nullptr, array_0);
// 1'st should be ConstGen data which is two float
auto buffer_1 = (*buffers)[1];
auto array_1 = buffer_1->data();
size_t size_1 = array_1->size();
- ASSERT_EQ(size_1, 2 * sizeof(float));
+ ASSERT_EQ(2 * sizeof(float), size_1);
}
}
diff --git a/compiler/exo/src/TFLite/TFLExporterUtils.test.cpp b/compiler/exo/src/TFLite/TFLExporterUtils.test.cpp
index d19f87d25..ec9714d6d 100644
--- a/compiler/exo/src/TFLite/TFLExporterUtils.test.cpp
+++ b/compiler/exo/src/TFLite/TFLExporterUtils.test.cpp
@@ -46,7 +46,7 @@ TEST(ExporterUtilsTests, getOpPadding)
ofm._dims[1] = 2;
ofm._dims[2] = 2;
- ASSERT_EQ(getOpPadding(&pad, &stride, ifm, ofm), tflite::Padding_VALID);
+ ASSERT_EQ(tflite::Padding_VALID, getOpPadding(&pad, &stride, ifm, ofm));
}
// SAME padding
@@ -65,7 +65,7 @@ TEST(ExporterUtilsTests, getOpPadding)
ofm._dims[1] = 3;
ofm._dims[2] = 3;
- ASSERT_EQ(getOpPadding(&pad, &stride, ifm, ofm), tflite::Padding_SAME);
+ ASSERT_EQ(tflite::Padding_SAME, getOpPadding(&pad, &stride, ifm, ofm));
}
// Custom padding 1 - Not supported by tflite
diff --git a/compiler/exo/src/TFLite/TFLTensorExporter.cpp b/compiler/exo/src/TFLite/TFLTensorExporter.cpp
index 66854ef87..23c810ed5 100644
--- a/compiler/exo/src/TFLite/TFLTensorExporter.cpp
+++ b/compiler/exo/src/TFLite/TFLTensorExporter.cpp
@@ -60,8 +60,8 @@ public:
private:
std::string _name;
- tflite::TensorType _dtype;
- ShapeDescription _shape;
+ tflite::TensorType _dtype{TensorType_FLOAT32};
+ ShapeDescription _shape{};
// TODO Find a better design
loco::ConstGen *_content = nullptr; // TODO deprecate
@@ -80,7 +80,7 @@ struct NoOpDetector final : public loco::CanonicalNodeMutableVisitor<bool>
bool visit(loco::FilterEncode *node) final
{
- auto encoder = dynamic_cast<loco::PermutingEncoder<loco::Domain::Filter> *>(node->encoder());
+ auto encoder = loco::must_cast<loco::PermutingEncoder<loco::Domain::Filter> *>(node->encoder());
auto perm = encoder->perm();
return isNHWC(perm);
@@ -88,14 +88,16 @@ struct NoOpDetector final : public loco::CanonicalNodeMutableVisitor<bool>
bool visit(loco::FeatureEncode *node) final
{
- auto encoder = dynamic_cast<loco::PermutingEncoder<loco::Domain::Feature> *>(node->encoder());
+ auto encoder =
+ loco::must_cast<loco::PermutingEncoder<loco::Domain::Feature> *>(node->encoder());
auto perm = encoder->perm();
return isNHWC(perm);
}
bool visit(loco::FeatureDecode *node) final
{
- auto decoder = dynamic_cast<loco::PermutingDecoder<loco::Domain::Feature> *>(node->decoder());
+ auto decoder =
+ loco::must_cast<loco::PermutingDecoder<loco::Domain::Feature> *>(node->decoder());
auto perm = decoder->perm();
return isNHWC(perm);
}
@@ -133,7 +135,8 @@ void allocateTFLiteTensor(loco::Node *node, TFLTensorContext &ctx)
tensor_info.dtype(TypeInference::get(node));
tensor_info.shape(ShapeInference::get(node));
- tensor_info.tfl_content(dynamic_cast<locoex::TFLConst *>(node));
+ if (auto const_node = dynamic_cast<locoex::TFLConst *>(node))
+ tensor_info.tfl_content(const_node);
set_tensor_index(node, tensor_index);
diff --git a/compiler/exo/src/TFLite/TFLTypeInference.test.cpp b/compiler/exo/src/TFLite/TFLTypeInference.test.cpp
index 0712f0a25..8a3a08da9 100644
--- a/compiler/exo/src/TFLite/TFLTypeInference.test.cpp
+++ b/compiler/exo/src/TFLite/TFLTypeInference.test.cpp
@@ -91,7 +91,7 @@ private:
private:
loco::Graph _graph;
- loco::Node *_last;
+ loco::Node *_last{nullptr};
};
struct TypeInferenceTest : public Sequential, public ::testing::Test
@@ -113,6 +113,6 @@ TEST_F(TypeInferenceTest, Regression_0000)
TypeInferencePass type_inf_pass;
type_inf_pass.run(graph());
- ASSERT_EQ(TypeInference::get(relu), tflite::TensorType_INT8);
- ASSERT_EQ(TypeInference::get(push), tflite::TensorType_INT8);
+ ASSERT_EQ(tflite::TensorType_INT8, TypeInference::get(relu));
+ ASSERT_EQ(tflite::TensorType_INT8, TypeInference::get(push));
}
diff --git a/compiler/fipe/README.md b/compiler/fipe/README.md
new file mode 100644
index 000000000..7f3c74ae2
--- /dev/null
+++ b/compiler/fipe/README.md
@@ -0,0 +1 @@
+# fipe
diff --git a/compiler/foder/CMakeLists.txt b/compiler/foder/CMakeLists.txt
new file mode 100644
index 000000000..6a413c61e
--- /dev/null
+++ b/compiler/foder/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_library(foder INTERFACE)
+target_include_directories(foder INTERFACE include)
diff --git a/compiler/foder/README.md b/compiler/foder/README.md
new file mode 100644
index 000000000..2bb1635a1
--- /dev/null
+++ b/compiler/foder/README.md
@@ -0,0 +1,13 @@
+# foder
+
+_foder_ is a header only library that loads files.
+
+## Example
+
+```cpp
+foder::FileLoader fileloader{input_path};
+
+std::vector<char> data = fileloader.load();
+
+DO_SOMETHING_WITH(data);
+```
diff --git a/compiler/foder/include/foder/FileLoader.h b/compiler/foder/include/foder/FileLoader.h
new file mode 100644
index 000000000..e2143ecf6
--- /dev/null
+++ b/compiler/foder/include/foder/FileLoader.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <vector>
+
+namespace foder
+{
+
+class FileLoader
+{
+private:
+ using DataBuffer = std::vector<char>;
+
+public:
+ explicit FileLoader(const std::string &path) : _path(path) {}
+
+public:
+ FileLoader(const FileLoader &) = delete;
+ FileLoader(FileLoader &&) = delete;
+
+public:
+ DataBuffer load(void) const
+ {
+ std::ifstream file(_path, std::ios::binary | std::ios::in);
+ if (!file.good())
+ {
+ std::string errmsg = "ERROR: Failed to open file: " + _path;
+ throw std::runtime_error(errmsg.c_str());
+ }
+
+ file.unsetf(std::ios::skipws);
+
+ file.seekg(0, std::ios::end);
+ auto fileSize = file.tellg();
+ file.seekg(0, std::ios::beg);
+
+ // reserve capacity
+ DataBuffer data(fileSize);
+
+ // read the data
+ file.read(data.data(), fileSize);
+ if (file.fail())
+ {
+ std::string errmsg = "ERROR: Failed to read file: " + _path;
+ throw std::runtime_error(errmsg.c_str());
+ }
+
+ return data;
+ }
+
+private:
+ const std::string _path;
+};
+
+} // namespace foder
diff --git a/compiler/gen-core/CMakeLists.txt b/compiler/gen-core/CMakeLists.txt
index 3732f493b..3cee4cecf 100644
--- a/compiler/gen-core/CMakeLists.txt
+++ b/compiler/gen-core/CMakeLists.txt
@@ -1,4 +1,4 @@
-find_package(HDF5 COMPONENTS CXX QUIET)
+nnas_find_package(HDF5 QUIET)
if(NOT HDF5_FOUND)
return()
diff --git a/compiler/i5diff/CMakeLists.txt b/compiler/i5diff/CMakeLists.txt
index 321ae49a0..c310a668e 100644
--- a/compiler/i5diff/CMakeLists.txt
+++ b/compiler/i5diff/CMakeLists.txt
@@ -1,4 +1,4 @@
-find_package(HDF5 COMPONENTS CXX QUIET)
+nnas_find_package(HDF5 QUIET)
if(NOT HDF5_FOUND)
return()
diff --git a/compiler/imgdata2hdf5/CMakeLists.txt b/compiler/imgdata2hdf5/CMakeLists.txt
new file mode 100644
index 000000000..e2d9154f5
--- /dev/null
+++ b/compiler/imgdata2hdf5/CMakeLists.txt
@@ -0,0 +1,13 @@
+set(imgdata2hdf5_FILE "imgdata2hdf5.py")
+set(imgdata2hdf5_SRC "${CMAKE_CURRENT_SOURCE_DIR}/${imgdata2hdf5_FILE}")
+set(imgdata2hdf5_BIN "${CMAKE_CURRENT_BINARY_DIR}/${imgdata2hdf5_FILE}")
+
+add_custom_command(OUTPUT ${imgdata2hdf5_BIN}
+ COMMAND ${CMAKE_COMMAND} -E copy "${imgdata2hdf5_SRC}" "${imgdata2hdf5_BIN}"
+ DEPENDS ${imgdata2hdf5_SRC}
+ COMMENT "Generate ${imgdata2hdf5_BIN}"
+ )
+
+add_custom_target(imgdata2hdf5 ALL DEPENDS ${imgdata2hdf5_BIN})
+
+install(FILES ${imgdata2hdf5_BIN} DESTINATION bin)
diff --git a/compiler/imgdata2hdf5/README.md b/compiler/imgdata2hdf5/README.md
new file mode 100644
index 000000000..54743e070
--- /dev/null
+++ b/compiler/imgdata2hdf5/README.md
@@ -0,0 +1,24 @@
+# imgdata2hdf5
+
+_imgdata2hdf5_ is a tool to convert raw image data (assumed to be pre-processed) to an hdf5 file.
+
+## Prerequisite
+- Raw image data pre-processed for the corresponding DNN model
+- List of data to convert (saved in the text file)
+- Python installed with _numpy_ and _h5py_ (See docs/how-to-prepare-virtualenv.txt)
+
+## Example
+```
+python imgdata2hdf5.py \
+> --data_list=tmp/imgdata/datalist.txt
+> --output_path=tmp/imgdata/imgdata.hdf5
+```
+
+## Arguments
+```
+ -h, --help Show this help message and exit
+ -l DATA_LIST, --data_list DATA_LIST
+ Path to the text file which lists the absolute paths of the raw image data files to be converted.
+ -o OUTPUT_PATH, --output_path OUTPUT_PATH
+ Path to the output hdf5 file.
+```
diff --git a/compiler/imgdata2hdf5/imgdata2hdf5.py b/compiler/imgdata2hdf5/imgdata2hdf5.py
new file mode 100755
index 000000000..1ff912a2f
--- /dev/null
+++ b/compiler/imgdata2hdf5/imgdata2hdf5.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+import h5py as h5
+import numpy as np
+import argparse
+import glob
+import os
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "-l",
+ "--data_list",
+ type=str,
+ help=
+ "Path to the text file which lists the absolute paths of the raw image data files to be converted.",
+ required=True)
+parser.add_argument(
+ "-o", "--output_path", type=str, help="Path to the output hdf5 file.", required=True)
+
+args = parser.parse_args()
+data_list = args.data_list
+output_path = args.output_path
+
+# Create h5 file
+h5_file = h5.File(output_path, 'w')
+group = h5_file.create_group("value")
+# We assume the raw input data have the correct type/shape for the corresponding model
+# If this flag is set in the hdf5 file, record-minmax will skip type/shape check
+group.attrs['rawData'] = '1'
+
+if os.path.isfile(data_list) == False:
+ raise SystemExit("No such file. " + data_list)
+
+# Data list
+datalist = []
+with open(data_list, 'r') as f:
+ lines = f.readlines()
+ for line in lines:
+ if line.strip():
+ filename = line.rstrip()
+ if os.path.isfile(filename):
+ datalist.append(filename)
+ else:
+ raise SystemExit("No such file. " + filename)
+
+# Input files
+num_converted = 0
+for imgdata in datalist:
+ with open(imgdata, 'rb') as f:
+ sample = group.create_group(str(num_converted))
+ num_converted += 1
+ filename = os.path.basename(imgdata)
+ sample.attrs['desc'] = filename
+ raw_data = bytearray(f.read())
+ # The target model is DNN for handling an input image
+ sample.create_dataset('0', data=raw_data)
+
+h5_file.close()
+
+print("Raw image data have been packaged to " + output_path)
+print("Number of packaged data: " + str(num_converted))
diff --git a/compiler/loco/include/loco/IR/CastHelpers.h b/compiler/loco/include/loco/IR/CastHelpers.h
new file mode 100644
index 000000000..0dcd92df6
--- /dev/null
+++ b/compiler/loco/include/loco/IR/CastHelpers.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LOCO_IR_CAST_HELPERS_H__
+#define __LOCO_IR_CAST_HELPERS_H__
+
+#include <string>
+#include <stdexcept>
+#include <typeinfo>
+
+namespace loco
+{
+
+// TODO move to somewhere appropriate
+template <typename T, typename ARG> T _must_cast(ARG arg)
+{
+ auto cast_arg = dynamic_cast<T>(arg);
+ if (cast_arg == nullptr)
+ {
+ std::string msg = "loco::must_cast() failed to cast: ";
+ msg += typeid(T).name();
+ throw std::invalid_argument(msg.c_str());
+ }
+ return cast_arg;
+}
+
+} // namespace loco
+
+#endif // __LOCO_IR_CAST_HELPERS_H__
diff --git a/compiler/loco/include/loco/IR/DataTypeTraits.h b/compiler/loco/include/loco/IR/DataTypeTraits.h
index c4479e545..c186300de 100644
--- a/compiler/loco/include/loco/IR/DataTypeTraits.h
+++ b/compiler/loco/include/loco/IR/DataTypeTraits.h
@@ -46,18 +46,43 @@ template <> struct DataTypeImpl<DataType::U8>
using Type = uint8_t;
};
+template <> struct DataTypeImpl<DataType::S16>
+{
+ // Use C++ int16_t type for 16bit integer
+ using Type = int16_t;
+};
+
template <> struct DataTypeImpl<DataType::S32>
{
// Use C++ int32_t type for 32bit integer
using Type = int32_t;
};
+template <> struct DataTypeImpl<DataType::U32>
+{
+ // Use C++ uint32_t type for unsigned 32bit integer
+ using Type = uint32_t;
+};
+
+template <> struct DataTypeImpl<DataType::S64>
+{
+ // Use C++ int64_t type for 64bit integer
+ using Type = int64_t;
+};
+
template <> struct DataTypeImpl<DataType::FLOAT32>
{
// Use C++ float type for IEEE 32-bit floating-point numbers
using Type = float;
};
+// NOTE DataTypeImpl for BOOL is subject to change
+template <> struct DataTypeImpl<DataType::BOOL>
+{
+ // Use C++ uint8_t type for bool
+ using Type = uint8_t;
+};
+
/**
* @brief Returns the size of the data type.
* @note If you need the size at compile time, use `sizeof(typename DataTypeImpl<DT>::Type)`.
@@ -70,10 +95,18 @@ inline uint32_t size(DataType data_type)
return sizeof(DataTypeImpl<DataType::S8>::Type);
case DataType::U8:
return sizeof(DataTypeImpl<DataType::U8>::Type);
+ case DataType::S16:
+ return sizeof(DataTypeImpl<DataType::S16>::Type);
case DataType::S32:
return sizeof(DataTypeImpl<DataType::S32>::Type);
+ case DataType::U32:
+ return sizeof(DataTypeImpl<DataType::U32>::Type);
+ case DataType::S64:
+ return sizeof(DataTypeImpl<DataType::S64>::Type);
case DataType::FLOAT32:
return sizeof(DataTypeImpl<DataType::FLOAT32>::Type);
+ case DataType::BOOL:
+ return sizeof(DataTypeImpl<DataType::BOOL>::Type);
default:
// TODO Support remaining data types.
assert(false);
diff --git a/compiler/loco/include/loco/IR/Dimension.h b/compiler/loco/include/loco/IR/Dimension.h
index 7b5d5943f..a939f1a30 100644
--- a/compiler/loco/include/loco/IR/Dimension.h
+++ b/compiler/loco/include/loco/IR/Dimension.h
@@ -36,18 +36,18 @@ private:
};
public:
- // @brief Construct an "unknown" dimension
+ /// @brief Construct an "unknown" dimension
Dimension() = default;
- // @brief Construct a "known" dimension
+ /// @brief Construct a "known" dimension
Dimension(uint32_t value) { set(value); }
public:
- // @brief Return whether the value is known (or not)
+ /// @brief Return whether the value is known (or not)
bool known(void) const { return _kind == Kind::Known; }
- // @brief Return the value
- // @note This value is meaningful only for known dimension
+ /// @brief Return the value
+ /// @note This value is meaningful only for known dimension
uint32_t value(void) const { return _value; }
void set(uint32_t value)
@@ -77,7 +77,7 @@ bool operator==(const Dimension &, const Dimension &);
bool operator==(const Dimension &, uint32_t);
bool operator==(uint32_t, const Dimension &);
-// @brief Make an "unknown" dimension
+/// @brief Make an "unknown" dimension
Dimension make_dimension(void);
} // namespace loco
diff --git a/compiler/loco/include/loco/IR/FeatureCodec.h b/compiler/loco/include/loco/IR/FeatureCodec.h
index 93094e13a..c5c465625 100644
--- a/compiler/loco/include/loco/IR/FeatureCodec.h
+++ b/compiler/loco/include/loco/IR/FeatureCodec.h
@@ -23,6 +23,8 @@
#include "loco/IR/TensorShape.h"
#include "loco/IR/TensorIndex.h"
+#include "loco/IR/CastHelpers.h"
+
#include <memory>
namespace loco
@@ -72,6 +74,29 @@ struct FeatureDecoder
virtual std::unique_ptr<FeatureDecoder> clone(void) const = 0;
};
+/**
+ * @brief A helper dynamic_cast that throws when failed
+ */
+template <typename T> T must_cast(FeatureEncoder *node)
+{
+ return _must_cast<T, FeatureEncoder *>(node);
+}
+
+template <typename T> T must_cast(const FeatureEncoder *node)
+{
+ return _must_cast<T, const FeatureEncoder *>(node);
+}
+
+template <typename T> T must_cast(FeatureDecoder *node)
+{
+ return _must_cast<T, FeatureDecoder *>(node);
+}
+
+template <typename T> T must_cast(const FeatureDecoder *node)
+{
+ return _must_cast<T, const FeatureDecoder *>(node);
+}
+
} // namespace loco
#endif // __LOCO_IR_FEATURE_CODEC_H__
diff --git a/compiler/loco/include/loco/IR/FilterCodec.h b/compiler/loco/include/loco/IR/FilterCodec.h
index 3ff548d6d..cf13deed3 100644
--- a/compiler/loco/include/loco/IR/FilterCodec.h
+++ b/compiler/loco/include/loco/IR/FilterCodec.h
@@ -23,6 +23,8 @@
#include "loco/IR/TensorShape.h"
#include "loco/IR/TensorIndex.h"
+#include "loco/IR/CastHelpers.h"
+
namespace loco
{
@@ -56,6 +58,21 @@ struct FilterDecoder
virtual FilterIndex value(const TensorIndex &index) const = 0;
};
+/**
+ * @brief A helper dynamic_cast that throws when failed
+ */
+template <typename T> T must_cast(FilterEncoder *node)
+{
+ return _must_cast<T, FilterEncoder *>(node);
+}
+
+template <typename T> T must_cast(const FilterEncoder *node)
+{
+ return _must_cast<T, const FilterEncoder *>(node);
+}
+
+// TODO add must_cast for FilterDecoder
+
} // namespace loco
#endif // __LOCO_IR_FILTER_CODEC_H__
diff --git a/compiler/loco/include/loco/IR/Node.h b/compiler/loco/include/loco/IR/Node.h
index ef0bf238d..28689b765 100644
--- a/compiler/loco/include/loco/IR/Node.h
+++ b/compiler/loco/include/loco/IR/Node.h
@@ -23,6 +23,7 @@
#include "loco/IR/Dialect.h"
#include "loco/IR/NodePool.forward.h"
#include "loco/IR/Graph.forward.h"
+#include "loco/IR/CastHelpers.h"
#include <array>
#include <memory>
@@ -142,6 +143,13 @@ private:
Subst<SubstQualifier::Default> replace(Node *node);
+/**
+ * @brief A helper dynamic_cast that throws when failed
+ */
+template <typename T> T must_cast(Node *node) { return _must_cast<T, Node *>(node); }
+
+template <typename T> T must_cast(const Node *node) { return _must_cast<T, const Node *>(node); }
+
} // namespace loco
#endif // __LOCO_IR_NODE_H__
diff --git a/compiler/loco/include/loco/IR/Nodes.h b/compiler/loco/include/loco/IR/Nodes.h
index 9aac48b6e..fecfad28d 100644
--- a/compiler/loco/include/loco/IR/Nodes.h
+++ b/compiler/loco/include/loco/IR/Nodes.h
@@ -632,7 +632,7 @@ public:
private:
TensorAxisSet _axes;
- ReduceFunc _func;
+ ReduceFunc _func{ReduceFunc::Mean};
};
/**
diff --git a/compiler/loco/include/loco/IR/TensorShape.h b/compiler/loco/include/loco/IR/TensorShape.h
index af1066d52..44eca6798 100644
--- a/compiler/loco/include/loco/IR/TensorShape.h
+++ b/compiler/loco/include/loco/IR/TensorShape.h
@@ -57,6 +57,11 @@ private:
*/
uint32_t element_count(const loco::TensorShape *tensor_shape);
+/**
+ * @brief '==' operator for TensorShape
+ */
+bool operator==(const TensorShape &lhs, const TensorShape &rhs);
+
} // namespace loco
#endif // __LOCO_IR_TENSOR_SHAPE_H__
diff --git a/compiler/loco/src/ADT/AnnotatedItem.test.cpp b/compiler/loco/src/ADT/AnnotatedItem.test.cpp
index 42113ff7b..45ca87d75 100644
--- a/compiler/loco/src/ADT/AnnotatedItem.test.cpp
+++ b/compiler/loco/src/ADT/AnnotatedItem.test.cpp
@@ -41,15 +41,15 @@ TEST(AnnotatedItemTest, annotation)
{
loco::AnnotatedItem<::Annotation> item;
- ASSERT_EQ(item.annot<DerivedAnnotation<0>>(), nullptr);
+ ASSERT_EQ(nullptr, item.annot<DerivedAnnotation<0>>());
item.annot(DerivedAnnotation<0>::make());
ASSERT_NE(item.annot<DerivedAnnotation<0>>(), nullptr);
- ASSERT_EQ(item.annot<DerivedAnnotation<1>>(), nullptr);
+ ASSERT_EQ(nullptr, item.annot<DerivedAnnotation<1>>());
item.annot<DerivedAnnotation<0>>(nullptr);
- ASSERT_EQ(item.annot<DerivedAnnotation<0>>(), nullptr);
+ ASSERT_EQ(nullptr, item.annot<DerivedAnnotation<0>>());
// Below check guarantees that "annot<T>(nullptr)" is allowed even when there is no annotation.
// This guarantee allows us to simplify code for some cases.
diff --git a/compiler/loco/src/IR/Algorithm.test.cpp b/compiler/loco/src/IR/Algorithm.test.cpp
index f0a3585c0..c60ae1434 100644
--- a/compiler/loco/src/IR/Algorithm.test.cpp
+++ b/compiler/loco/src/IR/Algorithm.test.cpp
@@ -50,9 +50,9 @@ TEST(AlgorithmTest, postorder_traversal)
auto seq = loco::postorder_traversal({push});
- ASSERT_EQ(seq.size(), 2);
- ASSERT_EQ(seq.at(0), pull_1);
- ASSERT_EQ(seq.at(1), push);
+ ASSERT_EQ(2, seq.size());
+ ASSERT_EQ(pull_1, seq.at(0));
+ ASSERT_EQ(push, seq.at(1));
}
TEST(AlgorithmTest, postorder_traversal_visit_once)
@@ -74,7 +74,7 @@ TEST(AlgorithmTest, postorder_traversal_visit_once)
auto seq = loco::postorder_traversal({push_1, push_2});
- ASSERT_EQ(seq.size(), 3);
+ ASSERT_EQ(3, seq.size());
ASSERT_TRUE(contains(seq, pull));
ASSERT_TRUE(contains(seq, push_1));
ASSERT_TRUE(contains(seq, push_2));
@@ -97,9 +97,9 @@ TEST(AlgorithmTest, postorder_traversal_incomplte_graph)
auto seq = loco::postorder_traversal({concat});
- ASSERT_EQ(seq.size(), 2);
- ASSERT_EQ(seq.at(0), pull);
- ASSERT_EQ(seq.at(1), concat);
+ ASSERT_EQ(2, seq.size());
+ ASSERT_EQ(pull, seq.at(0));
+ ASSERT_EQ(concat, seq.at(1));
}
TEST(AlgorithmTest, active_nodes)
@@ -116,7 +116,7 @@ TEST(AlgorithmTest, active_nodes)
auto s = loco::active_nodes({push});
- ASSERT_EQ(s.size(), 2);
+ ASSERT_EQ(2, s.size());
ASSERT_TRUE(contains(s, pull));
ASSERT_TRUE(contains(s, push));
}
diff --git a/compiler/loco/src/IR/CanonicalDialect.test.cpp b/compiler/loco/src/IR/CanonicalDialect.test.cpp
index 96b48218d..e479aeda2 100644
--- a/compiler/loco/src/IR/CanonicalDialect.test.cpp
+++ b/compiler/loco/src/IR/CanonicalDialect.test.cpp
@@ -25,5 +25,5 @@ TEST(CanonicalDialectTest, get)
// get() SHOULD return a valid(non-null) pointer
ASSERT_NE(d, nullptr);
// The return value SHOULD be stable across multiple invocations
- ASSERT_EQ(d, loco::CanonicalDialect::get());
+ ASSERT_EQ(loco::CanonicalDialect::get(), d);
}
diff --git a/compiler/loco/src/IR/CanonicalNode.test.cpp b/compiler/loco/src/IR/CanonicalNode.test.cpp
index cb61b5e83..dddb1588d 100644
--- a/compiler/loco/src/IR/CanonicalNode.test.cpp
+++ b/compiler/loco/src/IR/CanonicalNode.test.cpp
@@ -34,8 +34,8 @@ TEST(CanonicalNodeTest, visitor_with_user_default_impl)
MyVisitor v;
- ASSERT_EQ(forward.accept(&v), 128);
- ASSERT_EQ(constgen.accept(&v), 256);
+ ASSERT_EQ(128, forward.accept(&v));
+ ASSERT_EQ(256, constgen.accept(&v));
}
TEST(CanonicalNodeTest, visitor)
@@ -50,7 +50,7 @@ TEST(CanonicalNodeTest, visitor)
CountingVisitor v;
- ASSERT_EQ(node.accept(&v), 1);
+ ASSERT_EQ(1, node.accept(&v));
}
TEST(CanonicalNodeTest, mutable_visitor)
@@ -68,5 +68,5 @@ TEST(CanonicalNodeTest, mutable_visitor)
ResetForward v;
forward_node.accept(&v);
- ASSERT_EQ(forward_node.input(), nullptr);
+ ASSERT_EQ(nullptr, forward_node.input());
}
diff --git a/compiler/loco/src/IR/DataTypeTraits.test.cpp b/compiler/loco/src/IR/DataTypeTraits.test.cpp
index 76d2515a9..d33d99494 100644
--- a/compiler/loco/src/IR/DataTypeTraits.test.cpp
+++ b/compiler/loco/src/IR/DataTypeTraits.test.cpp
@@ -25,5 +25,5 @@ TEST(DataTypeTraitsTest, FLOAT32)
auto obtained = std::type_index(typeid(loco::DataTypeImpl<loco::DataType::FLOAT32>::Type));
auto expected = std::type_index(typeid(float));
- ASSERT_EQ(obtained, expected);
+ ASSERT_EQ(expected, obtained);
}
diff --git a/compiler/loco/src/IR/DepthwiseFilterIndex.test.cpp b/compiler/loco/src/IR/DepthwiseFilterIndex.test.cpp
index 202647cfc..f62c067fa 100644
--- a/compiler/loco/src/IR/DepthwiseFilterIndex.test.cpp
+++ b/compiler/loco/src/IR/DepthwiseFilterIndex.test.cpp
@@ -23,10 +23,10 @@ TEST(DepthwiseFilterIndexTest, default_constructor)
loco::DepthwiseFilterIndex index;
// All the values are 0 at the beginning
- ASSERT_EQ(index.channel(), 0);
- ASSERT_EQ(index.nth(), 0);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(0, index.channel());
+ ASSERT_EQ(0, index.nth());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
}
TEST(DepthwiseFilterIndexTest, settet_and_getter)
@@ -36,32 +36,32 @@ TEST(DepthwiseFilterIndexTest, settet_and_getter)
// Set depth
index.channel() = 2;
- ASSERT_EQ(index.channel(), 2);
- ASSERT_EQ(index.nth(), 0);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.channel());
+ ASSERT_EQ(0, index.nth());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
// Set multiplier
index.nth() = 3;
- ASSERT_EQ(index.channel(), 2);
- ASSERT_EQ(index.nth(), 3);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.channel());
+ ASSERT_EQ(3, index.nth());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
// Set height
index.row() = 4;
- ASSERT_EQ(index.channel(), 2);
- ASSERT_EQ(index.nth(), 3);
- ASSERT_EQ(index.row(), 4);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.channel());
+ ASSERT_EQ(3, index.nth());
+ ASSERT_EQ(4, index.row());
+ ASSERT_EQ(0, index.column());
// Set width
index.column() = 5;
- ASSERT_EQ(index.channel(), 2);
- ASSERT_EQ(index.nth(), 3);
- ASSERT_EQ(index.row(), 4);
- ASSERT_EQ(index.column(), 5);
+ ASSERT_EQ(2, index.channel());
+ ASSERT_EQ(3, index.nth());
+ ASSERT_EQ(4, index.row());
+ ASSERT_EQ(5, index.column());
}
diff --git a/compiler/loco/src/IR/DepthwiseFilterShape.test.cpp b/compiler/loco/src/IR/DepthwiseFilterShape.test.cpp
index 2b9518c1f..56bad5e62 100644
--- a/compiler/loco/src/IR/DepthwiseFilterShape.test.cpp
+++ b/compiler/loco/src/IR/DepthwiseFilterShape.test.cpp
@@ -40,7 +40,7 @@ TEST(DepthwiseFilterShapeTest, settet_and_getter)
ASSERT_FALSE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.depth(), 2);
+ ASSERT_EQ(2, shape.depth());
// Set multiplier
shape.multiplier() = 3;
@@ -50,8 +50,8 @@ TEST(DepthwiseFilterShapeTest, settet_and_getter)
ASSERT_FALSE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.depth(), 2);
- ASSERT_EQ(shape.multiplier(), 3);
+ ASSERT_EQ(2, shape.depth());
+ ASSERT_EQ(3, shape.multiplier());
// Set height
shape.height() = 4;
@@ -61,9 +61,9 @@ TEST(DepthwiseFilterShapeTest, settet_and_getter)
ASSERT_TRUE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.depth(), 2);
- ASSERT_EQ(shape.multiplier(), 3);
- ASSERT_EQ(shape.height(), 4);
+ ASSERT_EQ(2, shape.depth());
+ ASSERT_EQ(3, shape.multiplier());
+ ASSERT_EQ(4, shape.height());
// Set width
shape.width() = 5;
@@ -73,8 +73,8 @@ TEST(DepthwiseFilterShapeTest, settet_and_getter)
ASSERT_TRUE(shape.height().known());
ASSERT_TRUE(shape.width().known());
- ASSERT_EQ(shape.depth(), 2);
- ASSERT_EQ(shape.multiplier(), 3);
- ASSERT_EQ(shape.height(), 4);
- ASSERT_EQ(shape.width(), 5);
+ ASSERT_EQ(2, shape.depth());
+ ASSERT_EQ(3, shape.multiplier());
+ ASSERT_EQ(4, shape.height());
+ ASSERT_EQ(5, shape.width());
}
diff --git a/compiler/loco/src/IR/Dialect.test.cpp b/compiler/loco/src/IR/Dialect.test.cpp
index 312bb52ef..3af303375 100644
--- a/compiler/loco/src/IR/Dialect.test.cpp
+++ b/compiler/loco/src/IR/Dialect.test.cpp
@@ -36,6 +36,6 @@ TEST(DialectTest, service)
MockDialect dialect;
- ASSERT_EQ(dialect.service<S0>(), nullptr);
+ ASSERT_EQ(nullptr, dialect.service<S0>());
ASSERT_NE(dialect.service<S1>(), nullptr);
}
diff --git a/compiler/loco/src/IR/Dimension.test.cpp b/compiler/loco/src/IR/Dimension.test.cpp
index 4faf78ac8..aa3a8d6aa 100644
--- a/compiler/loco/src/IR/Dimension.test.cpp
+++ b/compiler/loco/src/IR/Dimension.test.cpp
@@ -44,7 +44,7 @@ TEST_F(DimensionTest, value_constructor)
loco::Dimension dim{value()};
ASSERT_TRUE(dim.known());
- ASSERT_EQ(dim.value(), value());
+ ASSERT_EQ(value(), dim.value());
}
TEST_F(DimensionTest, set)
@@ -54,7 +54,7 @@ TEST_F(DimensionTest, set)
dim.set(value());
ASSERT_TRUE(dim.known());
- ASSERT_EQ(dim.value(), value());
+ ASSERT_EQ(value(), dim.value());
}
TEST_F(DimensionTest, unset)
diff --git a/compiler/loco/src/IR/FeatureIndex.test.cpp b/compiler/loco/src/IR/FeatureIndex.test.cpp
index 82b563986..0ed6b4f55 100644
--- a/compiler/loco/src/IR/FeatureIndex.test.cpp
+++ b/compiler/loco/src/IR/FeatureIndex.test.cpp
@@ -23,10 +23,10 @@ TEST(FeatureIndexTest, default_constructor)
loco::FeatureIndex index;
// All the values are 0 at the beginning
- ASSERT_EQ(index.batch(), 0);
- ASSERT_EQ(index.channel(), 0);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(0, index.batch());
+ ASSERT_EQ(0, index.channel());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
}
TEST(FeatureIndexTest, settet_and_getter)
@@ -36,32 +36,32 @@ TEST(FeatureIndexTest, settet_and_getter)
// Set count
index.batch() = 2;
- ASSERT_EQ(index.batch(), 2);
- ASSERT_EQ(index.channel(), 0);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.batch());
+ ASSERT_EQ(0, index.channel());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
// Set channel
index.channel() = 3;
- ASSERT_EQ(index.batch(), 2);
- ASSERT_EQ(index.channel(), 3);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.batch());
+ ASSERT_EQ(3, index.channel());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
// Set height
index.row() = 4;
- ASSERT_EQ(index.batch(), 2);
- ASSERT_EQ(index.channel(), 3);
- ASSERT_EQ(index.row(), 4);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.batch());
+ ASSERT_EQ(3, index.channel());
+ ASSERT_EQ(4, index.row());
+ ASSERT_EQ(0, index.column());
// Set width
index.column() = 5;
- ASSERT_EQ(index.batch(), 2);
- ASSERT_EQ(index.channel(), 3);
- ASSERT_EQ(index.row(), 4);
- ASSERT_EQ(index.column(), 5);
+ ASSERT_EQ(2, index.batch());
+ ASSERT_EQ(3, index.channel());
+ ASSERT_EQ(4, index.row());
+ ASSERT_EQ(5, index.column());
}
diff --git a/compiler/loco/src/IR/FeatureShape.test.cpp b/compiler/loco/src/IR/FeatureShape.test.cpp
index 59e25ac23..b0414bec8 100644
--- a/compiler/loco/src/IR/FeatureShape.test.cpp
+++ b/compiler/loco/src/IR/FeatureShape.test.cpp
@@ -40,7 +40,7 @@ TEST(FeatureShapeTest, settet_and_getter)
ASSERT_FALSE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
+ ASSERT_EQ(2, shape.count());
// Set depth
shape.depth() = 3;
@@ -50,8 +50,8 @@ TEST(FeatureShapeTest, settet_and_getter)
ASSERT_FALSE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
- ASSERT_EQ(shape.depth(), 3);
+ ASSERT_EQ(2, shape.count());
+ ASSERT_EQ(3, shape.depth());
// Set height
shape.height() = 4;
@@ -61,9 +61,9 @@ TEST(FeatureShapeTest, settet_and_getter)
ASSERT_TRUE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
- ASSERT_EQ(shape.depth(), 3);
- ASSERT_EQ(shape.height(), 4);
+ ASSERT_EQ(2, shape.count());
+ ASSERT_EQ(3, shape.depth());
+ ASSERT_EQ(4, shape.height());
// Set width
shape.width() = 5;
@@ -73,8 +73,8 @@ TEST(FeatureShapeTest, settet_and_getter)
ASSERT_TRUE(shape.height().known());
ASSERT_TRUE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
- ASSERT_EQ(shape.depth(), 3);
- ASSERT_EQ(shape.height(), 4);
- ASSERT_EQ(shape.width(), 5);
+ ASSERT_EQ(2, shape.count());
+ ASSERT_EQ(3, shape.depth());
+ ASSERT_EQ(4, shape.height());
+ ASSERT_EQ(5, shape.width());
}
diff --git a/compiler/loco/src/IR/FilterIndex.test.cpp b/compiler/loco/src/IR/FilterIndex.test.cpp
index 58f38718e..4edf62c85 100644
--- a/compiler/loco/src/IR/FilterIndex.test.cpp
+++ b/compiler/loco/src/IR/FilterIndex.test.cpp
@@ -23,10 +23,10 @@ TEST(FilterIndexTest, default_constructor)
loco::FilterIndex index;
// All the values are 0 at the beginning
- ASSERT_EQ(index.nth(), 0);
- ASSERT_EQ(index.channel(), 0);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(0, index.nth());
+ ASSERT_EQ(0, index.channel());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
}
TEST(FilterIndexTest, settet_and_getter)
@@ -36,32 +36,32 @@ TEST(FilterIndexTest, settet_and_getter)
// Set count
index.nth() = 2;
- ASSERT_EQ(index.nth(), 2);
- ASSERT_EQ(index.channel(), 0);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.nth());
+ ASSERT_EQ(0, index.channel());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
// Set channel
index.channel() = 3;
- ASSERT_EQ(index.nth(), 2);
- ASSERT_EQ(index.channel(), 3);
- ASSERT_EQ(index.row(), 0);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.nth());
+ ASSERT_EQ(3, index.channel());
+ ASSERT_EQ(0, index.row());
+ ASSERT_EQ(0, index.column());
// Set height
index.row() = 4;
- ASSERT_EQ(index.nth(), 2);
- ASSERT_EQ(index.channel(), 3);
- ASSERT_EQ(index.row(), 4);
- ASSERT_EQ(index.column(), 0);
+ ASSERT_EQ(2, index.nth());
+ ASSERT_EQ(3, index.channel());
+ ASSERT_EQ(4, index.row());
+ ASSERT_EQ(0, index.column());
// Set width
index.column() = 5;
- ASSERT_EQ(index.nth(), 2);
- ASSERT_EQ(index.channel(), 3);
- ASSERT_EQ(index.row(), 4);
- ASSERT_EQ(index.column(), 5);
+ ASSERT_EQ(2, index.nth());
+ ASSERT_EQ(3, index.channel());
+ ASSERT_EQ(4, index.row());
+ ASSERT_EQ(5, index.column());
}
diff --git a/compiler/loco/src/IR/FilterShape.test.cpp b/compiler/loco/src/IR/FilterShape.test.cpp
index ccb60ed76..8033027c2 100644
--- a/compiler/loco/src/IR/FilterShape.test.cpp
+++ b/compiler/loco/src/IR/FilterShape.test.cpp
@@ -40,7 +40,7 @@ TEST(FilterShapeTest, settet_and_getter)
ASSERT_FALSE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
+ ASSERT_EQ(2, shape.count());
// Set depth
shape.depth() = 3;
@@ -50,8 +50,8 @@ TEST(FilterShapeTest, settet_and_getter)
ASSERT_FALSE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
- ASSERT_EQ(shape.depth(), 3);
+ ASSERT_EQ(2, shape.count());
+ ASSERT_EQ(3, shape.depth());
// Set height
shape.height() = 4;
@@ -61,9 +61,9 @@ TEST(FilterShapeTest, settet_and_getter)
ASSERT_TRUE(shape.height().known());
ASSERT_FALSE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
- ASSERT_EQ(shape.depth(), 3);
- ASSERT_EQ(shape.height(), 4);
+ ASSERT_EQ(2, shape.count());
+ ASSERT_EQ(3, shape.depth());
+ ASSERT_EQ(4, shape.height());
// Set width
shape.width() = 5;
@@ -73,8 +73,8 @@ TEST(FilterShapeTest, settet_and_getter)
ASSERT_TRUE(shape.height().known());
ASSERT_TRUE(shape.width().known());
- ASSERT_EQ(shape.count(), 2);
- ASSERT_EQ(shape.depth(), 3);
- ASSERT_EQ(shape.height(), 4);
- ASSERT_EQ(shape.width(), 5);
+ ASSERT_EQ(2, shape.count());
+ ASSERT_EQ(3, shape.depth());
+ ASSERT_EQ(4, shape.height());
+ ASSERT_EQ(5, shape.width());
}
diff --git a/compiler/loco/src/IR/Graph.cpp b/compiler/loco/src/IR/Graph.cpp
index 1d8752252..8073d4545 100644
--- a/compiler/loco/src/IR/Graph.cpp
+++ b/compiler/loco/src/IR/Graph.cpp
@@ -37,7 +37,7 @@ std::unique_ptr<loco::TensorShape> make_tensor_shape(std::initializer_list<loco:
assert(axis == dims.size());
}
- return std::move(tensor_shape);
+ return tensor_shape;
}
} // namespace
diff --git a/compiler/loco/src/IR/Graph.test.cpp b/compiler/loco/src/IR/Graph.test.cpp
index 6df630b0f..ad6894f30 100644
--- a/compiler/loco/src/IR/Graph.test.cpp
+++ b/compiler/loco/src/IR/Graph.test.cpp
@@ -21,7 +21,7 @@
namespace
{
-// @brief Mockup class for loco::NamedEntity
+/// @brief Mockup class for loco::NamedEntity
struct NamedElement final : private loco::NamedEntity
{
LOCO_NAMED_ENTITY_EXPOSE;
@@ -33,7 +33,7 @@ TEST(NamedTest, constructor)
{
NamedElement elem;
- ASSERT_EQ(elem.name(), "");
+ ASSERT_EQ("", elem.name());
}
TEST(NamedTest, setter_and_getter)
@@ -41,14 +41,14 @@ TEST(NamedTest, setter_and_getter)
NamedElement elem;
elem.name("name");
- ASSERT_EQ(elem.name(), "name");
+ ASSERT_EQ("name", elem.name());
}
TEST(DataTypedMixinTest, constructor)
{
loco::Mixin<loco::Trait::DataTyped> mixin;
- ASSERT_EQ(mixin.dtype(), loco::DataType::Unknown);
+ ASSERT_EQ(loco::DataType::Unknown, mixin.dtype());
}
TEST(DataTypedMixinTest, setter_and_getter)
@@ -56,7 +56,7 @@ TEST(DataTypedMixinTest, setter_and_getter)
loco::Mixin<loco::Trait::DataTyped> mixin;
mixin.dtype(loco::DataType::FLOAT32);
- ASSERT_EQ(mixin.dtype(), loco::DataType::FLOAT32);
+ ASSERT_EQ(loco::DataType::FLOAT32, mixin.dtype());
}
TEST(TensorShapedMixinTest, setter_and_getter)
@@ -65,11 +65,11 @@ TEST(TensorShapedMixinTest, setter_and_getter)
mixin.shape({1, 2, 3, 4});
ASSERT_NE(mixin.shape(), nullptr);
- ASSERT_EQ(mixin.shape()->rank(), 4);
- ASSERT_EQ(mixin.shape()->dim(0), 1);
- ASSERT_EQ(mixin.shape()->dim(1), 2);
- ASSERT_EQ(mixin.shape()->dim(2), 3);
- ASSERT_EQ(mixin.shape()->dim(3), 4);
+ ASSERT_EQ(4, mixin.shape()->rank());
+ ASSERT_EQ(1, mixin.shape()->dim(0));
+ ASSERT_EQ(2, mixin.shape()->dim(1));
+ ASSERT_EQ(3, mixin.shape()->dim(2));
+ ASSERT_EQ(4, mixin.shape()->dim(3));
}
TEST(GraphTest, create_and_destroy_node)
@@ -89,8 +89,8 @@ TEST(GraphTest, create_input)
auto input = g->inputs()->create();
// TODO Add more checks
- ASSERT_EQ(input->shape(), nullptr);
- ASSERT_EQ(input->index(), 0);
+ ASSERT_EQ(nullptr, input->shape());
+ ASSERT_EQ(0, input->index());
}
TEST(GraphTest, create_output)
@@ -100,8 +100,8 @@ TEST(GraphTest, create_output)
auto output = g->outputs()->create();
// TODO Add more checks
- ASSERT_EQ(output->shape(), nullptr);
- ASSERT_EQ(output->index(), 0);
+ ASSERT_EQ(nullptr, output->shape());
+ ASSERT_EQ(0, output->index());
}
namespace
@@ -132,10 +132,10 @@ TEST(GraphTest, consturctor_with_param_node)
auto test_node = g->nodes()->create<ParamCtorNode>(22, 11.11);
- ASSERT_EQ(test_node->graph(), g.get());
- ASSERT_EQ(const_cast<const ParamCtorNode *>(test_node)->graph(), g.get());
+ ASSERT_EQ(g.get(), test_node->graph());
+ ASSERT_EQ(g.get(), const_cast<const ParamCtorNode *>(test_node)->graph());
- ASSERT_EQ(test_node->i(), 22);
+ ASSERT_EQ(22, test_node->i());
ASSERT_FLOAT_EQ(test_node->f(), 11.11);
ASSERT_NO_THROW(g->nodes()->destroy(test_node));
@@ -170,7 +170,7 @@ TEST(GraphTest, graph_node_enumeration)
// Returns true if "nodes" includes a given node
auto member = [&nodes](loco::Node *node) { return nodes.find(node) != nodes.end(); };
- ASSERT_EQ(nodes.size(), 2);
+ ASSERT_EQ(2, nodes.size());
ASSERT_TRUE(member(pull_1));
ASSERT_TRUE(member(push_1));
}
@@ -197,9 +197,9 @@ TEST(GraphTest, graph_inout_enumeration)
auto output_nodes = loco::output_nodes(g.get());
- ASSERT_EQ(output_nodes.size(), 2);
- ASSERT_EQ(output_nodes.at(0), push_1);
- ASSERT_EQ(output_nodes.at(1), push_3);
+ ASSERT_EQ(2, output_nodes.size());
+ ASSERT_EQ(push_1, output_nodes.at(0));
+ ASSERT_EQ(push_3, output_nodes.at(1));
}
TEST(GraphTest, graph_name)
diff --git a/compiler/loco/src/IR/MockupNode.h b/compiler/loco/src/IR/MockupNode.h
index ec56c90e2..16eaccf36 100644
--- a/compiler/loco/src/IR/MockupNode.h
+++ b/compiler/loco/src/IR/MockupNode.h
@@ -32,7 +32,7 @@ struct MockDialect final : public loco::Dialect
}
};
-// @brief Mockup node for internal testing
+/// @brief Mockup node for internal testing
class MockupNode final : public loco::Node
{
public:
@@ -53,6 +53,21 @@ private:
loco::Use _arg{this};
};
+/// @brief Mockup2Node node for internal testing
+class Mockup2Node final : public loco::Node
+{
+public:
+ Mockup2Node() = default;
+
+public:
+ const loco::Dialect *dialect(void) const final { return MockDialect::get(); }
+ uint32_t opnum(void) const final { return 1; }
+
+ uint32_t arity(void) const final { return 0; }
+ Node *arg(uint32_t) const final { return nullptr; }
+ void drop(void) final {}
+};
+
} // namespace
#endif // __LOCO_IR_MOCKUP_NODE_H__
diff --git a/compiler/loco/src/IR/Node.test.cpp b/compiler/loco/src/IR/Node.test.cpp
index 00e444465..7c3eb3fc9 100644
--- a/compiler/loco/src/IR/Node.test.cpp
+++ b/compiler/loco/src/IR/Node.test.cpp
@@ -29,7 +29,7 @@ TEST(NodeTest, preds)
auto preds = loco::preds(&node);
- ASSERT_EQ(preds.size(), 1);
+ ASSERT_EQ(1, preds.size());
ASSERT_NE(preds.find(&arg), preds.end());
}
@@ -44,7 +44,7 @@ TEST(NodeTest, succs)
auto succs = loco::succs(&node);
- ASSERT_EQ(succs.size(), 2);
+ ASSERT_EQ(2, succs.size());
ASSERT_NE(succs.find(&succ_1), succs.end());
ASSERT_NE(succs.find(&succ_2), succs.end());
}
@@ -63,8 +63,8 @@ TEST(NodeTest, replace_with)
// The following holds at this point
// - node_3 USE node_1
// - node_4 USE node_2
- ASSERT_EQ(node_3.in(), &node_1);
- ASSERT_EQ(node_4.in(), &node_2);
+ ASSERT_EQ(&node_1, node_3.in());
+ ASSERT_EQ(&node_2, node_4.in());
// Replace all the usage of node_1 with node_2
replace(&node_1).with(&node_2);
@@ -72,8 +72,8 @@ TEST(NodeTest, replace_with)
// The following holds at this point
// - node_3 USE node_2
// - node_4 USE node_2
- ASSERT_EQ(node_3.in(), &node_2);
- ASSERT_EQ(node_4.in(), &node_2);
+ ASSERT_EQ(&node_2, node_3.in());
+ ASSERT_EQ(&node_2, node_4.in());
}
TEST(NodeTest, constructor)
@@ -81,7 +81,7 @@ TEST(NodeTest, constructor)
MockupNode node;
// graph() SHOULD return nullptr if node is not constructed through "Graph"
- ASSERT_EQ(node.graph(), nullptr);
+ ASSERT_EQ(nullptr, node.graph());
}
// TODO Rewrite this as a FixedAritry mix-in test
@@ -96,7 +96,23 @@ TEST(FixedArityNodeTest, constructor)
DerivedNode node;
- ASSERT_EQ(node.arity(), 1);
- ASSERT_EQ(node.arg(0), nullptr);
+ ASSERT_EQ(1, node.arity());
+ ASSERT_EQ(nullptr, node.arg(0));
}
#endif
+
+TEST(NodeTest, cast_with_must_NEG)
+{
+ Mockup2Node mockupnode;
+ loco::Node *node = &mockupnode;
+
+ ASSERT_THROW(loco::must_cast<MockupNode *>(node), std::invalid_argument);
+}
+
+TEST(NodeTest, cast_with_const_must_NEG)
+{
+ Mockup2Node mockupnode;
+ const loco::Node *node = &mockupnode;
+
+ ASSERT_THROW(loco::must_cast<const MockupNode *>(node), std::invalid_argument);
+}
diff --git a/compiler/loco/src/IR/NodeShape.test.cpp b/compiler/loco/src/IR/NodeShape.test.cpp
index 4f092e024..8bf7900cc 100644
--- a/compiler/loco/src/IR/NodeShape.test.cpp
+++ b/compiler/loco/src/IR/NodeShape.test.cpp
@@ -22,7 +22,7 @@ TEST(NodeShapeTest, default_constructor)
{
loco::NodeShape node_shape;
- ASSERT_EQ(node_shape.domain(), loco::Domain::Unknown);
+ ASSERT_EQ(loco::Domain::Unknown, node_shape.domain());
}
TEST(NodeShapeTest, bias_shape_constructor)
@@ -33,8 +33,8 @@ TEST(NodeShapeTest, bias_shape_constructor)
loco::NodeShape node_shape{bias_shape};
- ASSERT_EQ(node_shape.domain(), loco::Domain::Bias);
- ASSERT_EQ(node_shape.as<loco::BiasShape>().length(), 4);
+ ASSERT_EQ(loco::Domain::Bias, node_shape.domain());
+ ASSERT_EQ(4, node_shape.as<loco::BiasShape>().length());
}
TEST(NodeShapeTest, dwfilter_shape_constructor)
@@ -48,11 +48,11 @@ TEST(NodeShapeTest, dwfilter_shape_constructor)
loco::NodeShape node_shape{dwfilter_shape};
- ASSERT_EQ(node_shape.domain(), loco::Domain::DepthwiseFilter);
- ASSERT_EQ(node_shape.as<loco::DepthwiseFilterShape>().depth(), 2);
- ASSERT_EQ(node_shape.as<loco::DepthwiseFilterShape>().multiplier(), 3);
- ASSERT_EQ(node_shape.as<loco::DepthwiseFilterShape>().height(), 4);
- ASSERT_EQ(node_shape.as<loco::DepthwiseFilterShape>().width(), 5);
+ ASSERT_EQ(loco::Domain::DepthwiseFilter, node_shape.domain());
+ ASSERT_EQ(2, node_shape.as<loco::DepthwiseFilterShape>().depth());
+ ASSERT_EQ(3, node_shape.as<loco::DepthwiseFilterShape>().multiplier());
+ ASSERT_EQ(4, node_shape.as<loco::DepthwiseFilterShape>().height());
+ ASSERT_EQ(5, node_shape.as<loco::DepthwiseFilterShape>().width());
}
TEST(NodeShapeTest, feature_shape_constructor)
@@ -66,11 +66,11 @@ TEST(NodeShapeTest, feature_shape_constructor)
loco::NodeShape node_shape{feature_shape};
- ASSERT_EQ(node_shape.domain(), loco::Domain::Feature);
- ASSERT_EQ(node_shape.as<loco::FeatureShape>().count(), 2);
- ASSERT_EQ(node_shape.as<loco::FeatureShape>().depth(), 3);
- ASSERT_EQ(node_shape.as<loco::FeatureShape>().height(), 4);
- ASSERT_EQ(node_shape.as<loco::FeatureShape>().width(), 5);
+ ASSERT_EQ(loco::Domain::Feature, node_shape.domain());
+ ASSERT_EQ(2, node_shape.as<loco::FeatureShape>().count());
+ ASSERT_EQ(3, node_shape.as<loco::FeatureShape>().depth());
+ ASSERT_EQ(4, node_shape.as<loco::FeatureShape>().height());
+ ASSERT_EQ(5, node_shape.as<loco::FeatureShape>().width());
}
TEST(NodeShapeTest, filter_shape_constructor)
@@ -84,11 +84,11 @@ TEST(NodeShapeTest, filter_shape_constructor)
loco::NodeShape node_shape{filter_shape};
- ASSERT_EQ(node_shape.domain(), loco::Domain::Filter);
- ASSERT_EQ(node_shape.as<loco::FilterShape>().count(), 2);
- ASSERT_EQ(node_shape.as<loco::FilterShape>().depth(), 3);
- ASSERT_EQ(node_shape.as<loco::FilterShape>().height(), 4);
- ASSERT_EQ(node_shape.as<loco::FilterShape>().width(), 5);
+ ASSERT_EQ(loco::Domain::Filter, node_shape.domain());
+ ASSERT_EQ(2, node_shape.as<loco::FilterShape>().count());
+ ASSERT_EQ(3, node_shape.as<loco::FilterShape>().depth());
+ ASSERT_EQ(4, node_shape.as<loco::FilterShape>().height());
+ ASSERT_EQ(5, node_shape.as<loco::FilterShape>().width());
}
TEST(NodeShapeTest, tensor_shape_constructor)
@@ -101,10 +101,10 @@ TEST(NodeShapeTest, tensor_shape_constructor)
loco::NodeShape node_shape{tensor_shape};
- ASSERT_EQ(node_shape.domain(), loco::Domain::Tensor);
- ASSERT_EQ(node_shape.as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(node_shape.as<loco::TensorShape>().dim(0), 4);
- ASSERT_EQ(node_shape.as<loco::TensorShape>().dim(1), 5);
+ ASSERT_EQ(loco::Domain::Tensor, node_shape.domain());
+ ASSERT_EQ(2, node_shape.as<loco::TensorShape>().rank());
+ ASSERT_EQ(4, node_shape.as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(5, node_shape.as<loco::TensorShape>().dim(1));
}
TEST(NodeShapeTest, copy_constructible)
@@ -118,8 +118,8 @@ TEST(NodeShapeTest, copy_constructible)
loco::NodeShape orig{tensor_shape};
loco::NodeShape copy{orig}; // Call Copy Constructor
- ASSERT_EQ(copy.domain(), loco::Domain::Tensor);
- ASSERT_EQ(copy.as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(copy.as<loco::TensorShape>().dim(0), 4);
- ASSERT_EQ(copy.as<loco::TensorShape>().dim(1), 5);
+ ASSERT_EQ(loco::Domain::Tensor, copy.domain());
+ ASSERT_EQ(2, copy.as<loco::TensorShape>().rank());
+ ASSERT_EQ(4, copy.as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(5, copy.as<loco::TensorShape>().dim(1));
}
diff --git a/compiler/loco/src/IR/Nodes.test.cpp b/compiler/loco/src/IR/Nodes.test.cpp
index cd51f46c0..0b2210357 100644
--- a/compiler/loco/src/IR/Nodes.test.cpp
+++ b/compiler/loco/src/IR/Nodes.test.cpp
@@ -23,8 +23,8 @@ TEST(PushTest, constructor)
{
loco::Push push_node;
- ASSERT_EQ(push_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(push_node.opcode(), loco::CanonicalOpcode::Push);
+ ASSERT_EQ(loco::CanonicalDialect::get(), push_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::Push, push_node.opcode());
ASSERT_FALSE(push_node.indexed());
}
@@ -37,24 +37,24 @@ TEST(PushTest, shape)
push_node.shape({dims[0], dims[1], dims[2], dims[3]});
- ASSERT_EQ(push_node.rank(), dims.size());
- ASSERT_EQ(push_node.dim(0), dims[0]);
- ASSERT_EQ(push_node.dim(1), dims[1]);
- ASSERT_EQ(push_node.dim(2), dims[2]);
- ASSERT_EQ(push_node.dim(3), dims[3]);
+ ASSERT_EQ(dims.size(), push_node.rank());
+ ASSERT_EQ(dims[0], push_node.dim(0));
+ ASSERT_EQ(dims[1], push_node.dim(1));
+ ASSERT_EQ(dims[2], push_node.dim(2));
+ ASSERT_EQ(dims[3], push_node.dim(3));
}
TEST(PullTest, constructor)
{
loco::Pull pull_node;
- ASSERT_EQ(pull_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(pull_node.opcode(), loco::CanonicalOpcode::Pull);
+ ASSERT_EQ(loco::CanonicalDialect::get(), pull_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::Pull, pull_node.opcode());
ASSERT_FALSE(pull_node.indexed());
- ASSERT_EQ(pull_node.dtype(), loco::DataType::Unknown);
- ASSERT_EQ(pull_node.rank(), 0);
+ ASSERT_EQ(loco::DataType::Unknown, pull_node.dtype());
+ ASSERT_EQ(0, pull_node.rank());
}
TEST(PullTest, shape)
@@ -65,58 +65,58 @@ TEST(PullTest, shape)
pull_node.shape({dims[0], dims[1], dims[2], dims[3]});
- ASSERT_EQ(pull_node.rank(), dims.size());
- ASSERT_EQ(pull_node.dim(0), dims[0]);
- ASSERT_EQ(pull_node.dim(1), dims[1]);
- ASSERT_EQ(pull_node.dim(2), dims[2]);
- ASSERT_EQ(pull_node.dim(3), dims[3]);
+ ASSERT_EQ(dims.size(), pull_node.rank());
+ ASSERT_EQ(dims[0], pull_node.dim(0));
+ ASSERT_EQ(dims[1], pull_node.dim(1));
+ ASSERT_EQ(dims[2], pull_node.dim(2));
+ ASSERT_EQ(dims[3], pull_node.dim(3));
}
TEST(ForwardTest, constructor)
{
loco::Forward forward_node;
- ASSERT_EQ(forward_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(forward_node.opcode(), loco::CanonicalOpcode::Forward);
+ ASSERT_EQ(loco::CanonicalDialect::get(), forward_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::Forward, forward_node.opcode());
- ASSERT_EQ(forward_node.input(), nullptr);
+ ASSERT_EQ(nullptr, forward_node.input());
}
TEST(ReLUTest, constructor)
{
loco::ReLU relu_node;
- ASSERT_EQ(relu_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(relu_node.opcode(), loco::CanonicalOpcode::ReLU);
+ ASSERT_EQ(loco::CanonicalDialect::get(), relu_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::ReLU, relu_node.opcode());
- ASSERT_EQ(relu_node.input(), nullptr);
+ ASSERT_EQ(nullptr, relu_node.input());
}
TEST(ReLU6Test, constructor)
{
loco::ReLU6 relu6_node;
- ASSERT_EQ(relu6_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(relu6_node.opcode(), loco::CanonicalOpcode::ReLU6);
+ ASSERT_EQ(loco::CanonicalDialect::get(), relu6_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::ReLU6, relu6_node.opcode());
- ASSERT_EQ(relu6_node.input(), nullptr);
+ ASSERT_EQ(nullptr, relu6_node.input());
}
TEST(ConstGenTest, constructor)
{
loco::ConstGen constgen_node;
- ASSERT_EQ(constgen_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(constgen_node.opcode(), loco::CanonicalOpcode::ConstGen);
+ ASSERT_EQ(loco::CanonicalDialect::get(), constgen_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::ConstGen, constgen_node.opcode());
- ASSERT_EQ(constgen_node.dtype(), loco::DataType::Unknown);
- ASSERT_EQ(constgen_node.rank(), 0);
+ ASSERT_EQ(loco::DataType::Unknown, constgen_node.dtype());
+ ASSERT_EQ(0, constgen_node.rank());
constgen_node.dtype(loco::DataType::FLOAT32);
- ASSERT_EQ(constgen_node.dtype(), loco::DataType::FLOAT32);
+ ASSERT_EQ(loco::DataType::FLOAT32, constgen_node.dtype());
constgen_node.rank(2);
- ASSERT_EQ(constgen_node.rank(), 2);
+ ASSERT_EQ(2, constgen_node.rank());
constgen_node.dim(0) = 2;
constgen_node.dim(1) = 3;
@@ -124,12 +124,12 @@ TEST(ConstGenTest, constructor)
ASSERT_TRUE(constgen_node.dim(0).known());
ASSERT_TRUE(constgen_node.dim(1).known());
- ASSERT_EQ(constgen_node.dim(0), 2);
- ASSERT_EQ(constgen_node.dim(1), 3);
+ ASSERT_EQ(2, constgen_node.dim(0));
+ ASSERT_EQ(3, constgen_node.dim(1));
constgen_node.size<loco::DataType::FLOAT32>(6);
- ASSERT_EQ(constgen_node.size<loco::DataType::FLOAT32>(), 6);
+ ASSERT_EQ(6, constgen_node.size<loco::DataType::FLOAT32>());
constgen_node.at<loco::DataType::FLOAT32>(0) = 0.0f; // Set 0,0
constgen_node.at<loco::DataType::FLOAT32>(1) = 1.0f; // Set 0,1
@@ -138,26 +138,26 @@ TEST(ConstGenTest, constructor)
constgen_node.at<loco::DataType::FLOAT32>(4) = 4.0f; // Set 1,1
constgen_node.at<loco::DataType::FLOAT32>(5) = 5.0f; // Set 1,2
- ASSERT_EQ(constgen_node.at<loco::DataType::FLOAT32>(0), 0.0f);
- ASSERT_EQ(constgen_node.at<loco::DataType::FLOAT32>(1), 1.0f);
- ASSERT_EQ(constgen_node.at<loco::DataType::FLOAT32>(2), 2.0f);
- ASSERT_EQ(constgen_node.at<loco::DataType::FLOAT32>(3), 3.0f);
- ASSERT_EQ(constgen_node.at<loco::DataType::FLOAT32>(4), 4.0f);
- ASSERT_EQ(constgen_node.at<loco::DataType::FLOAT32>(5), 5.0f);
+ ASSERT_EQ(0.0f, constgen_node.at<loco::DataType::FLOAT32>(0));
+ ASSERT_EQ(1.0f, constgen_node.at<loco::DataType::FLOAT32>(1));
+ ASSERT_EQ(2.0f, constgen_node.at<loco::DataType::FLOAT32>(2));
+ ASSERT_EQ(3.0f, constgen_node.at<loco::DataType::FLOAT32>(3));
+ ASSERT_EQ(4.0f, constgen_node.at<loco::DataType::FLOAT32>(4));
+ ASSERT_EQ(5.0f, constgen_node.at<loco::DataType::FLOAT32>(5));
}
TEST(ConstGenTest, constructor_s32)
{
loco::ConstGen constgen_node;
- ASSERT_EQ(constgen_node.dtype(), loco::DataType::Unknown);
- ASSERT_EQ(constgen_node.rank(), 0);
+ ASSERT_EQ(loco::DataType::Unknown, constgen_node.dtype());
+ ASSERT_EQ(0, constgen_node.rank());
constgen_node.dtype(loco::DataType::S32);
- ASSERT_EQ(constgen_node.dtype(), loco::DataType::S32);
+ ASSERT_EQ(loco::DataType::S32, constgen_node.dtype());
constgen_node.rank(2);
- ASSERT_EQ(constgen_node.rank(), 2);
+ ASSERT_EQ(2, constgen_node.rank());
constgen_node.dim(0) = 2;
constgen_node.dim(1) = 3;
@@ -165,12 +165,12 @@ TEST(ConstGenTest, constructor_s32)
ASSERT_TRUE(constgen_node.dim(0).known());
ASSERT_TRUE(constgen_node.dim(1).known());
- ASSERT_EQ(constgen_node.dim(0), 2);
- ASSERT_EQ(constgen_node.dim(1), 3);
+ ASSERT_EQ(2, constgen_node.dim(0));
+ ASSERT_EQ(3, constgen_node.dim(1));
constgen_node.size<loco::DataType::S32>(6);
- ASSERT_EQ(constgen_node.size<loco::DataType::S32>(), 6);
+ ASSERT_EQ(6, constgen_node.size<loco::DataType::S32>());
constgen_node.at<loco::DataType::S32>(0) = 0; // Set 0,0
constgen_node.at<loco::DataType::S32>(1) = 1; // Set 0,1
@@ -179,33 +179,33 @@ TEST(ConstGenTest, constructor_s32)
constgen_node.at<loco::DataType::S32>(4) = -4; // Set 1,1
constgen_node.at<loco::DataType::S32>(5) = -5; // Set 1,2
- ASSERT_EQ(constgen_node.at<loco::DataType::S32>(0), 0);
- ASSERT_EQ(constgen_node.at<loco::DataType::S32>(1), 1);
- ASSERT_EQ(constgen_node.at<loco::DataType::S32>(2), 2);
- ASSERT_EQ(constgen_node.at<loco::DataType::S32>(3), -3);
- ASSERT_EQ(constgen_node.at<loco::DataType::S32>(4), -4);
- ASSERT_EQ(constgen_node.at<loco::DataType::S32>(5), -5);
+ ASSERT_EQ(0, constgen_node.at<loco::DataType::S32>(0));
+ ASSERT_EQ(1, constgen_node.at<loco::DataType::S32>(1));
+ ASSERT_EQ(2, constgen_node.at<loco::DataType::S32>(2));
+ ASSERT_EQ(-3, constgen_node.at<loco::DataType::S32>(3));
+ ASSERT_EQ(-4, constgen_node.at<loco::DataType::S32>(4));
+ ASSERT_EQ(-5, constgen_node.at<loco::DataType::S32>(5));
}
TEST(MaxPool2DTest, constructor)
{
loco::MaxPool2D maxpool_node;
- ASSERT_EQ(maxpool_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(maxpool_node.opcode(), loco::CanonicalOpcode::MaxPool2D);
+ ASSERT_EQ(loco::CanonicalDialect::get(), maxpool_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::MaxPool2D, maxpool_node.opcode());
- ASSERT_EQ(maxpool_node.ifm(), nullptr);
+ ASSERT_EQ(nullptr, maxpool_node.ifm());
- ASSERT_EQ(maxpool_node.pad()->top(), 0);
- ASSERT_EQ(maxpool_node.pad()->bottom(), 0);
- ASSERT_EQ(maxpool_node.pad()->left(), 0);
- ASSERT_EQ(maxpool_node.pad()->right(), 0);
+ ASSERT_EQ(0, maxpool_node.pad()->top());
+ ASSERT_EQ(0, maxpool_node.pad()->bottom());
+ ASSERT_EQ(0, maxpool_node.pad()->left());
+ ASSERT_EQ(0, maxpool_node.pad()->right());
- ASSERT_EQ(maxpool_node.window()->vertical(), 1);
- ASSERT_EQ(maxpool_node.window()->horizontal(), 1);
+ ASSERT_EQ(1, maxpool_node.window()->vertical());
+ ASSERT_EQ(1, maxpool_node.window()->horizontal());
- ASSERT_EQ(maxpool_node.stride()->vertical(), 1);
- ASSERT_EQ(maxpool_node.stride()->horizontal(), 1);
+ ASSERT_EQ(1, maxpool_node.stride()->vertical());
+ ASSERT_EQ(1, maxpool_node.stride()->horizontal());
}
TEST(MaxPool2DTest, pad)
@@ -218,71 +218,71 @@ TEST(MaxPool2DTest, pad)
loco::MaxPool2D maxpool_node;
maxpool_node.pad()->top(t);
- ASSERT_EQ(maxpool_node.pad()->top(), t);
+ ASSERT_EQ(t, maxpool_node.pad()->top());
maxpool_node.pad()->bottom(b);
- ASSERT_EQ(maxpool_node.pad()->bottom(), b);
+ ASSERT_EQ(b, maxpool_node.pad()->bottom());
maxpool_node.pad()->left(l);
- ASSERT_EQ(maxpool_node.pad()->left(), l);
+ ASSERT_EQ(l, maxpool_node.pad()->left());
maxpool_node.pad()->right(r);
- ASSERT_EQ(maxpool_node.pad()->right(), r);
+ ASSERT_EQ(r, maxpool_node.pad()->right());
}
TEST(AvgPool2DTest, constructor)
{
loco::AvgPool2D avgpool_node;
- ASSERT_EQ(avgpool_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(avgpool_node.opcode(), loco::CanonicalOpcode::AvgPool2D);
+ ASSERT_EQ(loco::CanonicalDialect::get(), avgpool_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::AvgPool2D, avgpool_node.opcode());
- ASSERT_EQ(avgpool_node.ifm(), nullptr);
+ ASSERT_EQ(nullptr, avgpool_node.ifm());
- ASSERT_EQ(avgpool_node.convention(), loco::AvgPool2D::Convention::Unknown);
+ ASSERT_EQ(loco::AvgPool2D::Convention::Unknown, avgpool_node.convention());
- ASSERT_EQ(avgpool_node.pad()->top(), 0);
- ASSERT_EQ(avgpool_node.pad()->bottom(), 0);
- ASSERT_EQ(avgpool_node.pad()->left(), 0);
- ASSERT_EQ(avgpool_node.pad()->right(), 0);
+ ASSERT_EQ(0, avgpool_node.pad()->top());
+ ASSERT_EQ(0, avgpool_node.pad()->bottom());
+ ASSERT_EQ(0, avgpool_node.pad()->left());
+ ASSERT_EQ(0, avgpool_node.pad()->right());
- ASSERT_EQ(avgpool_node.window()->vertical(), 1);
- ASSERT_EQ(avgpool_node.window()->horizontal(), 1);
+ ASSERT_EQ(1, avgpool_node.window()->vertical());
+ ASSERT_EQ(1, avgpool_node.window()->horizontal());
- ASSERT_EQ(avgpool_node.stride()->vertical(), 1);
- ASSERT_EQ(avgpool_node.stride()->horizontal(), 1);
+ ASSERT_EQ(1, avgpool_node.stride()->vertical());
+ ASSERT_EQ(1, avgpool_node.stride()->horizontal());
}
TEST(FeatureEncodeTest, constructor)
{
loco::FeatureEncode feature_encode;
- ASSERT_EQ(feature_encode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(feature_encode.opcode(), loco::CanonicalOpcode::FeatureEncode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), feature_encode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::FeatureEncode, feature_encode.opcode());
- ASSERT_EQ(feature_encode.input(), nullptr);
- ASSERT_EQ(feature_encode.encoder(), nullptr);
+ ASSERT_EQ(nullptr, feature_encode.input());
+ ASSERT_EQ(nullptr, feature_encode.encoder());
}
TEST(FeatureDecodeTest, constructor)
{
loco::FeatureDecode feature_decode;
- ASSERT_EQ(feature_decode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(feature_decode.opcode(), loco::CanonicalOpcode::FeatureDecode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), feature_decode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::FeatureDecode, feature_decode.opcode());
- ASSERT_EQ(feature_decode.input(), nullptr);
- ASSERT_EQ(feature_decode.decoder(), nullptr);
+ ASSERT_EQ(nullptr, feature_decode.input());
+ ASSERT_EQ(nullptr, feature_decode.decoder());
}
TEST(Reshape_Fixed_Test, constructor)
{
loco::Reshape<loco::ReshapeType::Fixed> reshape;
- ASSERT_EQ(reshape.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(reshape.opcode(), loco::CanonicalOpcode::FixedReshape);
+ ASSERT_EQ(loco::CanonicalDialect::get(), reshape.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::FixedReshape, reshape.opcode());
- ASSERT_EQ(reshape.rank(), 0);
+ ASSERT_EQ(0, reshape.rank());
}
TEST(Reshape_Fixed_Test, shape)
@@ -290,153 +290,153 @@ TEST(Reshape_Fixed_Test, shape)
loco::Reshape<loco::ReshapeType::Fixed> reshape;
reshape.shape({2, 3});
- ASSERT_EQ(reshape.rank(), 2);
- ASSERT_EQ(reshape.dim(0), 2);
- ASSERT_EQ(reshape.dim(1), 3);
+ ASSERT_EQ(2, reshape.rank());
+ ASSERT_EQ(2, reshape.dim(0));
+ ASSERT_EQ(3, reshape.dim(1));
}
TEST(FilterEncodeTest, constructor)
{
loco::FilterEncode filter_encode;
- ASSERT_EQ(filter_encode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(filter_encode.opcode(), loco::CanonicalOpcode::FilterEncode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), filter_encode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::FilterEncode, filter_encode.opcode());
- ASSERT_EQ(filter_encode.input(), nullptr);
- ASSERT_EQ(filter_encode.encoder(), nullptr);
+ ASSERT_EQ(nullptr, filter_encode.input());
+ ASSERT_EQ(nullptr, filter_encode.encoder());
}
TEST(FilterDecodeTest, constructor)
{
loco::FilterDecode filter_decode;
- ASSERT_EQ(filter_decode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(filter_decode.opcode(), loco::CanonicalOpcode::FilterDecode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), filter_decode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::FilterDecode, filter_decode.opcode());
- ASSERT_EQ(filter_decode.input(), nullptr);
- ASSERT_EQ(filter_decode.decoder(), nullptr);
+ ASSERT_EQ(nullptr, filter_decode.input());
+ ASSERT_EQ(nullptr, filter_decode.decoder());
}
TEST(DepthwiseFilterEncodeTest, constructor)
{
loco::DepthwiseFilterEncode dw_filter_encode;
- ASSERT_EQ(dw_filter_encode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(dw_filter_encode.opcode(), loco::CanonicalOpcode::DepthwiseFilterEncode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), dw_filter_encode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::DepthwiseFilterEncode, dw_filter_encode.opcode());
- ASSERT_EQ(dw_filter_encode.input(), nullptr);
- ASSERT_EQ(dw_filter_encode.encoder(), nullptr);
+ ASSERT_EQ(nullptr, dw_filter_encode.input());
+ ASSERT_EQ(nullptr, dw_filter_encode.encoder());
}
TEST(DepthwiseFilterDecodeTest, constructor)
{
loco::DepthwiseFilterDecode dw_filter_decode;
- ASSERT_EQ(dw_filter_decode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(dw_filter_decode.opcode(), loco::CanonicalOpcode::DepthwiseFilterDecode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), dw_filter_decode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::DepthwiseFilterDecode, dw_filter_decode.opcode());
- ASSERT_EQ(dw_filter_decode.input(), nullptr);
- ASSERT_EQ(dw_filter_decode.decoder(), nullptr);
+ ASSERT_EQ(nullptr, dw_filter_decode.input());
+ ASSERT_EQ(nullptr, dw_filter_decode.decoder());
}
TEST(TensorConcatTest, constructor)
{
loco::TensorConcat tensor_concat;
- ASSERT_EQ(tensor_concat.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(tensor_concat.opcode(), loco::CanonicalOpcode::TensorConcat);
+ ASSERT_EQ(loco::CanonicalDialect::get(), tensor_concat.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::TensorConcat, tensor_concat.opcode());
- ASSERT_EQ(tensor_concat.lhs(), nullptr);
- ASSERT_EQ(tensor_concat.rhs(), nullptr);
- ASSERT_EQ(tensor_concat.axis(), 0);
+ ASSERT_EQ(nullptr, tensor_concat.lhs());
+ ASSERT_EQ(nullptr, tensor_concat.rhs());
+ ASSERT_EQ(0, tensor_concat.axis());
tensor_concat.axis(3);
- ASSERT_EQ(tensor_concat.axis(), 3);
+ ASSERT_EQ(3, tensor_concat.axis());
}
TEST(Conv2DTest, constructor)
{
loco::Conv2D conv2d;
- ASSERT_EQ(conv2d.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(conv2d.opcode(), loco::CanonicalOpcode::Conv2D);
+ ASSERT_EQ(loco::CanonicalDialect::get(), conv2d.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::Conv2D, conv2d.opcode());
- ASSERT_EQ(conv2d.ifm(), nullptr);
- ASSERT_EQ(conv2d.ker(), nullptr);
+ ASSERT_EQ(nullptr, conv2d.ifm());
+ ASSERT_EQ(nullptr, conv2d.ker());
ASSERT_NE(conv2d.pad(), nullptr);
- ASSERT_EQ(conv2d.pad()->top(), 0);
- ASSERT_EQ(conv2d.pad()->bottom(), 0);
- ASSERT_EQ(conv2d.pad()->left(), 0);
- ASSERT_EQ(conv2d.pad()->right(), 0);
+ ASSERT_EQ(0, conv2d.pad()->top());
+ ASSERT_EQ(0, conv2d.pad()->bottom());
+ ASSERT_EQ(0, conv2d.pad()->left());
+ ASSERT_EQ(0, conv2d.pad()->right());
ASSERT_NE(conv2d.stride(), nullptr);
- ASSERT_EQ(conv2d.stride()->vertical(), 1);
- ASSERT_EQ(conv2d.stride()->horizontal(), 1);
+ ASSERT_EQ(1, conv2d.stride()->vertical());
+ ASSERT_EQ(1, conv2d.stride()->horizontal());
}
TEST(DepthwiseConv2DTest, constructor)
{
loco::DepthwiseConv2D dw_conv2d;
- ASSERT_EQ(dw_conv2d.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(dw_conv2d.opcode(), loco::CanonicalOpcode::DepthwiseConv2D);
+ ASSERT_EQ(loco::CanonicalDialect::get(), dw_conv2d.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::DepthwiseConv2D, dw_conv2d.opcode());
- ASSERT_EQ(dw_conv2d.ifm(), nullptr);
- ASSERT_EQ(dw_conv2d.ker(), nullptr);
+ ASSERT_EQ(nullptr, dw_conv2d.ifm());
+ ASSERT_EQ(nullptr, dw_conv2d.ker());
ASSERT_NE(dw_conv2d.pad(), nullptr);
- ASSERT_EQ(dw_conv2d.pad()->top(), 0);
- ASSERT_EQ(dw_conv2d.pad()->bottom(), 0);
- ASSERT_EQ(dw_conv2d.pad()->left(), 0);
- ASSERT_EQ(dw_conv2d.pad()->right(), 0);
+ ASSERT_EQ(0, dw_conv2d.pad()->top());
+ ASSERT_EQ(0, dw_conv2d.pad()->bottom());
+ ASSERT_EQ(0, dw_conv2d.pad()->left());
+ ASSERT_EQ(0, dw_conv2d.pad()->right());
ASSERT_NE(dw_conv2d.stride(), nullptr);
- ASSERT_EQ(dw_conv2d.stride()->vertical(), 1);
- ASSERT_EQ(dw_conv2d.stride()->horizontal(), 1);
+ ASSERT_EQ(1, dw_conv2d.stride()->vertical());
+ ASSERT_EQ(1, dw_conv2d.stride()->horizontal());
}
TEST(TransposedConv2DTest, constructor)
{
loco::TransposedConv2D tr_conv2d;
- ASSERT_EQ(tr_conv2d.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(tr_conv2d.opcode(), loco::CanonicalOpcode::TransposedConv2D);
+ ASSERT_EQ(loco::CanonicalDialect::get(), tr_conv2d.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::TransposedConv2D, tr_conv2d.opcode());
- ASSERT_EQ(tr_conv2d.ifm(), nullptr);
- ASSERT_EQ(tr_conv2d.ker(), nullptr);
+ ASSERT_EQ(nullptr, tr_conv2d.ifm());
+ ASSERT_EQ(nullptr, tr_conv2d.ker());
ASSERT_NE(tr_conv2d.pad(), nullptr);
- ASSERT_EQ(tr_conv2d.pad()->top(), 0);
- ASSERT_EQ(tr_conv2d.pad()->bottom(), 0);
- ASSERT_EQ(tr_conv2d.pad()->left(), 0);
- ASSERT_EQ(tr_conv2d.pad()->right(), 0);
+ ASSERT_EQ(0, tr_conv2d.pad()->top());
+ ASSERT_EQ(0, tr_conv2d.pad()->bottom());
+ ASSERT_EQ(0, tr_conv2d.pad()->left());
+ ASSERT_EQ(0, tr_conv2d.pad()->right());
ASSERT_NE(tr_conv2d.stride(), nullptr);
- ASSERT_EQ(tr_conv2d.stride()->vertical(), 1);
- ASSERT_EQ(tr_conv2d.stride()->horizontal(), 1);
+ ASSERT_EQ(1, tr_conv2d.stride()->vertical());
+ ASSERT_EQ(1, tr_conv2d.stride()->horizontal());
}
TEST(BiasEncodeTest, constructor)
{
loco::BiasEncode bias_encode;
- ASSERT_EQ(bias_encode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(bias_encode.opcode(), loco::CanonicalOpcode::BiasEncode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), bias_encode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::BiasEncode, bias_encode.opcode());
- ASSERT_EQ(bias_encode.input(), nullptr);
+ ASSERT_EQ(nullptr, bias_encode.input());
}
TEST(TensorBiasAddTest, constructor)
{
loco::BiasAdd<loco::Domain::Tensor> bias_add;
- ASSERT_EQ(bias_add.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(bias_add.opcode(), loco::CanonicalOpcode::TensorBiasAdd);
+ ASSERT_EQ(loco::CanonicalDialect::get(), bias_add.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::TensorBiasAdd, bias_add.opcode());
- ASSERT_EQ(bias_add.value(), nullptr);
- ASSERT_EQ(bias_add.bias(), nullptr);
- ASSERT_EQ(bias_add.axis(), 0);
+ ASSERT_EQ(nullptr, bias_add.value());
+ ASSERT_EQ(nullptr, bias_add.bias());
+ ASSERT_EQ(0, bias_add.axis());
}
TEST(TensorBiasAddTest, alias)
@@ -450,11 +450,11 @@ TEST(FeatureBiasAddTest, constructor)
{
loco::BiasAdd<loco::Domain::Feature> bias_add;
- ASSERT_EQ(bias_add.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(bias_add.opcode(), loco::CanonicalOpcode::FeatureBiasAdd);
+ ASSERT_EQ(loco::CanonicalDialect::get(), bias_add.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::FeatureBiasAdd, bias_add.opcode());
- ASSERT_EQ(bias_add.value(), nullptr);
- ASSERT_EQ(bias_add.bias(), nullptr);
+ ASSERT_EQ(nullptr, bias_add.value());
+ ASSERT_EQ(nullptr, bias_add.bias());
}
TEST(FeatureBiasAddTest, alias)
@@ -503,74 +503,74 @@ TEST(EltwiseSqrtTest, constructor)
{
loco::EltwiseSqrt sqrt_node;
- ASSERT_EQ(sqrt_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(sqrt_node.opcode(), loco::CanonicalOpcode::EltwiseSqrt);
+ ASSERT_EQ(loco::CanonicalDialect::get(), sqrt_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::EltwiseSqrt, sqrt_node.opcode());
- ASSERT_EQ(sqrt_node.input(), nullptr);
+ ASSERT_EQ(nullptr, sqrt_node.input());
}
TEST(TensorBroadcastTest, constructor)
{
loco::TensorBroadcast tensor_broadcast_node;
- ASSERT_EQ(tensor_broadcast_node.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(tensor_broadcast_node.opcode(), loco::CanonicalOpcode::TensorBroadcast);
+ ASSERT_EQ(loco::CanonicalDialect::get(), tensor_broadcast_node.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::TensorBroadcast, tensor_broadcast_node.opcode());
- ASSERT_EQ(tensor_broadcast_node.input(), nullptr);
+ ASSERT_EQ(nullptr, tensor_broadcast_node.input());
}
TEST(TensorBroadcastTest, mapping)
{
loco::TensorBroadcast tensor_broadcast_node;
- ASSERT_EQ(tensor_broadcast_node.mapping()->defined(0), false);
+ ASSERT_EQ(false, tensor_broadcast_node.mapping()->defined(0));
tensor_broadcast_node.mapping()->dim(0) = 3;
- ASSERT_EQ(tensor_broadcast_node.mapping()->defined(0), true);
- ASSERT_EQ(tensor_broadcast_node.mapping()->dim(0), 3);
+ ASSERT_EQ(true, tensor_broadcast_node.mapping()->defined(0));
+ ASSERT_EQ(3, tensor_broadcast_node.mapping()->dim(0));
}
TEST(MatrixEncodeTest, constructor)
{
loco::MatrixEncode matrix_encode;
- ASSERT_EQ(matrix_encode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(matrix_encode.opcode(), loco::CanonicalOpcode::MatrixEncode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), matrix_encode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::MatrixEncode, matrix_encode.opcode());
- ASSERT_EQ(matrix_encode.input(), nullptr);
+ ASSERT_EQ(nullptr, matrix_encode.input());
}
TEST(MatrixDecodeTest, constructor)
{
loco::MatrixDecode matrix_decode;
- ASSERT_EQ(matrix_decode.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(matrix_decode.opcode(), loco::CanonicalOpcode::MatrixDecode);
+ ASSERT_EQ(loco::CanonicalDialect::get(), matrix_decode.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::MatrixDecode, matrix_decode.opcode());
- ASSERT_EQ(matrix_decode.input(), nullptr);
+ ASSERT_EQ(nullptr, matrix_decode.input());
}
TEST(MatMulTest, constructor)
{
loco::MatMul mat_mul;
- ASSERT_EQ(mat_mul.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(mat_mul.opcode(), loco::CanonicalOpcode::MatMul);
+ ASSERT_EQ(loco::CanonicalDialect::get(), mat_mul.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::MatMul, mat_mul.opcode());
- ASSERT_EQ(mat_mul.lhs(), nullptr);
- ASSERT_EQ(mat_mul.rhs(), nullptr);
+ ASSERT_EQ(nullptr, mat_mul.lhs());
+ ASSERT_EQ(nullptr, mat_mul.rhs());
}
TEST(TransposeTest, constructor)
{
loco::TensorTranspose transpose;
- ASSERT_EQ(transpose.dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(transpose.opcode(), loco::CanonicalOpcode::TensorTranspose);
+ ASSERT_EQ(loco::CanonicalDialect::get(), transpose.dialect());
+ ASSERT_EQ(loco::CanonicalOpcode::TensorTranspose, transpose.opcode());
- ASSERT_EQ(transpose.input(), nullptr);
- ASSERT_EQ(transpose.perm()->size(), 0);
+ ASSERT_EQ(nullptr, transpose.input());
+ ASSERT_EQ(0, transpose.perm()->size());
}
TEST(TransposeTest, perm)
@@ -582,7 +582,7 @@ TEST(TransposeTest, perm)
transpose.perm()->axis(1) = 2;
transpose.perm()->axis(2) = 0;
- ASSERT_EQ(transpose.perm()->axis(0), 1);
- ASSERT_EQ(transpose.perm()->axis(1), 2);
- ASSERT_EQ(transpose.perm()->axis(2), 0);
+ ASSERT_EQ(1, transpose.perm()->axis(0));
+ ASSERT_EQ(2, transpose.perm()->axis(1));
+ ASSERT_EQ(0, transpose.perm()->axis(2));
}
diff --git a/compiler/loco/src/IR/Padding2D.test.cpp b/compiler/loco/src/IR/Padding2D.test.cpp
index 2e3d4af87..b919f26a6 100644
--- a/compiler/loco/src/IR/Padding2D.test.cpp
+++ b/compiler/loco/src/IR/Padding2D.test.cpp
@@ -22,8 +22,8 @@ TEST(PadTest, default_constructor_2D)
{
loco::Padding2D pad;
- ASSERT_EQ(pad.top(), 0);
- ASSERT_EQ(pad.bottom(), 0);
- ASSERT_EQ(pad.left(), 0);
- ASSERT_EQ(pad.right(), 0);
+ ASSERT_EQ(0, pad.top());
+ ASSERT_EQ(0, pad.bottom());
+ ASSERT_EQ(0, pad.left());
+ ASSERT_EQ(0, pad.right());
}
diff --git a/compiler/loco/src/IR/PaddingND.test.cpp b/compiler/loco/src/IR/PaddingND.test.cpp
index 0e20406ff..8c384c027 100644
--- a/compiler/loco/src/IR/PaddingND.test.cpp
+++ b/compiler/loco/src/IR/PaddingND.test.cpp
@@ -26,7 +26,7 @@ TEST(PaddingNDTest, default_constructor_ND)
padding.front(0) = 1;
padding.back(0) = 2;
- ASSERT_EQ(padding.rank(), 1);
- ASSERT_EQ(padding.front(0), 1);
- ASSERT_EQ(padding.back(0), 2);
+ ASSERT_EQ(1, padding.rank());
+ ASSERT_EQ(1, padding.front(0));
+ ASSERT_EQ(2, padding.back(0));
}
diff --git a/compiler/loco/src/IR/PermutingCodec.test.cpp b/compiler/loco/src/IR/PermutingCodec.test.cpp
index 2eff286d0..f8f754110 100644
--- a/compiler/loco/src/IR/PermutingCodec.test.cpp
+++ b/compiler/loco/src/IR/PermutingCodec.test.cpp
@@ -43,10 +43,10 @@ TEST(PemutationTest, feature)
ASSERT_TRUE(perm.mapped(FeatureAxis::Width));
// Check the value
- ASSERT_EQ(perm[FeatureAxis::Count], 5);
- ASSERT_EQ(perm[FeatureAxis::Depth], 6);
- ASSERT_EQ(perm[FeatureAxis::Height], 7);
- ASSERT_EQ(perm[FeatureAxis::Width], 8);
+ ASSERT_EQ(5, perm[FeatureAxis::Count]);
+ ASSERT_EQ(6, perm[FeatureAxis::Depth]);
+ ASSERT_EQ(7, perm[FeatureAxis::Height]);
+ ASSERT_EQ(8, perm[FeatureAxis::Width]);
}
TEST(PemutationTest, filter)
@@ -72,10 +72,10 @@ TEST(PemutationTest, filter)
ASSERT_TRUE(perm.mapped(FilterAxis::Width));
// Check the value
- ASSERT_EQ(perm[FilterAxis::Count], 5);
- ASSERT_EQ(perm[FilterAxis::Depth], 6);
- ASSERT_EQ(perm[FilterAxis::Height], 7);
- ASSERT_EQ(perm[FilterAxis::Width], 8);
+ ASSERT_EQ(5, perm[FilterAxis::Count]);
+ ASSERT_EQ(6, perm[FilterAxis::Depth]);
+ ASSERT_EQ(7, perm[FilterAxis::Height]);
+ ASSERT_EQ(8, perm[FilterAxis::Width]);
}
TEST(PemutationTest, depthwise_filter)
@@ -101,10 +101,10 @@ TEST(PemutationTest, depthwise_filter)
ASSERT_TRUE(perm.mapped(DepthwiseFilterAxis::Width));
// Check the value
- ASSERT_EQ(perm[DepthwiseFilterAxis::Depth], 5);
- ASSERT_EQ(perm[DepthwiseFilterAxis::Multiplier], 6);
- ASSERT_EQ(perm[DepthwiseFilterAxis::Height], 7);
- ASSERT_EQ(perm[DepthwiseFilterAxis::Width], 8);
+ ASSERT_EQ(5, perm[DepthwiseFilterAxis::Depth]);
+ ASSERT_EQ(6, perm[DepthwiseFilterAxis::Multiplier]);
+ ASSERT_EQ(7, perm[DepthwiseFilterAxis::Height]);
+ ASSERT_EQ(8, perm[DepthwiseFilterAxis::Width]);
}
TEST(PermutingEncoderTest, feature)
@@ -147,10 +147,10 @@ TEST(PermutingEncoderTest, feature)
// Get the feature shape corresponding to a given image
auto feature_shape = enc.shape(tensor_shape);
- ASSERT_EQ(feature_shape.count(), 1);
- ASSERT_EQ(feature_shape.depth(), 3);
- ASSERT_EQ(feature_shape.height(), 720);
- ASSERT_EQ(feature_shape.width(), 1280);
+ ASSERT_EQ(1, feature_shape.count());
+ ASSERT_EQ(3, feature_shape.depth());
+ ASSERT_EQ(720, feature_shape.height());
+ ASSERT_EQ(1280, feature_shape.width());
// Let's find a source tensor index!
FeatureIndex feature_index;
@@ -162,10 +162,10 @@ TEST(PermutingEncoderTest, feature)
auto tensor_index = enc.value(feature_index);
- ASSERT_EQ(tensor_index.at(0), 0); // BATCH(COUNT)
- ASSERT_EQ(tensor_index.at(1), 2); // ROW(HEIGHT)
- ASSERT_EQ(tensor_index.at(2), 3); // COLUMN(WIDTH)
- ASSERT_EQ(tensor_index.at(3), 1); // CHANNEL(DEPTH)
+ ASSERT_EQ(0, tensor_index.at(0)); // BATCH(COUNT)
+ ASSERT_EQ(2, tensor_index.at(1)); // ROW(HEIGHT)
+ ASSERT_EQ(3, tensor_index.at(2)); // COLUMN(WIDTH)
+ ASSERT_EQ(1, tensor_index.at(3)); // CHANNEL(DEPTH)
}
TEST(PermutingEncoderTest, feature_clone)
@@ -180,7 +180,7 @@ TEST(PermutingEncoderTest, feature_clone)
src_perm->axis(FeatureAxis::Width) = 2;
auto dst_enc = src_enc.clone();
- auto dst_perm = dynamic_cast<PermutingEncoder<Domain::Feature> *>(dst_enc.get())->perm();
+ auto dst_perm = loco::must_cast<PermutingEncoder<Domain::Feature> *>(dst_enc.get())->perm();
EXPECT_EQ(dst_perm->axis(FeatureAxis::Count), src_perm->axis(FeatureAxis::Count));
EXPECT_EQ(dst_perm->axis(FeatureAxis::Depth), src_perm->axis(FeatureAxis::Depth));
@@ -233,10 +233,10 @@ TEST(PermutingEncoderTest, filter)
// Get the corresponding filter shape
auto filter_shape = enc.shape(tensor_shape);
- ASSERT_EQ(filter_shape.count(), 8);
- ASSERT_EQ(filter_shape.depth(), 4);
- ASSERT_EQ(filter_shape.height(), 1);
- ASSERT_EQ(filter_shape.width(), 7);
+ ASSERT_EQ(8, filter_shape.count());
+ ASSERT_EQ(4, filter_shape.depth());
+ ASSERT_EQ(1, filter_shape.height());
+ ASSERT_EQ(7, filter_shape.width());
// Let's find a source tensor index!
FilterIndex filter_index;
@@ -248,10 +248,10 @@ TEST(PermutingEncoderTest, filter)
auto tensor_index = enc.value(filter_index);
- ASSERT_EQ(tensor_index.at(0), 1); // NTH(COUNT)
- ASSERT_EQ(tensor_index.at(1), 0); // ROW(HEIGHT)
- ASSERT_EQ(tensor_index.at(2), 3); // COLUMN(WIDTH)
- ASSERT_EQ(tensor_index.at(3), 2); // CHANNEL(DEPTH)
+ ASSERT_EQ(1, tensor_index.at(0)); // NTH(COUNT)
+ ASSERT_EQ(0, tensor_index.at(1)); // ROW(HEIGHT)
+ ASSERT_EQ(3, tensor_index.at(2)); // COLUMN(WIDTH)
+ ASSERT_EQ(2, tensor_index.at(3)); // CHANNEL(DEPTH)
}
TEST(PermutingEncoderTest, depthwise_filter)
@@ -293,10 +293,10 @@ TEST(PermutingEncoderTest, depthwise_filter)
// Get the corresponding depthwise filter shape
auto filter_shape = enc.shape(tensor_shape);
- ASSERT_EQ(filter_shape.depth(), 8);
- ASSERT_EQ(filter_shape.multiplier(), 4);
- ASSERT_EQ(filter_shape.height(), 1);
- ASSERT_EQ(filter_shape.width(), 7);
+ ASSERT_EQ(8, filter_shape.depth());
+ ASSERT_EQ(4, filter_shape.multiplier());
+ ASSERT_EQ(1, filter_shape.height());
+ ASSERT_EQ(7, filter_shape.width());
// Let's find a source tensor index!
DepthwiseFilterIndex filter_index;
@@ -308,10 +308,10 @@ TEST(PermutingEncoderTest, depthwise_filter)
auto tensor_index = enc.value(filter_index);
- ASSERT_EQ(tensor_index.at(0), 1); // CHANNEL(DEPTH)
- ASSERT_EQ(tensor_index.at(1), 0); // ROW(HEIGHT)
- ASSERT_EQ(tensor_index.at(2), 3); // COLUMN(WIDTH)
- ASSERT_EQ(tensor_index.at(3), 2); // NTH(MULTIPLIER)
+ ASSERT_EQ(1, tensor_index.at(0)); // CHANNEL(DEPTH)
+ ASSERT_EQ(0, tensor_index.at(1)); // ROW(HEIGHT)
+ ASSERT_EQ(3, tensor_index.at(2)); // COLUMN(WIDTH)
+ ASSERT_EQ(2, tensor_index.at(3)); // NTH(MULTIPLIER)
}
TEST(PermutingEncoderTest, depthwisefilter_init)
@@ -379,11 +379,11 @@ TEST(PermutingDecoderTest, feature)
// Get the tensor shape corresponding to a given image
auto tensor_shape = dec.shape(feature_shape);
- ASSERT_EQ(tensor_shape.rank(), 4);
- ASSERT_EQ(tensor_shape.dim(0), 1); // COUNT
- ASSERT_EQ(tensor_shape.dim(1), 720); // HEIGHT
- ASSERT_EQ(tensor_shape.dim(2), 1280); // WIDTH
- ASSERT_EQ(tensor_shape.dim(3), 3); // DEPTH
+ ASSERT_EQ(4, tensor_shape.rank());
+ ASSERT_EQ(1, tensor_shape.dim(0)); // COUNT
+ ASSERT_EQ(720, tensor_shape.dim(1)); // HEIGHT
+ ASSERT_EQ(1280, tensor_shape.dim(2)); // WIDTH
+ ASSERT_EQ(3, tensor_shape.dim(3)); // DEPTH
// Let's find a source feature index!
TensorIndex tensor_index;
@@ -397,10 +397,10 @@ TEST(PermutingDecoderTest, feature)
auto feature_index = dec.value(tensor_index);
- ASSERT_EQ(feature_index.batch(), 0);
- ASSERT_EQ(feature_index.channel(), 1);
- ASSERT_EQ(feature_index.row(), 2);
- ASSERT_EQ(feature_index.column(), 3);
+ ASSERT_EQ(0, feature_index.batch());
+ ASSERT_EQ(1, feature_index.channel());
+ ASSERT_EQ(2, feature_index.row());
+ ASSERT_EQ(3, feature_index.column());
}
TEST(PermutingDecoderTest, feature_clone)
@@ -415,7 +415,7 @@ TEST(PermutingDecoderTest, feature_clone)
src_perm->axis(FeatureAxis::Width) = 2;
auto dst_enc = src_enc.clone();
- auto dst_perm = dynamic_cast<PermutingDecoder<Domain::Feature> *>(dst_enc.get())->perm();
+ auto dst_perm = loco::must_cast<PermutingDecoder<Domain::Feature> *>(dst_enc.get())->perm();
EXPECT_EQ(dst_perm->axis(FeatureAxis::Count), src_perm->axis(FeatureAxis::Count));
EXPECT_EQ(dst_perm->axis(FeatureAxis::Depth), src_perm->axis(FeatureAxis::Depth));
@@ -468,11 +468,11 @@ TEST(PermutingDecoderTest, filter)
// Get the tensor shape corresponding to a given image
auto tensor_shape = dec.shape(filter_shape);
- ASSERT_EQ(tensor_shape.rank(), 4);
- ASSERT_EQ(tensor_shape.dim(0), 10); // COUNT
- ASSERT_EQ(tensor_shape.dim(1), 6); // HEIGHT
- ASSERT_EQ(tensor_shape.dim(2), 8); // WIDTH
- ASSERT_EQ(tensor_shape.dim(3), 3); // DEPTH
+ ASSERT_EQ(4, tensor_shape.rank());
+ ASSERT_EQ(10, tensor_shape.dim(0)); // COUNT
+ ASSERT_EQ(6, tensor_shape.dim(1)); // HEIGHT
+ ASSERT_EQ(8, tensor_shape.dim(2)); // WIDTH
+ ASSERT_EQ(3, tensor_shape.dim(3)); // DEPTH
// Let's find a source filter index!
TensorIndex tensor_index;
@@ -486,10 +486,10 @@ TEST(PermutingDecoderTest, filter)
auto filter_index = dec.value(tensor_index);
- ASSERT_EQ(filter_index.nth(), 0);
- ASSERT_EQ(filter_index.channel(), 1);
- ASSERT_EQ(filter_index.row(), 2);
- ASSERT_EQ(filter_index.column(), 3);
+ ASSERT_EQ(0, filter_index.nth());
+ ASSERT_EQ(1, filter_index.channel());
+ ASSERT_EQ(2, filter_index.row());
+ ASSERT_EQ(3, filter_index.column());
}
TEST(PermutingDecoderTest, depthwise_filter)
@@ -530,10 +530,10 @@ TEST(PermutingDecoderTest, depthwise_filter)
// Get the corresponding depthwise filter shape
auto tensor_shape = dec.shape(dw_filter_shape);
- ASSERT_EQ(tensor_shape.dim(0).value(), 8);
- ASSERT_EQ(tensor_shape.dim(1).value(), 7);
- ASSERT_EQ(tensor_shape.dim(2).value(), 4);
- ASSERT_EQ(tensor_shape.dim(3).value(), 1);
+ ASSERT_EQ(8, tensor_shape.dim(0).value());
+ ASSERT_EQ(7, tensor_shape.dim(1).value());
+ ASSERT_EQ(4, tensor_shape.dim(2).value());
+ ASSERT_EQ(1, tensor_shape.dim(3).value());
// Let's find a source tensor index!
TensorIndex tensor_index;
@@ -546,8 +546,8 @@ TEST(PermutingDecoderTest, depthwise_filter)
auto dw_filter_index = dec.value(tensor_index);
- ASSERT_EQ(dw_filter_index.channel(), 4);
- ASSERT_EQ(dw_filter_index.nth(), 0);
- ASSERT_EQ(dw_filter_index.row(), 2);
- ASSERT_EQ(dw_filter_index.column(), 1);
+ ASSERT_EQ(4, dw_filter_index.channel());
+ ASSERT_EQ(0, dw_filter_index.nth());
+ ASSERT_EQ(2, dw_filter_index.row());
+ ASSERT_EQ(1, dw_filter_index.column());
}
diff --git a/compiler/loco/src/IR/Stride.test.cpp b/compiler/loco/src/IR/Stride.test.cpp
index 60deb5c6f..9cc88f37b 100644
--- a/compiler/loco/src/IR/Stride.test.cpp
+++ b/compiler/loco/src/IR/Stride.test.cpp
@@ -22,8 +22,8 @@ TEST(StrideTest, default_constructor_2D)
{
loco::Stride<2> stride;
- ASSERT_EQ(stride.vertical(), 1);
- ASSERT_EQ(stride.horizontal(), 1);
+ ASSERT_EQ(1, stride.vertical());
+ ASSERT_EQ(1, stride.horizontal());
}
TEST(StrideTest, setter_and_getter_2D)
@@ -32,11 +32,11 @@ TEST(StrideTest, setter_and_getter_2D)
stride.vertical(2);
- ASSERT_EQ(stride.vertical(), 2);
- ASSERT_EQ(stride.horizontal(), 1);
+ ASSERT_EQ(2, stride.vertical());
+ ASSERT_EQ(1, stride.horizontal());
stride.horizontal(3);
- ASSERT_EQ(stride.vertical(), 2);
- ASSERT_EQ(stride.horizontal(), 3);
+ ASSERT_EQ(2, stride.vertical());
+ ASSERT_EQ(3, stride.horizontal());
}
diff --git a/compiler/loco/src/IR/TensorShape.cpp b/compiler/loco/src/IR/TensorShape.cpp
index ad30dcbc0..e9d4f7e14 100644
--- a/compiler/loco/src/IR/TensorShape.cpp
+++ b/compiler/loco/src/IR/TensorShape.cpp
@@ -37,3 +37,20 @@ uint32_t element_count(const loco::TensorShape *tensor_shape)
}
} // namespace loco
+
+namespace loco
+{
+
+bool operator==(const TensorShape &lhs, const TensorShape &rhs)
+{
+ if (lhs.rank() != rhs.rank())
+ return false;
+ for (uint32_t axis = 0; axis < lhs.rank(); ++axis)
+ {
+ if (!(lhs.dim(axis) == rhs.dim(axis)))
+ return false;
+ }
+ return true;
+}
+
+} // namespace loco
diff --git a/compiler/loco/src/IR/TensorShape.test.cpp b/compiler/loco/src/IR/TensorShape.test.cpp
index ce03ccbd4..ca7af721b 100644
--- a/compiler/loco/src/IR/TensorShape.test.cpp
+++ b/compiler/loco/src/IR/TensorShape.test.cpp
@@ -22,20 +22,20 @@ TEST(TensorShapeTest, default_constructor)
{
loco::TensorShape tensor_shape;
- ASSERT_EQ(tensor_shape.rank(), 0);
+ ASSERT_EQ(0, tensor_shape.rank());
}
TEST(TensorShapeTest, initializer_list_constructor)
{
loco::TensorShape tensor_shape{3, 5};
- ASSERT_EQ(tensor_shape.rank(), 2);
+ ASSERT_EQ(2, tensor_shape.rank());
ASSERT_TRUE(tensor_shape.dim(0).known());
ASSERT_TRUE(tensor_shape.dim(1).known());
- ASSERT_EQ(tensor_shape.dim(0).value(), 3);
- ASSERT_EQ(tensor_shape.dim(1).value(), 5);
+ ASSERT_EQ(3, tensor_shape.dim(0).value());
+ ASSERT_EQ(5, tensor_shape.dim(1).value());
}
TEST(TensorShapeTest, rank)
@@ -44,7 +44,7 @@ TEST(TensorShapeTest, rank)
tensor_shape.rank(2);
- ASSERT_EQ(tensor_shape.rank(), 2);
+ ASSERT_EQ(2, tensor_shape.rank());
ASSERT_FALSE(tensor_shape.dim(0).known());
ASSERT_FALSE(tensor_shape.dim(1).known());
}
@@ -60,7 +60,7 @@ TEST(TensorShapeTest, dim)
ASSERT_TRUE(tensor_shape.dim(0).known());
ASSERT_FALSE(tensor_shape.dim(1).known());
- ASSERT_EQ(tensor_shape.dim(0), 3);
+ ASSERT_EQ(3, tensor_shape.dim(0));
}
TEST(TensorShapeTest, rank_update)
@@ -78,7 +78,7 @@ TEST(TensorShapeTest, rank_update)
ASSERT_FALSE(tensor_shape.dim(2).known());
ASSERT_FALSE(tensor_shape.dim(3).known());
- ASSERT_EQ(tensor_shape.dim(1), 3);
+ ASSERT_EQ(3, tensor_shape.dim(1));
}
TEST(TensorShapeTest, copy)
@@ -92,12 +92,12 @@ TEST(TensorShapeTest, copy)
dst = src;
- ASSERT_EQ(dst.rank(), 2);
+ ASSERT_EQ(2, dst.rank());
ASSERT_FALSE(dst.dim(0).known());
ASSERT_TRUE(dst.dim(1).known());
- ASSERT_EQ(dst.dim(1), 3);
+ ASSERT_EQ(3, dst.dim(1));
}
TEST(TensorShapeTest, element_count)
@@ -105,5 +105,36 @@ TEST(TensorShapeTest, element_count)
// Check Rank-0 case
loco::TensorShape src;
- ASSERT_EQ(loco::element_count(&src), 1);
+ ASSERT_EQ(1, loco::element_count(&src));
+}
+
+TEST(TensorShapeTest, equal_operator)
+{
+ loco::TensorShape lhs, rhs;
+
+ lhs.rank(2);
+ lhs.dim(0) = 1;
+ lhs.dim(1) = 3;
+
+ rhs.rank(1);
+ rhs.dim(0) = 1;
+
+ EXPECT_FALSE(lhs == rhs);
+
+ rhs.rank(2);
+ rhs.dim(0) = 1;
+ rhs.dim(1) = 3;
+
+ EXPECT_TRUE(lhs == rhs);
+
+ // for unknown
+ loco::TensorShape lhs_u, rhs_u;
+
+ lhs_u.rank(2);
+ lhs_u.dim(0) = 1;
+
+ rhs_u.rank(2);
+ rhs_u.dim(0) = 1;
+
+ EXPECT_FALSE(lhs == rhs_u);
}
diff --git a/compiler/loco/src/IR/Use.test.cpp b/compiler/loco/src/IR/Use.test.cpp
index 4a2f1cc25..251e3b416 100644
--- a/compiler/loco/src/IR/Use.test.cpp
+++ b/compiler/loco/src/IR/Use.test.cpp
@@ -25,8 +25,8 @@ TEST(UseTest, constructor)
MockupNode user;
loco::Use use{&user};
- ASSERT_EQ(use.user(), &user);
- ASSERT_EQ(use.node(), nullptr);
+ ASSERT_EQ(&user, use.user());
+ ASSERT_EQ(nullptr, use.node());
}
TEST(UseTest, link_node)
@@ -37,6 +37,6 @@ TEST(UseTest, link_node)
use.node(&def);
- ASSERT_EQ(use.user(), &user);
- ASSERT_EQ(use.node(), &def);
+ ASSERT_EQ(&user, use.user());
+ ASSERT_EQ(&def, use.node());
}
diff --git a/compiler/loco/src/IR/Verifier.test.cpp b/compiler/loco/src/IR/Verifier.test.cpp
index 247a59390..8c40a5058 100644
--- a/compiler/loco/src/IR/Verifier.test.cpp
+++ b/compiler/loco/src/IR/Verifier.test.cpp
@@ -58,7 +58,7 @@ TEST(VerifierTest, valid_error_reporter)
std::vector<ErrorDetail<ErrorCategory::MissingArgument>> errors;
ASSERT_FALSE(loco::valid(g.get(), make_unique<Collector>(&errors)));
- ASSERT_EQ(errors.size(), 1);
- ASSERT_EQ(errors.at(0).node(), push);
- ASSERT_EQ(errors.at(0).index(), 0);
+ ASSERT_EQ(1, errors.size());
+ ASSERT_EQ(push, errors.at(0).node());
+ ASSERT_EQ(0, errors.at(0).index());
}
diff --git a/compiler/loco/src/IR/Window.test.cpp b/compiler/loco/src/IR/Window.test.cpp
index c112e0f96..4cf7183d8 100644
--- a/compiler/loco/src/IR/Window.test.cpp
+++ b/compiler/loco/src/IR/Window.test.cpp
@@ -22,8 +22,8 @@ TEST(WindowTest, default_constructor_2D)
{
loco::Window<2> window;
- ASSERT_EQ(window.vertical(), 1);
- ASSERT_EQ(window.horizontal(), 1);
+ ASSERT_EQ(1, window.vertical());
+ ASSERT_EQ(1, window.horizontal());
}
TEST(WindowTest, setter_and_getter_2D)
@@ -32,11 +32,11 @@ TEST(WindowTest, setter_and_getter_2D)
window.vertical(2);
- ASSERT_EQ(window.vertical(), 2);
- ASSERT_EQ(window.horizontal(), 1);
+ ASSERT_EQ(2, window.vertical());
+ ASSERT_EQ(1, window.horizontal());
window.horizontal(3);
- ASSERT_EQ(window.vertical(), 2);
- ASSERT_EQ(window.horizontal(), 3);
+ ASSERT_EQ(2, window.vertical());
+ ASSERT_EQ(3, window.horizontal());
}
diff --git a/compiler/loco/src/Service/CanonicalShapeInferenceRule.cpp b/compiler/loco/src/Service/CanonicalShapeInferenceRule.cpp
index d30a8279a..6d5adc525 100644
--- a/compiler/loco/src/Service/CanonicalShapeInferenceRule.cpp
+++ b/compiler/loco/src/Service/CanonicalShapeInferenceRule.cpp
@@ -766,7 +766,7 @@ void CanonicalShapeInferenceRule::infer(const Context *ctx, const Node *node, Si
assert(dynamic_cast<const loco::CanonicalNode *>(node) != nullptr);
ForwardShapeInferenceAlgorithm alg{ctx};
- auto shape = dynamic_cast<const loco::CanonicalNode *>(node)->accept(&alg);
+ auto shape = loco::must_cast<const loco::CanonicalNode *>(node)->accept(&alg);
sink->okay(shape);
}
diff --git a/compiler/loco/src/Service/CanonicalShapeInferenceRule.test.cpp b/compiler/loco/src/Service/CanonicalShapeInferenceRule.test.cpp
index 5cc8c3808..e88872b5d 100644
--- a/compiler/loco/src/Service/CanonicalShapeInferenceRule.test.cpp
+++ b/compiler/loco/src/Service/CanonicalShapeInferenceRule.test.cpp
@@ -35,12 +35,12 @@ TEST(CanonicalShapeInferenceRuleTest, minimal)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.push_node));
- ASSERT_EQ(loco::shape_get(testcase.push_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank(), 4);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0), 1);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1), 2);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(2), 3);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(3), 4);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.push_node).domain());
+ ASSERT_EQ(4, loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(1, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(2, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1));
+ ASSERT_EQ(3, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(2));
+ ASSERT_EQ(4, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(3));
}
TEST(CanonicalShapeInferenceRuleTest, const_gen)
@@ -58,10 +58,10 @@ TEST(CanonicalShapeInferenceRuleTest, const_gen)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.push_node));
- ASSERT_EQ(loco::shape_get(testcase.push_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0), 1);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1), 2);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.push_node).domain());
+ ASSERT_EQ(2, loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(1, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(2, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1));
}
TEST(CanonicalShapeInferenceRuleTest, relu)
@@ -78,12 +78,12 @@ TEST(CanonicalShapeInferenceRuleTest, relu)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.push_node));
- ASSERT_EQ(loco::shape_get(testcase.push_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank(), 4);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0), 1);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1), 2);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(2), 3);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(3), 4);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.push_node).domain());
+ ASSERT_EQ(4, loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(1, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(2, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1));
+ ASSERT_EQ(3, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(2));
+ ASSERT_EQ(4, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(3));
}
TEST(CanonicalShapeInferenceRuleTest, feature_codec)
@@ -100,15 +100,15 @@ TEST(CanonicalShapeInferenceRuleTest, feature_codec)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.encode_node));
- ASSERT_EQ(loco::shape_get(testcase.encode_node).domain(), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, loco::shape_get(testcase.encode_node).domain());
ASSERT_TRUE(loco::shape_known(testcase.decode_node));
- ASSERT_EQ(loco::shape_get(testcase.decode_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.decode_node).as<loco::TensorShape>().rank(), 4);
- ASSERT_EQ(loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(0), 1);
- ASSERT_EQ(loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(1), 2);
- ASSERT_EQ(loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(2), 3);
- ASSERT_EQ(loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(3), 4);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.decode_node).domain());
+ ASSERT_EQ(4, loco::shape_get(testcase.decode_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(1, loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(2, loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(1));
+ ASSERT_EQ(3, loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(2));
+ ASSERT_EQ(4, loco::shape_get(testcase.decode_node).as<loco::TensorShape>().dim(3));
}
TEST(CanonicalShapeInferenceRuleTest, avgpool2d)
@@ -141,11 +141,11 @@ TEST(CanonicalShapeInferenceRuleTest, avgpool2d)
//
// NOTE AvgPool2D testcase assumes NHWC layout
ASSERT_TRUE(loco::shape_known(testcase.avgpool2d_node));
- ASSERT_EQ(loco::shape_get(testcase.avgpool2d_node).domain(), loco::Domain::Feature);
- ASSERT_EQ(loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().count(), 1);
- ASSERT_EQ(loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().depth(), 3);
- ASSERT_EQ(loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().height(), 4);
- ASSERT_EQ(loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().width(), 2);
+ ASSERT_EQ(loco::Domain::Feature, loco::shape_get(testcase.avgpool2d_node).domain());
+ ASSERT_EQ(1, loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().count());
+ ASSERT_EQ(3, loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().depth());
+ ASSERT_EQ(4, loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().height());
+ ASSERT_EQ(2, loco::shape_get(testcase.avgpool2d_node).as<FeatureShape>().width());
}
TEST(CanonicalShapeInferenceRuleTest, depthwiseconv2d)
@@ -172,11 +172,11 @@ TEST(CanonicalShapeInferenceRuleTest, depthwiseconv2d)
//
// NOTE DepthwiseConv2D testcase assumes NHWC layout
ASSERT_TRUE(loco::shape_known(testcase.depthwiseconv2d_node));
- ASSERT_EQ(loco::shape_get(testcase.depthwiseconv2d_node).domain(), loco::Domain::Feature);
- ASSERT_EQ(loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().count(), 1);
- ASSERT_EQ(loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().depth(), 6);
- ASSERT_EQ(loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().height(), 3);
- ASSERT_EQ(loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().width(), 3);
+ ASSERT_EQ(loco::Domain::Feature, loco::shape_get(testcase.depthwiseconv2d_node).domain());
+ ASSERT_EQ(1, loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().count());
+ ASSERT_EQ(6, loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().depth());
+ ASSERT_EQ(3, loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().height());
+ ASSERT_EQ(3, loco::shape_get(testcase.depthwiseconv2d_node).as<FeatureShape>().width());
}
TEST(CanonicalShapeInferenceRuleTest, transposedconv2d)
@@ -206,11 +206,11 @@ TEST(CanonicalShapeInferenceRuleTest, transposedconv2d)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.tr_conv2d_node));
- ASSERT_EQ(loco::shape_get(testcase.tr_conv2d_node).domain(), loco::Domain::Feature);
- ASSERT_EQ(loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().count(), 1);
- ASSERT_EQ(loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().height(), 540);
- ASSERT_EQ(loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().width(), 960);
- ASSERT_EQ(loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().depth(), 12);
+ ASSERT_EQ(loco::Domain::Feature, loco::shape_get(testcase.tr_conv2d_node).domain());
+ ASSERT_EQ(1, loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().count());
+ ASSERT_EQ(540, loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().height());
+ ASSERT_EQ(960, loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().width());
+ ASSERT_EQ(12, loco::shape_get(testcase.tr_conv2d_node).as<FeatureShape>().depth());
}
TEST(CanonicalShapeInferenceRuleTest, maxpool2d)
@@ -243,11 +243,11 @@ TEST(CanonicalShapeInferenceRuleTest, maxpool2d)
//
// NOTE MaxPool2D testcase assumes NHWC layout
ASSERT_TRUE(loco::shape_known(testcase.maxpool2d_node));
- ASSERT_EQ(loco::shape_get(testcase.maxpool2d_node).domain(), loco::Domain::Feature);
- ASSERT_EQ(loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().count(), 1);
- ASSERT_EQ(loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().depth(), 3);
- ASSERT_EQ(loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().height(), 4);
- ASSERT_EQ(loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().width(), 2);
+ ASSERT_EQ(loco::Domain::Feature, loco::shape_get(testcase.maxpool2d_node).domain());
+ ASSERT_EQ(1, loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().count());
+ ASSERT_EQ(3, loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().depth());
+ ASSERT_EQ(4, loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().height());
+ ASSERT_EQ(2, loco::shape_get(testcase.maxpool2d_node).as<FeatureShape>().width());
}
TEST(CanonicalShapeInferenceRuleTest, tensor_concat)
@@ -268,11 +268,11 @@ TEST(CanonicalShapeInferenceRuleTest, tensor_concat)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.concat_node));
- ASSERT_EQ(loco::shape_get(testcase.concat_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.concat_node).as<TensorShape>().rank(), 3);
- ASSERT_EQ(loco::shape_get(testcase.concat_node).as<TensorShape>().dim(0), 1);
- ASSERT_EQ(loco::shape_get(testcase.concat_node).as<TensorShape>().dim(1), 6);
- ASSERT_EQ(loco::shape_get(testcase.concat_node).as<TensorShape>().dim(2), 3);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.concat_node).domain());
+ ASSERT_EQ(3, loco::shape_get(testcase.concat_node).as<TensorShape>().rank());
+ ASSERT_EQ(1, loco::shape_get(testcase.concat_node).as<TensorShape>().dim(0));
+ ASSERT_EQ(6, loco::shape_get(testcase.concat_node).as<TensorShape>().dim(1));
+ ASSERT_EQ(3, loco::shape_get(testcase.concat_node).as<TensorShape>().dim(2));
}
TEST(CanonicalShapeInferenceRuleTest, fixed_reshape)
@@ -290,10 +290,10 @@ TEST(CanonicalShapeInferenceRuleTest, fixed_reshape)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.push_node));
- ASSERT_EQ(loco::shape_get(testcase.push_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0), 4);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1), 9);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.push_node).domain());
+ ASSERT_EQ(2, loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(4, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(9, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1));
}
TEST(CanonicalShapeInferenceRuleTest, tensor_broadcast)
@@ -310,10 +310,10 @@ TEST(CanonicalShapeInferenceRuleTest, tensor_broadcast)
// Verify!
ASSERT_TRUE(loco::shape_known(testcase.push_node));
- ASSERT_EQ(loco::shape_get(testcase.push_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0), 4);
- ASSERT_EQ(loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1), 2);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.push_node).domain());
+ ASSERT_EQ(2, loco::shape_get(testcase.push_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(4, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(2, loco::shape_get(testcase.push_node).as<loco::TensorShape>().dim(1));
}
TEST(CanonicalShapeInferenceRuleTest, tensor_transpose)
@@ -336,12 +336,12 @@ TEST(CanonicalShapeInferenceRuleTest, tensor_transpose)
// Verify!
ASSERT_TRUE(loco::shape_known(tc.push_node));
- ASSERT_EQ(loco::shape_get(tc.push_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(tc.push_node).as<loco::TensorShape>().rank(), 4);
- ASSERT_EQ(loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(0), 30);
- ASSERT_EQ(loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(1), 40);
- ASSERT_EQ(loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(2), 10);
- ASSERT_EQ(loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(3), 20);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(tc.push_node).domain());
+ ASSERT_EQ(4, loco::shape_get(tc.push_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(30, loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(40, loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(1));
+ ASSERT_EQ(10, loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(2));
+ ASSERT_EQ(20, loco::shape_get(tc.push_node).as<loco::TensorShape>().dim(3));
}
namespace
@@ -393,8 +393,8 @@ TEST(CanonicalShapeInferenceRuleTest, infer_v2)
rule.infer(&ctx, relu_2, &sink);
- ASSERT_EQ(sink.shape.domain(), loco::Domain::Tensor);
- ASSERT_EQ(sink.shape.as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(sink.shape.as<loco::TensorShape>().dim(0), 4);
- ASSERT_EQ(sink.shape.as<loco::TensorShape>().dim(1), 5);
+ ASSERT_EQ(loco::Domain::Tensor, sink.shape.domain());
+ ASSERT_EQ(2, sink.shape.as<loco::TensorShape>().rank());
+ ASSERT_EQ(4, sink.shape.as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(5, sink.shape.as<loco::TensorShape>().dim(1));
}
diff --git a/compiler/loco/src/Service/GraphBuilder.test.cpp b/compiler/loco/src/Service/GraphBuilder.test.cpp
index 7b2ea5198..812964870 100644
--- a/compiler/loco/src/Service/GraphBuilder.test.cpp
+++ b/compiler/loco/src/Service/GraphBuilder.test.cpp
@@ -41,7 +41,7 @@ TEST(GraphBuilderTest, Usecase_000)
auto node = gbuilder->pop();
- ASSERT_EQ(g->nodes()->size(), 1);
- ASSERT_EQ(node->dialect(), loco::CanonicalDialect::get());
- ASSERT_EQ(node->opnum(), static_cast<uint32_t>(loco::CanonicalOpcode::ConstGen));
+ ASSERT_EQ(1, g->nodes()->size());
+ ASSERT_EQ(loco::CanonicalDialect::get(), node->dialect());
+ ASSERT_EQ(static_cast<uint32_t>(loco::CanonicalOpcode::ConstGen), node->opnum());
}
diff --git a/compiler/loco/src/Service/GraphTestcase.h b/compiler/loco/src/Service/GraphTestcase.h
index 6743b9a14..27b011f8d 100644
--- a/compiler/loco/src/Service/GraphTestcase.h
+++ b/compiler/loco/src/Service/GraphTestcase.h
@@ -59,7 +59,7 @@ template <> loco::Permutation<loco::Domain::Feature> make_NHWC_perm(void)
template <loco::Domain D> loco::Permutation<D> make_HWCN_perm(void);
-// @note Also known as HWIO permutation
+/// @note Also known as HWIO permutation
template <> loco::Permutation<loco::Domain::Filter> make_HWCN_perm(void)
{
loco::Permutation<loco::Domain::Filter> perm;
diff --git a/compiler/loco/src/Service/MultiDialectShapeInferenceRule.test.cpp b/compiler/loco/src/Service/MultiDialectShapeInferenceRule.test.cpp
index ffa9ee5ca..3d5a11ae4 100644
--- a/compiler/loco/src/Service/MultiDialectShapeInferenceRule.test.cpp
+++ b/compiler/loco/src/Service/MultiDialectShapeInferenceRule.test.cpp
@@ -120,15 +120,15 @@ TEST(MultiDialectShapeInferenceRuleTest, test1)
// Verify!
ASSERT_TRUE(loco::shape_known(t23_node));
auto t23_shape = loco::shape_get(t23_node);
- ASSERT_EQ(t23_shape.domain(), loco::Domain::Tensor);
- ASSERT_EQ(t23_shape.as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(t23_shape.as<loco::TensorShape>().dim(0), 2);
- ASSERT_EQ(t23_shape.as<loco::TensorShape>().dim(1), 3);
+ ASSERT_EQ(loco::Domain::Tensor, t23_shape.domain());
+ ASSERT_EQ(2, t23_shape.as<loco::TensorShape>().rank());
+ ASSERT_EQ(2, t23_shape.as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(3, t23_shape.as<loco::TensorShape>().dim(1));
ASSERT_TRUE(loco::shape_known(t45_node));
auto t45_shape = loco::shape_get(t45_node);
- ASSERT_EQ(t45_shape.domain(), loco::Domain::Tensor);
- ASSERT_EQ(t45_shape.as<loco::TensorShape>().rank(), 2);
- ASSERT_EQ(t45_shape.as<loco::TensorShape>().dim(0), 4);
- ASSERT_EQ(t45_shape.as<loco::TensorShape>().dim(1), 5);
+ ASSERT_EQ(loco::Domain::Tensor, t45_shape.domain());
+ ASSERT_EQ(2, t45_shape.as<loco::TensorShape>().rank());
+ ASSERT_EQ(4, t45_shape.as<loco::TensorShape>().dim(0));
+ ASSERT_EQ(5, t45_shape.as<loco::TensorShape>().dim(1));
}
diff --git a/compiler/loco/src/Service/ShapeInference.test.cpp b/compiler/loco/src/Service/ShapeInference.test.cpp
index e10b98844..20857034e 100644
--- a/compiler/loco/src/Service/ShapeInference.test.cpp
+++ b/compiler/loco/src/Service/ShapeInference.test.cpp
@@ -71,16 +71,16 @@ TEST(ShapeInferenceTest, framework)
loco::apply(&rule).to(testcase.graph());
// Framework SHOULD visit all the nodes
- ASSERT_EQ(nodes.size(), 2);
+ ASSERT_EQ(2, nodes.size());
// Framework SHOULD visit "pull" before "push"
- ASSERT_EQ(nodes.at(0), testcase.pull_node);
- ASSERT_EQ(nodes.at(1), testcase.push_node);
+ ASSERT_EQ(testcase.pull_node, nodes.at(0));
+ ASSERT_EQ(testcase.push_node, nodes.at(1));
// Framework SHOULD make an annotation if "rule" returns TRUE
ASSERT_TRUE(loco::shape_known(testcase.pull_node));
- ASSERT_EQ(loco::shape_get(testcase.pull_node).domain(), loco::Domain::Tensor);
- ASSERT_EQ(loco::shape_get(testcase.pull_node).as<loco::TensorShape>().rank(), 1);
- ASSERT_EQ(loco::shape_get(testcase.pull_node).as<loco::TensorShape>().dim(0), 4);
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(testcase.pull_node).domain());
+ ASSERT_EQ(1, loco::shape_get(testcase.pull_node).as<loco::TensorShape>().rank());
+ ASSERT_EQ(4, loco::shape_get(testcase.pull_node).as<loco::TensorShape>().dim(0));
// Framework SHOULD NOT make any annotation if "rule" returns FALSE
ASSERT_FALSE(loco::shape_known(testcase.push_node));
diff --git a/compiler/loco/src/Service/TypeInference.cpp b/compiler/loco/src/Service/TypeInference.cpp
index fbf0033ee..27d7d9a29 100644
--- a/compiler/loco/src/Service/TypeInference.cpp
+++ b/compiler/loco/src/Service/TypeInference.cpp
@@ -182,7 +182,7 @@ bool CanonicalTypeInferenceRule::infer(const Node *node, DataType &dtype) const
assert(dynamic_cast<const loco::CanonicalNode *>(node) != nullptr);
CanonicalTypeForwardAlgorithm alg;
- dtype = dynamic_cast<const loco::CanonicalNode *>(node)->accept(&alg);
+ dtype = loco::must_cast<const loco::CanonicalNode *>(node)->accept(&alg);
return true;
}
diff --git a/compiler/loco/src/Service/TypeInference.test.cpp b/compiler/loco/src/Service/TypeInference.test.cpp
index 4660401db..13bcfa52b 100644
--- a/compiler/loco/src/Service/TypeInference.test.cpp
+++ b/compiler/loco/src/Service/TypeInference.test.cpp
@@ -88,14 +88,14 @@ TEST(TypeInferenceTest, framework)
loco::apply(&rule).to(g.get());
- ASSERT_EQ(nodes.size(), 2); // Framework SHOULD visit all the nodes
- ASSERT_EQ(nodes.at(0), pull_node); // Framework SHOULD visit "pull" before "push"
- ASSERT_EQ(nodes.at(1), push_node);
+ ASSERT_EQ(2, nodes.size()); // Framework SHOULD visit all the nodes
+ ASSERT_EQ(pull_node, nodes.at(0)); // Framework SHOULD visit "pull" before "push"
+ ASSERT_EQ(push_node, nodes.at(1));
// Framework SHOULD NOT make any annotation if "rule" returns FALSE
ASSERT_TRUE(loco::dtype_known(pull_node));
// Framework SHOULD make an annotation if "rule" returns TRUE
- ASSERT_EQ(loco::dtype_get(pull_node), loco::DataType::U8);
+ ASSERT_EQ(loco::DataType::U8, loco::dtype_get(pull_node));
ASSERT_FALSE(loco::dtype_known(push_node));
}
@@ -129,7 +129,7 @@ TEST(CanonicalTypeInferenceRuleTest, minimal)
// Verify!
ASSERT_TRUE(loco::dtype_known(push_node));
- ASSERT_EQ(loco::dtype_get(push_node), loco::DataType::U8);
+ ASSERT_EQ(loco::DataType::U8, loco::dtype_get(push_node));
}
TEST(CanonicalTypeInferenceRuleTest, relu6)
@@ -166,7 +166,7 @@ TEST(CanonicalTypeInferenceRuleTest, relu6)
// Verify!
ASSERT_TRUE(loco::dtype_known(relu6_node));
- ASSERT_EQ(loco::dtype_get(relu6_node), loco::DataType::FLOAT32);
+ ASSERT_EQ(loco::DataType::FLOAT32, loco::dtype_get(relu6_node));
}
TEST(CanonicalTypeInferenceRuleTest, tensor_broadcast)
@@ -183,7 +183,7 @@ TEST(CanonicalTypeInferenceRuleTest, tensor_broadcast)
// Verify!
ASSERT_TRUE(loco::dtype_known(testcase.push_node));
- ASSERT_EQ(loco::dtype_get(testcase.push_node), loco::DataType::U8);
+ ASSERT_EQ(loco::DataType::U8, loco::dtype_get(testcase.push_node));
}
// mockup for MultiDialectTypeInferenceRule
@@ -275,8 +275,8 @@ TEST(MultiDialectTypeInferenceRuleTest, test1)
// Verify!
ASSERT_TRUE(loco::dtype_known(s8_node));
- ASSERT_EQ(loco::dtype_get(s8_node), loco::DataType::S8);
+ ASSERT_EQ(loco::DataType::S8, loco::dtype_get(s8_node));
ASSERT_TRUE(loco::dtype_known(u8_node));
- ASSERT_EQ(loco::dtype_get(u8_node), loco::DataType::U8);
+ ASSERT_EQ(loco::DataType::U8, loco::dtype_get(u8_node));
}
diff --git a/compiler/loco/src/loco.test.cpp b/compiler/loco/src/loco.test.cpp
index 4c4f51aa5..e5668b6d3 100644
--- a/compiler/loco/src/loco.test.cpp
+++ b/compiler/loco/src/loco.test.cpp
@@ -58,11 +58,11 @@ TEST(LOCO, identity_network)
loco::link(graph_output, push_node);
// loco::link SHOULD update "index"
- ASSERT_EQ(pull_node->index(), 0);
- ASSERT_EQ(graph_input->dtype(), loco::DataType::FLOAT32);
+ ASSERT_EQ(0, pull_node->index());
+ ASSERT_EQ(loco::DataType::FLOAT32, graph_input->dtype());
// loco::link SHOULD update "index"
- ASSERT_EQ(push_node->index(), 0);
+ ASSERT_EQ(0, push_node->index());
}
#if 0
@@ -99,10 +99,10 @@ TEST(LOCO, identity_network_V2)
push_node->index(0);
push_node->from(pull_node);
- ASSERT_EQ(pull_node->dtype(), loco::DataType::FLOAT32);
+ ASSERT_EQ(loco::DataType::FLOAT32, pull_node->dtype());
// TODO Check Shape of pull_node
// TODO Check Shape of push_node
- ASSERT_EQ(loco::pull_node(g.get(), 0), pull_node);
- ASSERT_EQ(loco::push_node(g.get(), 0), push_node);
+ ASSERT_EQ(pull_node, loco::pull_node(g.get(), 0));
+ ASSERT_EQ(push_node, loco::push_node(g.get(), 0));
}
diff --git a/compiler/locoex-customop/src/Service/COpShapeInferenceRule.cpp b/compiler/locoex-customop/src/Service/COpShapeInferenceRule.cpp
index 4dc8f461f..cf9decc55 100644
--- a/compiler/locoex-customop/src/Service/COpShapeInferenceRule.cpp
+++ b/compiler/locoex-customop/src/Service/COpShapeInferenceRule.cpp
@@ -37,7 +37,7 @@ bool COpShapeInferenceRule::infer(const loco::Node *node, loco::NodeShape &shape
assert(node->dialect() == COpDialect::get());
assert(dynamic_cast<const COpNode *>(node) != nullptr);
- auto cop_call = dynamic_cast<const COpCall *>(node);
+ auto cop_call = loco::must_cast<const COpCall *>(node);
// Note that the shape of custom op is considered as TensorShape
// TODO Decide how to deal with this shape error cases
diff --git a/compiler/locomotiv/src/Node/AvgPool2D.cpp b/compiler/locomotiv/src/Node/AvgPool2D.cpp
index ad603badf..5fdf1e725 100644
--- a/compiler/locomotiv/src/Node/AvgPool2D.cpp
+++ b/compiler/locomotiv/src/Node/AvgPool2D.cpp
@@ -129,7 +129,8 @@ nncc::core::ADT::tensor::Buffer<T> avgPool2D(const loco::AvgPool2D *avgpool2d,
}
}
- assert(filter_ele_count > 0);
+ if (filter_ele_count <= 0)
+ throw std::runtime_error("The number of filter element must be greater than zero.");
output_buf.at(Index({batch, out_y, out_x, channel})) = total / filter_ele_count;
}
}
@@ -141,10 +142,12 @@ nncc::core::ADT::tensor::Buffer<T> avgPool2D(const loco::AvgPool2D *avgpool2d,
} // namespace
-namespace locomotiv
+namespace
{
-void NodeExecution::execute(loco::AvgPool2D *avgpool2d)
+using namespace locomotiv;
+
+void exectute_node(loco::AvgPool2D *avgpool2d)
{
auto ifm_data = annot_data(avgpool2d->ifm());
@@ -176,4 +179,11 @@ void NodeExecution::execute(loco::AvgPool2D *avgpool2d)
annot_domain(avgpool2d, loco::Domain::Feature);
}
+} // namespace
+
+namespace locomotiv
+{
+
+void NodeExecution::execute(loco::AvgPool2D *avgpool2d) { exectute_node(avgpool2d); }
+
} // namespace locomotiv
diff --git a/compiler/locomotiv/src/Node/AvgPool2D.test.cpp b/compiler/locomotiv/src/Node/AvgPool2D.test.cpp
index 89e10a35e..f9863b47d 100644
--- a/compiler/locomotiv/src/Node/AvgPool2D.test.cpp
+++ b/compiler/locomotiv/src/Node/AvgPool2D.test.cpp
@@ -88,10 +88,10 @@ void run_test(const float *ifm, const float *expected_ofm, const Shape &ifm_shap
for (nncc::core::ADT::tensor::IndexEnumerator e{ofm_shape}; e.valid(); e.advance())
{
const auto &ind = e.current();
- ASSERT_FLOAT_EQ(avgpool2d_data->as_f32_bufptr()->at(ind), ofm_overlay.at(ind));
+ ASSERT_FLOAT_EQ(ofm_overlay.at(ind), avgpool2d_data->as_f32_bufptr()->at(ind));
}
- ASSERT_EQ(locomotiv::annot_domain(avgpool2d), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(avgpool2d));
}
} // namespace
diff --git a/compiler/locomotiv/src/Node/BiasAdd.cpp b/compiler/locomotiv/src/Node/BiasAdd.cpp
index 0724fb728..dfe32ca92 100644
--- a/compiler/locomotiv/src/Node/BiasAdd.cpp
+++ b/compiler/locomotiv/src/Node/BiasAdd.cpp
@@ -46,6 +46,8 @@ namespace locomotiv
void NodeExecution::execute(loco::BiasAdd<loco::Domain::Tensor> *bias_add)
{
+ validate(bias_add, "BiasAdd is nullptr");
+
auto input_data = locomotiv::annot_data(bias_add->value());
auto bias_data = locomotiv::annot_data(bias_add->bias());
@@ -63,6 +65,8 @@ void NodeExecution::execute(loco::BiasAdd<loco::Domain::Tensor> *bias_add)
void NodeExecution::execute(loco::BiasAdd<loco::Domain::Feature> *bias_add)
{
+ validate(bias_add, "BiasAdd is nullptr");
+
auto input_data = locomotiv::annot_data(bias_add->value());
auto bias_data = locomotiv::annot_data(bias_add->bias());
diff --git a/compiler/locomotiv/src/Node/BiasAdd.test.cpp b/compiler/locomotiv/src/Node/BiasAdd.test.cpp
index 0ca826673..cba2d414a 100644
--- a/compiler/locomotiv/src/Node/BiasAdd.test.cpp
+++ b/compiler/locomotiv/src/Node/BiasAdd.test.cpp
@@ -107,16 +107,16 @@ TEST(NodeExecution_TensorBiasAdd, f32)
// comparing the result
ASSERT_NE(bias_add_data, nullptr);
- ASSERT_EQ(bias_add_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(bias_add_data->shape()), Shape({1, 3, 3, 2}));
+ ASSERT_EQ(loco::DataType::FLOAT32, bias_add_data->dtype());
+ ASSERT_EQ(Shape({1, 3, 3, 2}), *(bias_add_data->shape()));
uint32_t n = 0;
for (IndexEnumerator e{*(bias_add_data->shape())}; e.valid(); e.advance())
{
- ASSERT_FLOAT_EQ(bias_add_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
+ ASSERT_FLOAT_EQ(out_val[n++], bias_add_data->as_f32_bufptr()->at(e.current()));
}
- ASSERT_EQ(locomotiv::annot_domain(bias_add), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(bias_add));
}
/*
@@ -191,14 +191,14 @@ TEST(NodeExecution_FeatureBiasAdd, f32)
// comparing the result
ASSERT_NE(bias_add_data, nullptr);
- ASSERT_EQ(bias_add_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(bias_add_data->shape()), Shape({1, 3, 3, 2}));
+ ASSERT_EQ(loco::DataType::FLOAT32, bias_add_data->dtype());
+ ASSERT_EQ(Shape({1, 3, 3, 2}), *(bias_add_data->shape()));
uint32_t n = 0;
for (IndexEnumerator e{*(bias_add_data->shape())}; e.valid(); e.advance())
{
- ASSERT_FLOAT_EQ(bias_add_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
+ ASSERT_FLOAT_EQ(out_val[n++], bias_add_data->as_f32_bufptr()->at(e.current()));
}
- ASSERT_EQ(locomotiv::annot_domain(feature_bias_add), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(feature_bias_add));
}
diff --git a/compiler/locomotiv/src/Node/BiasEncode.test.cpp b/compiler/locomotiv/src/Node/BiasEncode.test.cpp
index 73e2af8a8..cdb255ccb 100644
--- a/compiler/locomotiv/src/Node/BiasEncode.test.cpp
+++ b/compiler/locomotiv/src/Node/BiasEncode.test.cpp
@@ -82,11 +82,11 @@ template <typename T> void test()
auto bias_enc_data = locomotiv::annot_data(bias_enc);
ASSERT_NE(bias_enc_data, nullptr);
- ASSERT_EQ(bias_enc_data->dtype(), loco_dtype<T>());
- ASSERT_EQ(*(bias_enc_data->shape()), Shape{1});
- ASSERT_EQ(as_bufptr<T>(bias_enc_data)->at(Index{0}), pull_buf.at(Index{0}));
+ ASSERT_EQ(loco_dtype<T>(), bias_enc_data->dtype());
+ ASSERT_EQ(Shape{1}, *(bias_enc_data->shape()));
+ ASSERT_EQ(pull_buf.at(Index{0}), as_bufptr<T>(bias_enc_data)->at(Index{0}));
- ASSERT_EQ(locomotiv::annot_domain(bias_enc), loco::Domain::Bias);
+ ASSERT_EQ(loco::Domain::Bias, locomotiv::annot_domain(bias_enc));
}
} // namespace
diff --git a/compiler/locomotiv/src/Node/ConstGen.test.cpp b/compiler/locomotiv/src/Node/ConstGen.test.cpp
index 838f4c11d..382cc77e1 100644
--- a/compiler/locomotiv/src/Node/ConstGen.test.cpp
+++ b/compiler/locomotiv/src/Node/ConstGen.test.cpp
@@ -53,16 +53,16 @@ TEST(NodeExecution_ConstGen, s32)
// test
auto data = locomotiv::annot_data(&constgen);
ASSERT_NE(data, nullptr);
- ASSERT_EQ(data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*data->shape(), Shape({2, 3}));
- ASSERT_EQ(data->as_s32_bufptr()->at(Index{0, 0}), 0);
- ASSERT_EQ(data->as_s32_bufptr()->at(Index{0, 1}), 1);
- ASSERT_EQ(data->as_s32_bufptr()->at(Index{0, 2}), 2);
- ASSERT_EQ(data->as_s32_bufptr()->at(Index{1, 0}), -3);
- ASSERT_EQ(data->as_s32_bufptr()->at(Index{1, 1}), -4);
- ASSERT_EQ(data->as_s32_bufptr()->at(Index{1, 2}), -5);
-
- ASSERT_EQ(locomotiv::annot_domain(&constgen), loco::Domain::Tensor);
+ ASSERT_EQ(loco::DataType::S32, data->dtype());
+ ASSERT_EQ(Shape({2, 3}), *data->shape());
+ ASSERT_EQ(0, data->as_s32_bufptr()->at(Index{0, 0}));
+ ASSERT_EQ(1, data->as_s32_bufptr()->at(Index{0, 1}));
+ ASSERT_EQ(2, data->as_s32_bufptr()->at(Index{0, 2}));
+ ASSERT_EQ(-3, data->as_s32_bufptr()->at(Index{1, 0}));
+ ASSERT_EQ(-4, data->as_s32_bufptr()->at(Index{1, 1}));
+ ASSERT_EQ(-5, data->as_s32_bufptr()->at(Index{1, 2}));
+
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(&constgen));
}
TEST(NodeExecution_ConstGen, f32)
@@ -87,14 +87,14 @@ TEST(NodeExecution_ConstGen, f32)
// test
auto data = locomotiv::annot_data(&constgen);
ASSERT_NE(data, nullptr);
- ASSERT_EQ(data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*data->shape(), Shape({2, 3}));
- ASSERT_FLOAT_EQ(data->as_f32_bufptr()->at(Index{0, 0}), 0.0f);
- ASSERT_FLOAT_EQ(data->as_f32_bufptr()->at(Index{0, 1}), 1.0f);
- ASSERT_FLOAT_EQ(data->as_f32_bufptr()->at(Index{0, 2}), 2.0f);
- ASSERT_FLOAT_EQ(data->as_f32_bufptr()->at(Index{1, 0}), 3.0f);
- ASSERT_FLOAT_EQ(data->as_f32_bufptr()->at(Index{1, 1}), 4.0f);
- ASSERT_FLOAT_EQ(data->as_f32_bufptr()->at(Index{1, 2}), 5.0f);
-
- ASSERT_EQ(locomotiv::annot_domain(&constgen), loco::Domain::Tensor);
+ ASSERT_EQ(loco::DataType::FLOAT32, data->dtype());
+ ASSERT_EQ(Shape({2, 3}), *data->shape());
+ ASSERT_FLOAT_EQ(0.0f, data->as_f32_bufptr()->at(Index{0, 0}));
+ ASSERT_FLOAT_EQ(1.0f, data->as_f32_bufptr()->at(Index{0, 1}));
+ ASSERT_FLOAT_EQ(2.0f, data->as_f32_bufptr()->at(Index{0, 2}));
+ ASSERT_FLOAT_EQ(3.0f, data->as_f32_bufptr()->at(Index{1, 0}));
+ ASSERT_FLOAT_EQ(4.0f, data->as_f32_bufptr()->at(Index{1, 1}));
+ ASSERT_FLOAT_EQ(5.0f, data->as_f32_bufptr()->at(Index{1, 2}));
+
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(&constgen));
}
diff --git a/compiler/locomotiv/src/Node/Conv2D.test.cpp b/compiler/locomotiv/src/Node/Conv2D.test.cpp
index 83d7fc268..66e947acc 100644
--- a/compiler/locomotiv/src/Node/Conv2D.test.cpp
+++ b/compiler/locomotiv/src/Node/Conv2D.test.cpp
@@ -101,10 +101,10 @@ void run_test(const float *ifm, const float *ker, const float *expected_ofm, con
for (nncc::core::ADT::tensor::IndexEnumerator e{ofm_shape}; e.valid(); e.advance())
{
const auto &ind = e.current();
- ASSERT_FLOAT_EQ(conv2d_result->as_f32_bufptr()->at(ind), ofm_overlay.at(ind));
+ ASSERT_FLOAT_EQ(ofm_overlay.at(ind), conv2d_result->as_f32_bufptr()->at(ind));
}
- ASSERT_EQ(locomotiv::annot_domain(conv2d), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(conv2d));
}
} // namespace
diff --git a/compiler/locomotiv/src/Node/DepthwiseConv2D.test.cpp b/compiler/locomotiv/src/Node/DepthwiseConv2D.test.cpp
index 48824c2e0..1ff333be0 100644
--- a/compiler/locomotiv/src/Node/DepthwiseConv2D.test.cpp
+++ b/compiler/locomotiv/src/Node/DepthwiseConv2D.test.cpp
@@ -101,10 +101,10 @@ void run_test(const float *ifm, const float *ker, const float *expected_ofm, con
for (nncc::core::ADT::tensor::IndexEnumerator e{ofm_shape}; e.valid(); e.advance())
{
const auto &ind = e.current();
- ASSERT_FLOAT_EQ(dw_conv2d_result->as_f32_bufptr()->at(ind), ofm_overlay.at(ind));
+ ASSERT_FLOAT_EQ(ofm_overlay.at(ind), dw_conv2d_result->as_f32_bufptr()->at(ind));
}
- ASSERT_EQ(locomotiv::annot_domain(dw_conv2d), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(dw_conv2d));
}
} // namespace
diff --git a/compiler/locomotiv/src/Node/DepthwiseFilterEncode.test.cpp b/compiler/locomotiv/src/Node/DepthwiseFilterEncode.test.cpp
index db828c08b..5b2ec9326 100644
--- a/compiler/locomotiv/src/Node/DepthwiseFilterEncode.test.cpp
+++ b/compiler/locomotiv/src/Node/DepthwiseFilterEncode.test.cpp
@@ -77,14 +77,14 @@ TEST(NodeExecution_DepthwiseFilterEncode, f32)
auto enc_data = locomotiv::annot_data(enc);
ASSERT_NE(enc_data, nullptr);
- ASSERT_EQ(enc_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(enc_data->shape()), (Shape{H, W, C, M})); // locomotiv depthwise filter is HWCM
+ ASSERT_EQ(loco::DataType::FLOAT32, enc_data->dtype());
+ ASSERT_EQ((Shape{H, W, C, M}), *(enc_data->shape())); // locomotiv depthwise filter is HWCM
auto enc_buf = enc_data->as_f32_bufptr();
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
for (uint32_t c = 0; c < C; ++c)
for (uint32_t m = 0; m < M; ++m)
- ASSERT_FLOAT_EQ(pull_buf.at(Index{m, h, w, c}), enc_buf->at(Index{h, w, c, m}));
+ ASSERT_FLOAT_EQ(enc_buf->at(Index{h, w, c, m}), pull_buf.at(Index{m, h, w, c}));
- ASSERT_EQ(locomotiv::annot_domain(enc), loco::Domain::DepthwiseFilter);
+ ASSERT_EQ(loco::Domain::DepthwiseFilter, locomotiv::annot_domain(enc));
}
diff --git a/compiler/locomotiv/src/Node/EltwiseAdd.test.cpp b/compiler/locomotiv/src/Node/EltwiseAdd.test.cpp
index 2899dccdd..2873a6544 100644
--- a/compiler/locomotiv/src/Node/EltwiseAdd.test.cpp
+++ b/compiler/locomotiv/src/Node/EltwiseAdd.test.cpp
@@ -108,14 +108,14 @@ TEST(NodeExecution_EltwiseAdd, f32)
// comparing the result
ASSERT_NE(eltwise_add_data, nullptr);
- ASSERT_EQ(eltwise_add_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(eltwise_add_data->shape()), Shape({1, 3, 3, 2}));
+ ASSERT_EQ(loco::DataType::FLOAT32, eltwise_add_data->dtype());
+ ASSERT_EQ(Shape({1, 3, 3, 2}), *(eltwise_add_data->shape()));
uint32_t n = 0;
for (IndexEnumerator e{*(eltwise_add_data->shape())}; e.valid(); e.advance())
{
- ASSERT_FLOAT_EQ(eltwise_add_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
+ ASSERT_FLOAT_EQ(out_val[n++], eltwise_add_data->as_f32_bufptr()->at(e.current()));
}
- ASSERT_EQ(locomotiv::annot_domain(eltwise_add), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(eltwise_add));
}
diff --git a/compiler/locomotiv/src/Node/EltwiseDiv.test.cpp b/compiler/locomotiv/src/Node/EltwiseDiv.test.cpp
index 60950c15b..cc5045073 100644
--- a/compiler/locomotiv/src/Node/EltwiseDiv.test.cpp
+++ b/compiler/locomotiv/src/Node/EltwiseDiv.test.cpp
@@ -108,14 +108,14 @@ TEST(NodeExecution_EltwiseDiv, f32)
// comparing the result
ASSERT_NE(eltwise_div_data, nullptr);
- ASSERT_EQ(eltwise_div_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(eltwise_div_data->shape()), Shape({1, 3, 3, 2}));
+ ASSERT_EQ(loco::DataType::FLOAT32, eltwise_div_data->dtype());
+ ASSERT_EQ(Shape({1, 3, 3, 2}), *(eltwise_div_data->shape()));
uint32_t n = 0;
for (IndexEnumerator e{*(eltwise_div_data->shape())}; e.valid(); e.advance())
{
- ASSERT_FLOAT_EQ(eltwise_div_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
+ ASSERT_FLOAT_EQ(out_val[n++], eltwise_div_data->as_f32_bufptr()->at(e.current()));
}
- ASSERT_EQ(locomotiv::annot_domain(eltwise_div), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(eltwise_div));
}
diff --git a/compiler/locomotiv/src/Node/EltwiseMax.test.cpp b/compiler/locomotiv/src/Node/EltwiseMax.test.cpp
index c64db8994..94c398212 100644
--- a/compiler/locomotiv/src/Node/EltwiseMax.test.cpp
+++ b/compiler/locomotiv/src/Node/EltwiseMax.test.cpp
@@ -108,14 +108,14 @@ TEST(NodeExecution_EltwiseMax, f32)
// comparing the result
ASSERT_NE(eltwise_max_data, nullptr);
- ASSERT_EQ(eltwise_max_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(eltwise_max_data->shape()), Shape({1, 3, 3, 2}));
+ ASSERT_EQ(loco::DataType::FLOAT32, eltwise_max_data->dtype());
+ ASSERT_EQ(Shape({1, 3, 3, 2}), *(eltwise_max_data->shape()));
uint32_t n = 0;
for (IndexEnumerator e{*(eltwise_max_data->shape())}; e.valid(); e.advance())
{
- ASSERT_FLOAT_EQ(eltwise_max_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
+ ASSERT_FLOAT_EQ(out_val[n++], eltwise_max_data->as_f32_bufptr()->at(e.current()));
}
- ASSERT_EQ(locomotiv::annot_domain(eltwise_max), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(eltwise_max));
}
diff --git a/compiler/locomotiv/src/Node/EltwiseMul.test.cpp b/compiler/locomotiv/src/Node/EltwiseMul.test.cpp
index b76888300..bbe51bce1 100644
--- a/compiler/locomotiv/src/Node/EltwiseMul.test.cpp
+++ b/compiler/locomotiv/src/Node/EltwiseMul.test.cpp
@@ -111,14 +111,14 @@ TEST(NodeExecution_EltwiseMul, f32)
// comparing the result
ASSERT_NE(eltwise_mul_data, nullptr);
- ASSERT_EQ(eltwise_mul_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(eltwise_mul_data->shape()), Shape({1, 3, 3, 2}));
+ ASSERT_EQ(loco::DataType::FLOAT32, eltwise_mul_data->dtype());
+ ASSERT_EQ(Shape({1, 3, 3, 2}), *(eltwise_mul_data->shape()));
uint32_t n = 0;
for (IndexEnumerator e{*(eltwise_mul_data->shape())}; e.valid(); e.advance())
{
- ASSERT_FLOAT_EQ(eltwise_mul_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
+ ASSERT_FLOAT_EQ(out_val[n++], eltwise_mul_data->as_f32_bufptr()->at(e.current()));
}
- ASSERT_EQ(locomotiv::annot_domain(eltwise_mul), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(eltwise_mul));
}
diff --git a/compiler/locomotiv/src/Node/EltwiseSqrt.test.cpp b/compiler/locomotiv/src/Node/EltwiseSqrt.test.cpp
index adb1b853e..44d0ca654 100644
--- a/compiler/locomotiv/src/Node/EltwiseSqrt.test.cpp
+++ b/compiler/locomotiv/src/Node/EltwiseSqrt.test.cpp
@@ -58,12 +58,12 @@ TEST(NodeExecution_EltwiseSqrt, f32)
auto sqrt_data = locomotiv::annot_data(sqrt);
ASSERT_NE(sqrt_data, nullptr);
- ASSERT_EQ(sqrt_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(sqrt_data->shape()), Shape{4});
- ASSERT_FLOAT_EQ(sqrt_data->as_f32_bufptr()->at(Index{0}), 2.0f);
- ASSERT_FLOAT_EQ(sqrt_data->as_f32_bufptr()->at(Index{1}), 3.0f);
- ASSERT_FLOAT_EQ(sqrt_data->as_f32_bufptr()->at(Index{2}), 0.0f);
+ ASSERT_EQ(loco::DataType::FLOAT32, sqrt_data->dtype());
+ ASSERT_EQ(Shape{4}, *(sqrt_data->shape()));
+ ASSERT_FLOAT_EQ(2.0f, sqrt_data->as_f32_bufptr()->at(Index{0}));
+ ASSERT_FLOAT_EQ(3.0f, sqrt_data->as_f32_bufptr()->at(Index{1}));
+ ASSERT_FLOAT_EQ(0.0f, sqrt_data->as_f32_bufptr()->at(Index{2}));
ASSERT_TRUE(std::isnan(sqrt_data->as_f32_bufptr()->at(Index{3})));
- ASSERT_EQ(locomotiv::annot_domain(sqrt), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(sqrt));
}
diff --git a/compiler/locomotiv/src/Node/EltwiseSub.test.cpp b/compiler/locomotiv/src/Node/EltwiseSub.test.cpp
index 7eff90f9e..94dc9c9ad 100644
--- a/compiler/locomotiv/src/Node/EltwiseSub.test.cpp
+++ b/compiler/locomotiv/src/Node/EltwiseSub.test.cpp
@@ -108,14 +108,14 @@ TEST(NodeExecution_EltwiseSub, f32)
// comparing the result
ASSERT_NE(eltwise_sub_data, nullptr);
- ASSERT_EQ(eltwise_sub_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(eltwise_sub_data->shape()), Shape({1, 3, 3, 2}));
+ ASSERT_EQ(loco::DataType::FLOAT32, eltwise_sub_data->dtype());
+ ASSERT_EQ(Shape({1, 3, 3, 2}), *(eltwise_sub_data->shape()));
uint32_t n = 0;
for (IndexEnumerator e{*(eltwise_sub_data->shape())}; e.valid(); e.advance())
{
- ASSERT_FLOAT_EQ(eltwise_sub_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
+ ASSERT_FLOAT_EQ(out_val[n++], eltwise_sub_data->as_f32_bufptr()->at(e.current()));
}
- ASSERT_EQ(locomotiv::annot_domain(eltwise_sub), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(eltwise_sub));
}
diff --git a/compiler/locomotiv/src/Node/FeatureCodec.test.cpp b/compiler/locomotiv/src/Node/FeatureCodec.test.cpp
index c35f0e69a..1b6b06c13 100644
--- a/compiler/locomotiv/src/Node/FeatureCodec.test.cpp
+++ b/compiler/locomotiv/src/Node/FeatureCodec.test.cpp
@@ -128,16 +128,16 @@ TEST_F(NodeExecution_FeatureCodec, s32)
// Test FeatureEncode
auto enc_data = locomotiv::annot_data(enc);
ASSERT_NE(enc_data, nullptr);
- ASSERT_EQ(enc_data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(enc_data->shape()), (Shape{N, H, W, C})); // locomotiv feature is NHWC
+ ASSERT_EQ(loco::DataType::S32, enc_data->dtype());
+ ASSERT_EQ((Shape{N, H, W, C}), *(enc_data->shape())); // locomotiv feature is NHWC
auto enc_buf = enc_data->as_s32_bufptr();
for (uint32_t n = 0; n < N; ++n)
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
for (uint32_t c = 0; c < C; ++c)
- ASSERT_EQ(pull_buf.at(Index{n, c, h, w}), enc_buf->at(Index{n, h, w, c}));
+ ASSERT_EQ(enc_buf->at(Index{n, h, w, c}), pull_buf.at(Index{n, c, h, w}));
- ASSERT_EQ(locomotiv::annot_domain(enc), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(enc));
// FeatureDecode
auto dec = feature_decode_layer(enc, NCHW);
@@ -146,16 +146,16 @@ TEST_F(NodeExecution_FeatureCodec, s32)
// Test FeatureDecode: Encode -> Decode == identity
auto dec_data = locomotiv::annot_data(dec);
ASSERT_NE(dec_data, nullptr);
- ASSERT_EQ(dec_data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(dec_data->shape()), (Shape{N, C, H, W}));
+ ASSERT_EQ(loco::DataType::S32, dec_data->dtype());
+ ASSERT_EQ((Shape{N, C, H, W}), *(dec_data->shape()));
auto dec_buf = dec_data->as_s32_bufptr();
for (uint32_t n = 0; n < N; ++n)
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
for (uint32_t c = 0; c < C; ++c)
- ASSERT_EQ(pull_buf.at(Index{n, c, h, w}), dec_buf->at(Index{n, c, h, w}));
+ ASSERT_EQ(dec_buf->at(Index{n, c, h, w}), pull_buf.at(Index{n, c, h, w}));
- ASSERT_EQ(locomotiv::annot_domain(dec), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(dec));
}
TEST_F(NodeExecution_FeatureCodec, f32)
@@ -192,16 +192,16 @@ TEST_F(NodeExecution_FeatureCodec, f32)
// Test FeatureEncode
auto enc_data = locomotiv::annot_data(enc);
ASSERT_NE(enc_data, nullptr);
- ASSERT_EQ(enc_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(enc_data->shape()), (Shape{N, H, W, C})); // locomotiv feature is NHWC
+ ASSERT_EQ(loco::DataType::FLOAT32, enc_data->dtype());
+ ASSERT_EQ((Shape{N, H, W, C}), *(enc_data->shape())); // locomotiv feature is NHWC
auto enc_buf = enc_data->as_f32_bufptr();
for (uint32_t n = 0; n < N; ++n)
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
for (uint32_t c = 0; c < C; ++c)
- ASSERT_FLOAT_EQ(pull_buf.at(Index{c, h, n, w}), enc_buf->at(Index{n, h, w, c}));
+ ASSERT_FLOAT_EQ(enc_buf->at(Index{n, h, w, c}), pull_buf.at(Index{c, h, n, w}));
- ASSERT_EQ(locomotiv::annot_domain(enc), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(enc));
// FeatureDecode
auto dec = feature_decode_layer(enc, CHNW);
@@ -210,14 +210,14 @@ TEST_F(NodeExecution_FeatureCodec, f32)
// Test FeatureDecode: Encode -> Decode == identity
auto dec_data = locomotiv::annot_data(dec);
ASSERT_NE(dec_data, nullptr);
- ASSERT_EQ(dec_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(dec_data->shape()), (Shape{C, H, N, W}));
+ ASSERT_EQ(loco::DataType::FLOAT32, dec_data->dtype());
+ ASSERT_EQ((Shape{C, H, N, W}), *(dec_data->shape()));
auto dec_buf = dec_data->as_f32_bufptr();
for (uint32_t n = 0; n < N; ++n)
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
for (uint32_t c = 0; c < C; ++c)
- ASSERT_FLOAT_EQ(pull_buf.at(Index{c, h, n, w}), dec_buf->at(Index{c, h, n, w}));
+ ASSERT_FLOAT_EQ(dec_buf->at(Index{c, h, n, w}), pull_buf.at(Index{c, h, n, w}));
- ASSERT_EQ(locomotiv::annot_domain(dec), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(dec));
}
diff --git a/compiler/locomotiv/src/Node/FilterEncode.test.cpp b/compiler/locomotiv/src/Node/FilterEncode.test.cpp
index 79b8308e2..dcca94993 100644
--- a/compiler/locomotiv/src/Node/FilterEncode.test.cpp
+++ b/compiler/locomotiv/src/Node/FilterEncode.test.cpp
@@ -77,16 +77,16 @@ TEST(NodeExecution_FilterEncode, s32)
auto enc_data = locomotiv::annot_data(enc);
ASSERT_NE(enc_data, nullptr);
- ASSERT_EQ(enc_data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(enc_data->shape()), (Shape{N, H, W, C})); // locomotiv filter is NHWC
+ ASSERT_EQ(loco::DataType::S32, enc_data->dtype());
+ ASSERT_EQ((Shape{N, H, W, C}), *(enc_data->shape())); // locomotiv filter is NHWC
auto enc_buf = enc_data->as_s32_bufptr();
for (uint32_t n = 0; n < N; ++n)
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
for (uint32_t c = 0; c < C; ++c)
- ASSERT_EQ(pull_buf.at(Index{n, c, h, w}), enc_buf->at(Index{n, h, w, c}));
+ ASSERT_EQ(enc_buf->at(Index{n, h, w, c}), pull_buf.at(Index{n, c, h, w}));
- ASSERT_EQ(locomotiv::annot_domain(enc), loco::Domain::Filter);
+ ASSERT_EQ(loco::Domain::Filter, locomotiv::annot_domain(enc));
}
TEST(NodeExecution_FilterEncode, f32)
@@ -131,14 +131,14 @@ TEST(NodeExecution_FilterEncode, f32)
auto enc_data = locomotiv::annot_data(enc);
ASSERT_NE(enc_data, nullptr);
- ASSERT_EQ(enc_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(enc_data->shape()), (Shape{N, H, W, C})); // locomotiv filter is NHWC
+ ASSERT_EQ(loco::DataType::FLOAT32, enc_data->dtype());
+ ASSERT_EQ((Shape{N, H, W, C}), *(enc_data->shape())); // locomotiv filter is NHWC
auto enc_buf = enc_data->as_f32_bufptr();
for (uint32_t n = 0; n < N; ++n)
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
for (uint32_t c = 0; c < C; ++c)
- ASSERT_FLOAT_EQ(pull_buf.at(Index{c, h, n, w}), enc_buf->at(Index{n, h, w, c}));
+ ASSERT_FLOAT_EQ(enc_buf->at(Index{n, h, w, c}), pull_buf.at(Index{c, h, n, w}));
- ASSERT_EQ(locomotiv::annot_domain(enc), loco::Domain::Filter);
+ ASSERT_EQ(loco::Domain::Filter, locomotiv::annot_domain(enc));
}
diff --git a/compiler/locomotiv/src/Node/Forward.test.cpp b/compiler/locomotiv/src/Node/Forward.test.cpp
index 73d37139a..5116a9596 100644
--- a/compiler/locomotiv/src/Node/Forward.test.cpp
+++ b/compiler/locomotiv/src/Node/Forward.test.cpp
@@ -52,11 +52,11 @@ TEST(NodeExecution_Forward, s32)
auto forward_data = locomotiv::annot_data(forward);
ASSERT_NE(forward_data, nullptr);
- ASSERT_EQ(forward_data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(forward_data->shape()), Shape{1});
- ASSERT_EQ(forward_data->as_s32_bufptr()->at(Index{0}), pull_buf.at(Index{0}));
+ ASSERT_EQ(loco::DataType::S32, forward_data->dtype());
+ ASSERT_EQ(Shape{1}, *(forward_data->shape()));
+ ASSERT_EQ(pull_buf.at(Index{0}), forward_data->as_s32_bufptr()->at(Index{0}));
- ASSERT_EQ(locomotiv::annot_domain(forward), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(forward));
}
TEST(NodeExecution_Forward, f32)
@@ -80,9 +80,9 @@ TEST(NodeExecution_Forward, f32)
auto forward_data = locomotiv::annot_data(forward);
ASSERT_NE(forward_data, nullptr);
- ASSERT_EQ(forward_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(forward_data->shape()), Shape{1});
- ASSERT_FLOAT_EQ(forward_data->as_f32_bufptr()->at(Index{0}), pull_buf.at(Index{0}));
+ ASSERT_EQ(loco::DataType::FLOAT32, forward_data->dtype());
+ ASSERT_EQ(Shape{1}, *(forward_data->shape()));
+ ASSERT_FLOAT_EQ(pull_buf.at(Index{0}), forward_data->as_f32_bufptr()->at(Index{0}));
- ASSERT_EQ(locomotiv::annot_domain(forward), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(forward));
}
diff --git a/compiler/locomotiv/src/Node/MatMul.test.cpp b/compiler/locomotiv/src/Node/MatMul.test.cpp
index bd480f7c7..f1f3a52d3 100644
--- a/compiler/locomotiv/src/Node/MatMul.test.cpp
+++ b/compiler/locomotiv/src/Node/MatMul.test.cpp
@@ -92,14 +92,14 @@ void run_test(const T *lhs, const T *rhs, const T *expected_output, const Shape
{
const auto &ind = e.current();
if (expected_datatype == loco::DataType::FLOAT32)
- ASSERT_FLOAT_EQ(mat_mul_result->as_f32_bufptr()->at(ind), out_overlay.at(ind));
+ ASSERT_FLOAT_EQ(out_overlay.at(ind), mat_mul_result->as_f32_bufptr()->at(ind));
else if (expected_datatype == loco::DataType::S32)
- ASSERT_EQ(mat_mul_result->as_s32_bufptr()->at(ind), out_overlay.at(ind));
+ ASSERT_EQ(out_overlay.at(ind), mat_mul_result->as_s32_bufptr()->at(ind));
else
throw std::runtime_error("NYI for these DataTypes");
}
- ASSERT_EQ(locomotiv::annot_domain(mat_mul), loco::Domain::Matrix);
+ ASSERT_EQ(loco::Domain::Matrix, locomotiv::annot_domain(mat_mul));
}
} // namespace
diff --git a/compiler/locomotiv/src/Node/MatrixCodec.test.cpp b/compiler/locomotiv/src/Node/MatrixCodec.test.cpp
index 8fc5d593b..da4afeded 100644
--- a/compiler/locomotiv/src/Node/MatrixCodec.test.cpp
+++ b/compiler/locomotiv/src/Node/MatrixCodec.test.cpp
@@ -124,14 +124,14 @@ TEST_F(NodeExecution_MatrixCodec, HW_s32)
// Test MatrixEncode
auto enc_data = locomotiv::annot_data(enc);
ASSERT_NE(enc_data, nullptr);
- ASSERT_EQ(enc_data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(enc_data->shape()), (Shape{H, W})); // locomotiv matrix is HW
+ ASSERT_EQ(loco::DataType::S32, enc_data->dtype());
+ ASSERT_EQ((Shape{H, W}), *(enc_data->shape())); // locomotiv matrix is HW
auto enc_buf = enc_data->as_s32_bufptr();
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
- ASSERT_EQ(pull_buf.at(Index{h, w}), enc_buf->at(Index{h, w}));
+ ASSERT_EQ(enc_buf->at(Index{h, w}), pull_buf.at(Index{h, w}));
- ASSERT_EQ(locomotiv::annot_domain(enc), loco::Domain::Matrix);
+ ASSERT_EQ(loco::Domain::Matrix, locomotiv::annot_domain(enc));
// MatrixDecode
auto dec = matrix_decode_layer(enc, HW);
@@ -140,14 +140,14 @@ TEST_F(NodeExecution_MatrixCodec, HW_s32)
// Test MatrixDecode: Encode -> Decode == identity
auto dec_data = locomotiv::annot_data(dec);
ASSERT_NE(dec_data, nullptr);
- ASSERT_EQ(dec_data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(dec_data->shape()), (Shape{H, W}));
+ ASSERT_EQ(loco::DataType::S32, dec_data->dtype());
+ ASSERT_EQ((Shape{H, W}), *(dec_data->shape()));
auto dec_buf = dec_data->as_s32_bufptr();
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
- ASSERT_EQ(pull_buf.at(Index{h, w}), dec_buf->at(Index{h, w}));
+ ASSERT_EQ(dec_buf->at(Index{h, w}), pull_buf.at(Index{h, w}));
- ASSERT_EQ(locomotiv::annot_domain(dec), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(dec));
}
TEST_F(NodeExecution_MatrixCodec, WH_f32)
@@ -180,14 +180,14 @@ TEST_F(NodeExecution_MatrixCodec, WH_f32)
// Test MatrixEncode
auto enc_data = locomotiv::annot_data(enc);
ASSERT_NE(enc_data, nullptr);
- ASSERT_EQ(enc_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(enc_data->shape()), (Shape{H, W})); // locomotiv matrix is HW
+ ASSERT_EQ(loco::DataType::FLOAT32, enc_data->dtype());
+ ASSERT_EQ((Shape{H, W}), *(enc_data->shape())); // locomotiv matrix is HW
auto enc_buf = enc_data->as_f32_bufptr();
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
- ASSERT_FLOAT_EQ(pull_buf.at(Index{w, h}), enc_buf->at(Index{h, w}));
+ ASSERT_FLOAT_EQ(enc_buf->at(Index{h, w}), pull_buf.at(Index{w, h}));
- ASSERT_EQ(locomotiv::annot_domain(enc), loco::Domain::Matrix);
+ ASSERT_EQ(loco::Domain::Matrix, locomotiv::annot_domain(enc));
// MatrixDecode
auto dec = matrix_decode_layer(enc, WH);
@@ -196,12 +196,12 @@ TEST_F(NodeExecution_MatrixCodec, WH_f32)
// Test MatrixDecode: Encode -> Decode == identity
auto dec_data = locomotiv::annot_data(dec);
ASSERT_NE(dec_data, nullptr);
- ASSERT_EQ(dec_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(dec_data->shape()), (Shape{W, H}));
+ ASSERT_EQ(loco::DataType::FLOAT32, dec_data->dtype());
+ ASSERT_EQ((Shape{W, H}), *(dec_data->shape()));
auto dec_buf = dec_data->as_f32_bufptr();
for (uint32_t h = 0; h < H; ++h)
for (uint32_t w = 0; w < W; ++w)
- ASSERT_FLOAT_EQ(pull_buf.at(Index{w, h}), dec_buf->at(Index{w, h}));
+ ASSERT_FLOAT_EQ(dec_buf->at(Index{w, h}), pull_buf.at(Index{w, h}));
- ASSERT_EQ(locomotiv::annot_domain(dec), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(dec));
}
diff --git a/compiler/locomotiv/src/Node/MaxPool2D.test.cpp b/compiler/locomotiv/src/Node/MaxPool2D.test.cpp
index 9d877a96b..5046d4a6e 100644
--- a/compiler/locomotiv/src/Node/MaxPool2D.test.cpp
+++ b/compiler/locomotiv/src/Node/MaxPool2D.test.cpp
@@ -86,10 +86,10 @@ void run_test(const float *ifm, const float *expected_ofm, const Shape &ifm_shap
for (nncc::core::ADT::tensor::IndexEnumerator e{ofm_shape}; e.valid(); e.advance())
{
const auto &ind = e.current();
- ASSERT_FLOAT_EQ(maxpool2d_data->as_f32_bufptr()->at(ind), ofm_overlay.at(ind));
+ ASSERT_FLOAT_EQ(ofm_overlay.at(ind), maxpool2d_data->as_f32_bufptr()->at(ind));
}
- ASSERT_EQ(locomotiv::annot_domain(maxpool2d), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(maxpool2d));
}
} // namespace
diff --git a/compiler/locomotiv/src/Node/Push.test.cpp b/compiler/locomotiv/src/Node/Push.test.cpp
index be8f1e4e9..e9f56056a 100644
--- a/compiler/locomotiv/src/Node/Push.test.cpp
+++ b/compiler/locomotiv/src/Node/Push.test.cpp
@@ -52,11 +52,11 @@ TEST(NodeExecution_Push, s32)
auto push_data = locomotiv::annot_data(push);
ASSERT_NE(push_data, nullptr);
- ASSERT_EQ(push_data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(push_data->shape()), Shape{1});
- ASSERT_EQ(push_data->as_s32_bufptr()->at(Index{0}), pull_buf.at(Index{0}));
+ ASSERT_EQ(loco::DataType::S32, push_data->dtype());
+ ASSERT_EQ(Shape{1}, *(push_data->shape()));
+ ASSERT_EQ(pull_buf.at(Index{0}), push_data->as_s32_bufptr()->at(Index{0}));
- ASSERT_EQ(locomotiv::annot_domain(push), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(push));
}
TEST(NodeExecution_Push, f32)
@@ -80,9 +80,9 @@ TEST(NodeExecution_Push, f32)
auto push_data = locomotiv::annot_data(push);
ASSERT_NE(push_data, nullptr);
- ASSERT_EQ(push_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(push_data->shape()), Shape{1});
- ASSERT_FLOAT_EQ(push_data->as_f32_bufptr()->at(Index{0}), pull_buf.at(Index{0}));
+ ASSERT_EQ(loco::DataType::FLOAT32, push_data->dtype());
+ ASSERT_EQ(Shape{1}, *(push_data->shape()));
+ ASSERT_FLOAT_EQ(pull_buf.at(Index{0}), push_data->as_f32_bufptr()->at(Index{0}));
- ASSERT_EQ(locomotiv::annot_domain(push), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(push));
}
diff --git a/compiler/locomotiv/src/Node/ReLU.test.cpp b/compiler/locomotiv/src/Node/ReLU.test.cpp
index 0ddd01d0f..d2f928d1d 100644
--- a/compiler/locomotiv/src/Node/ReLU.test.cpp
+++ b/compiler/locomotiv/src/Node/ReLU.test.cpp
@@ -53,10 +53,10 @@ TEST(NodeExecution_ReLU, f32)
auto relu_data = locomotiv::annot_data(relu);
ASSERT_NE(relu_data, nullptr);
- ASSERT_EQ(relu_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(relu_data->shape()), Shape{2});
- ASSERT_FLOAT_EQ(relu_data->as_f32_bufptr()->at(Index{0}), 0.0f);
- ASSERT_FLOAT_EQ(relu_data->as_f32_bufptr()->at(Index{1}), 10.0f);
+ ASSERT_EQ(loco::DataType::FLOAT32, relu_data->dtype());
+ ASSERT_EQ(Shape{2}, *(relu_data->shape()));
+ ASSERT_FLOAT_EQ(0.0f, relu_data->as_f32_bufptr()->at(Index{0}));
+ ASSERT_FLOAT_EQ(10.0f, relu_data->as_f32_bufptr()->at(Index{1}));
- ASSERT_EQ(locomotiv::annot_domain(relu), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(relu));
}
diff --git a/compiler/locomotiv/src/Node/ReLU6.test.cpp b/compiler/locomotiv/src/Node/ReLU6.test.cpp
index 07f6af23f..b2362b1f4 100644
--- a/compiler/locomotiv/src/Node/ReLU6.test.cpp
+++ b/compiler/locomotiv/src/Node/ReLU6.test.cpp
@@ -55,12 +55,12 @@ TEST(NodeExecution_ReLU6, f32)
auto relu6_data = locomotiv::annot_data(relu6);
ASSERT_NE(relu6_data, nullptr);
- ASSERT_EQ(relu6_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(relu6_data->shape()), Shape({2, 2}));
- ASSERT_FLOAT_EQ(relu6_data->as_f32_bufptr()->at(Index{0, 0}), 0.0f);
- ASSERT_FLOAT_EQ(relu6_data->as_f32_bufptr()->at(Index{0, 1}), 6.0f);
- ASSERT_FLOAT_EQ(relu6_data->as_f32_bufptr()->at(Index{1, 0}), 6.0f);
- ASSERT_FLOAT_EQ(relu6_data->as_f32_bufptr()->at(Index{1, 1}), 0.0f);
+ ASSERT_EQ(loco::DataType::FLOAT32, relu6_data->dtype());
+ ASSERT_EQ(Shape({2, 2}), *(relu6_data->shape()));
+ ASSERT_FLOAT_EQ(0.0f, relu6_data->as_f32_bufptr()->at(Index{0, 0}));
+ ASSERT_FLOAT_EQ(6.0f, relu6_data->as_f32_bufptr()->at(Index{0, 1}));
+ ASSERT_FLOAT_EQ(6.0f, relu6_data->as_f32_bufptr()->at(Index{1, 0}));
+ ASSERT_FLOAT_EQ(0.0f, relu6_data->as_f32_bufptr()->at(Index{1, 1}));
- ASSERT_EQ(locomotiv::annot_domain(relu6), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(relu6));
}
diff --git a/compiler/locomotiv/src/Node/Reshape.test.cpp b/compiler/locomotiv/src/Node/Reshape.test.cpp
index 8e54a16df..8aeb4656f 100644
--- a/compiler/locomotiv/src/Node/Reshape.test.cpp
+++ b/compiler/locomotiv/src/Node/Reshape.test.cpp
@@ -56,12 +56,12 @@ TEST(NodeExecution_Reshape, f32)
auto reshape_data = locomotiv::annot_data(reshape);
ASSERT_NE(reshape_data, nullptr);
- ASSERT_EQ(reshape_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(reshape_data->shape()), (Shape{2, 2}));
- ASSERT_FLOAT_EQ(reshape_data->as_f32_bufptr()->at(Index{0, 0}), 0.0f);
- ASSERT_FLOAT_EQ(reshape_data->as_f32_bufptr()->at(Index{0, 1}), 1.1f);
- ASSERT_FLOAT_EQ(reshape_data->as_f32_bufptr()->at(Index{1, 0}), 2.2f);
- ASSERT_FLOAT_EQ(reshape_data->as_f32_bufptr()->at(Index{1, 1}), 3.3f);
+ ASSERT_EQ(loco::DataType::FLOAT32, reshape_data->dtype());
+ ASSERT_EQ((Shape{2, 2}), *(reshape_data->shape()));
+ ASSERT_FLOAT_EQ(0.0f, reshape_data->as_f32_bufptr()->at(Index{0, 0}));
+ ASSERT_FLOAT_EQ(1.1f, reshape_data->as_f32_bufptr()->at(Index{0, 1}));
+ ASSERT_FLOAT_EQ(2.2f, reshape_data->as_f32_bufptr()->at(Index{1, 0}));
+ ASSERT_FLOAT_EQ(3.3f, reshape_data->as_f32_bufptr()->at(Index{1, 1}));
- ASSERT_EQ(locomotiv::annot_domain(reshape), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(reshape));
}
diff --git a/compiler/locomotiv/src/Node/Softmax.test.cpp b/compiler/locomotiv/src/Node/Softmax.test.cpp
index 21d240275..257279338 100644
--- a/compiler/locomotiv/src/Node/Softmax.test.cpp
+++ b/compiler/locomotiv/src/Node/Softmax.test.cpp
@@ -57,12 +57,12 @@ TEST(NodeExecution_Softmax, f32)
auto kShape = Shape{2, 2};
auto softmax_data = locomotiv::annot_data(softmax);
ASSERT_NE(softmax_data, nullptr);
- ASSERT_EQ(softmax_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(softmax_data->shape()), kShape);
- ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{0, 0}), 0.5f);
- ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{0, 1}), 0.5f);
- ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{1, 0}), 0.5f);
- ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{1, 1}), 0.5f);
+ ASSERT_EQ(loco::DataType::FLOAT32, softmax_data->dtype());
+ ASSERT_EQ(kShape, *(softmax_data->shape()));
+ ASSERT_FLOAT_EQ(0.5f, softmax_data->as_f32_bufptr()->at(Index{0, 0}));
+ ASSERT_FLOAT_EQ(0.5f, softmax_data->as_f32_bufptr()->at(Index{0, 1}));
+ ASSERT_FLOAT_EQ(0.5f, softmax_data->as_f32_bufptr()->at(Index{1, 0}));
+ ASSERT_FLOAT_EQ(0.5f, softmax_data->as_f32_bufptr()->at(Index{1, 1}));
- ASSERT_EQ(locomotiv::annot_domain(softmax), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(softmax));
}
diff --git a/compiler/locomotiv/src/Node/Tanh.test.cpp b/compiler/locomotiv/src/Node/Tanh.test.cpp
index 78c3a13ba..96c1e7f1f 100644
--- a/compiler/locomotiv/src/Node/Tanh.test.cpp
+++ b/compiler/locomotiv/src/Node/Tanh.test.cpp
@@ -54,11 +54,11 @@ TEST(NodeExecution_Tanh, f32)
auto tanh_data = locomotiv::annot_data(tanh);
ASSERT_NE(tanh_data, nullptr);
- ASSERT_EQ(tanh_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(tanh_data->shape()), Shape{3});
- ASSERT_FLOAT_EQ(tanh_data->as_f32_bufptr()->at(Index{0}), 0.0f);
- ASSERT_FLOAT_EQ(tanh_data->as_f32_bufptr()->at(Index{1}), 0.761594f);
- ASSERT_FLOAT_EQ(tanh_data->as_f32_bufptr()->at(Index{2}), -0.761594f);
+ ASSERT_EQ(loco::DataType::FLOAT32, tanh_data->dtype());
+ ASSERT_EQ(Shape{3}, *(tanh_data->shape()));
+ ASSERT_FLOAT_EQ(0.0f, tanh_data->as_f32_bufptr()->at(Index{0}));
+ ASSERT_FLOAT_EQ(0.761594f, tanh_data->as_f32_bufptr()->at(Index{1}));
+ ASSERT_FLOAT_EQ(-0.761594f, tanh_data->as_f32_bufptr()->at(Index{2}));
- ASSERT_EQ(locomotiv::annot_domain(tanh), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(tanh));
}
diff --git a/compiler/locomotiv/src/Node/TensorBroadcast.test.cpp b/compiler/locomotiv/src/Node/TensorBroadcast.test.cpp
index e8347d737..52f7c8517 100644
--- a/compiler/locomotiv/src/Node/TensorBroadcast.test.cpp
+++ b/compiler/locomotiv/src/Node/TensorBroadcast.test.cpp
@@ -54,10 +54,10 @@ TEST(NodeExecution_TensorBroadcast, f32)
auto broadcast_data = locomotiv::annot_data(broadcast);
ASSERT_NE(broadcast_data, nullptr);
- ASSERT_EQ(broadcast_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ((*(broadcast_data->shape())), (Shape{2, 1}));
- ASSERT_FLOAT_EQ(broadcast_data->as_f32_bufptr()->at(Index{0, 0}), -1.0f);
- ASSERT_FLOAT_EQ(broadcast_data->as_f32_bufptr()->at(Index{1, 0}), -1.0f);
+ ASSERT_EQ(loco::DataType::FLOAT32, broadcast_data->dtype());
+ ASSERT_EQ((Shape{2, 1}), (*(broadcast_data->shape())));
+ ASSERT_FLOAT_EQ(-1.0f, broadcast_data->as_f32_bufptr()->at(Index{0, 0}));
+ ASSERT_FLOAT_EQ(-1.0f, broadcast_data->as_f32_bufptr()->at(Index{1, 0}));
- ASSERT_EQ(locomotiv::annot_domain(broadcast), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(broadcast));
}
diff --git a/compiler/locomotiv/src/Node/TensorConcat.cpp b/compiler/locomotiv/src/Node/TensorConcat.cpp
index 5097e55c6..3187a7f75 100644
--- a/compiler/locomotiv/src/Node/TensorConcat.cpp
+++ b/compiler/locomotiv/src/Node/TensorConcat.cpp
@@ -40,6 +40,8 @@ namespace locomotiv
void NodeExecution::execute(loco::TensorConcat *tensor_concat)
{
+ validate(tensor_concat, "TensorConcat is nullptr");
+
auto lhs_data = annot_data(tensor_concat->lhs());
auto rhs_data = annot_data(tensor_concat->rhs());
auto axis = tensor_concat->axis();
diff --git a/compiler/locomotiv/src/Node/TensorConcat.test.cpp b/compiler/locomotiv/src/Node/TensorConcat.test.cpp
index d71b51524..e9060e36f 100644
--- a/compiler/locomotiv/src/Node/TensorConcat.test.cpp
+++ b/compiler/locomotiv/src/Node/TensorConcat.test.cpp
@@ -65,14 +65,14 @@ TEST(NodeExecution_TensorConcat, f32)
auto concat_data = locomotiv::annot_data(tconcat);
ASSERT_NE(concat_data, nullptr);
- ASSERT_EQ(concat_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ((*(concat_data->shape())), (Shape{2, 2}));
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{0, 0}), -1.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{0, 1}), -2.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{1, 0}), 3.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{1, 1}), 4.0f);
-
- ASSERT_EQ(locomotiv::annot_domain(tconcat), loco::Domain::Tensor);
+ ASSERT_EQ(loco::DataType::FLOAT32, concat_data->dtype());
+ ASSERT_EQ((Shape{2, 2}), (*(concat_data->shape())));
+ ASSERT_FLOAT_EQ(-1.0f, concat_data->as_f32_bufptr()->at(Index{0, 0}));
+ ASSERT_FLOAT_EQ(-2.0f, concat_data->as_f32_bufptr()->at(Index{0, 1}));
+ ASSERT_FLOAT_EQ(3.0f, concat_data->as_f32_bufptr()->at(Index{1, 0}));
+ ASSERT_FLOAT_EQ(4.0f, concat_data->as_f32_bufptr()->at(Index{1, 1}));
+
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(tconcat));
}
TEST(NodeExecution_TensorConcat, f32_2)
@@ -113,16 +113,16 @@ TEST(NodeExecution_TensorConcat, f32_2)
auto concat_data = locomotiv::annot_data(tconcat);
ASSERT_NE(concat_data, nullptr);
- ASSERT_EQ(concat_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ((*(concat_data->shape())), (Shape{4, 2}));
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{0, 0}), -1.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{0, 1}), -2.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{1, 0}), 3.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{1, 1}), 4.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{2, 0}), -3.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{2, 1}), -4.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{3, 0}), 5.0f);
- ASSERT_FLOAT_EQ(concat_data->as_f32_bufptr()->at(Index{3, 1}), 6.0f);
-
- ASSERT_EQ(locomotiv::annot_domain(tconcat), loco::Domain::Tensor);
+ ASSERT_EQ(loco::DataType::FLOAT32, concat_data->dtype());
+ ASSERT_EQ((Shape{4, 2}), (*(concat_data->shape())));
+ ASSERT_FLOAT_EQ(-1.0f, concat_data->as_f32_bufptr()->at(Index{0, 0}));
+ ASSERT_FLOAT_EQ(-2.0f, concat_data->as_f32_bufptr()->at(Index{0, 1}));
+ ASSERT_FLOAT_EQ(3.0f, concat_data->as_f32_bufptr()->at(Index{1, 0}));
+ ASSERT_FLOAT_EQ(4.0f, concat_data->as_f32_bufptr()->at(Index{1, 1}));
+ ASSERT_FLOAT_EQ(-3.0f, concat_data->as_f32_bufptr()->at(Index{2, 0}));
+ ASSERT_FLOAT_EQ(-4.0f, concat_data->as_f32_bufptr()->at(Index{2, 1}));
+ ASSERT_FLOAT_EQ(5.0f, concat_data->as_f32_bufptr()->at(Index{3, 0}));
+ ASSERT_FLOAT_EQ(6.0f, concat_data->as_f32_bufptr()->at(Index{3, 1}));
+
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(tconcat));
}
diff --git a/compiler/locomotiv/src/Node/TensorConstantPad.cpp b/compiler/locomotiv/src/Node/TensorConstantPad.cpp
index 989afaf94..cd81a3a4d 100644
--- a/compiler/locomotiv/src/Node/TensorConstantPad.cpp
+++ b/compiler/locomotiv/src/Node/TensorConstantPad.cpp
@@ -36,6 +36,8 @@ namespace locomotiv
void NodeExecution::execute(loco::TensorConstantPad *pad)
{
+ validate(pad, "TensorConstantPad is nullptr");
+
auto input_data = annot_data(pad->input());
auto input_domain = annot_domain(pad->input());
validate(input_data, "Input not ready");
diff --git a/compiler/locomotiv/src/Node/TensorConstantPad.test.cpp b/compiler/locomotiv/src/Node/TensorConstantPad.test.cpp
index 0f60c5f85..64b913014 100644
--- a/compiler/locomotiv/src/Node/TensorConstantPad.test.cpp
+++ b/compiler/locomotiv/src/Node/TensorConstantPad.test.cpp
@@ -74,16 +74,16 @@ TEST(NodeExecution_Pad, tensor_constant_pad_4_dim)
auto pad_data = locomotiv::annot_data(pad);
ASSERT_NE(pad_data, nullptr);
- ASSERT_EQ(pad_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(pad_data->shape()), Shape({1, 6, 4, 1}));
+ ASSERT_EQ(loco::DataType::FLOAT32, pad_data->dtype());
+ ASSERT_EQ(Shape({1, 6, 4, 1}), *(pad_data->shape()));
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{0, 3, 1, 0}), 1.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{0, 3, 2, 0}), 2.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{0, 4, 1, 0}), 3.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{0, 4, 2, 0}), 4.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{0, 0, 0, 0}), 0.0f);
+ ASSERT_FLOAT_EQ(1.0f, pad_data->as_f32_bufptr()->at(Index{0, 3, 1, 0}));
+ ASSERT_FLOAT_EQ(2.0f, pad_data->as_f32_bufptr()->at(Index{0, 3, 2, 0}));
+ ASSERT_FLOAT_EQ(3.0f, pad_data->as_f32_bufptr()->at(Index{0, 4, 1, 0}));
+ ASSERT_FLOAT_EQ(4.0f, pad_data->as_f32_bufptr()->at(Index{0, 4, 2, 0}));
+ ASSERT_FLOAT_EQ(0.0f, pad_data->as_f32_bufptr()->at(Index{0, 0, 0, 0}));
- ASSERT_EQ(locomotiv::annot_domain(pad), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(pad));
}
TEST(NodeExecution_Pad, tensor_constant_pad_1_dim)
@@ -122,17 +122,17 @@ TEST(NodeExecution_Pad, tensor_constant_pad_1_dim)
auto pad_data = locomotiv::annot_data(pad);
ASSERT_NE(pad_data, nullptr);
- ASSERT_EQ(pad_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(pad_data->shape()), Shape({6}));
+ ASSERT_EQ(loco::DataType::FLOAT32, pad_data->dtype());
+ ASSERT_EQ(Shape({6}), *(pad_data->shape()));
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{0}), 0.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1}), 0.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{2}), 1.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{3}), 5.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{4}), 3.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{5}), 0.0f);
+ ASSERT_FLOAT_EQ(0.0f, pad_data->as_f32_bufptr()->at(Index{0}));
+ ASSERT_FLOAT_EQ(0.0f, pad_data->as_f32_bufptr()->at(Index{1}));
+ ASSERT_FLOAT_EQ(1.0f, pad_data->as_f32_bufptr()->at(Index{2}));
+ ASSERT_FLOAT_EQ(5.0f, pad_data->as_f32_bufptr()->at(Index{3}));
+ ASSERT_FLOAT_EQ(3.0f, pad_data->as_f32_bufptr()->at(Index{4}));
+ ASSERT_FLOAT_EQ(0.0f, pad_data->as_f32_bufptr()->at(Index{5}));
- ASSERT_EQ(locomotiv::annot_domain(pad), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(pad));
}
TEST(NodeExecution_Pad, tensor_constant_pad_6_dim)
@@ -200,19 +200,19 @@ TEST(NodeExecution_Pad, tensor_constant_pad_6_dim)
auto pad_data = locomotiv::annot_data(pad);
ASSERT_NE(pad_data, nullptr);
- ASSERT_EQ(pad_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(pad_data->shape()), Shape({4, 1, 6, 5, 1, 5}));
-
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 2, 0, 1}), 1.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 2, 0, 2}), 2.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 3, 0, 1}), 3.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 3, 0, 2}), 4.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 2, 0, 1}), 5.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 2, 0, 2}), 6.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 3, 0, 1}), 7.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 3, 0, 2}), 8.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 3, 2, 0, 1}), 9.0f);
- ASSERT_FLOAT_EQ(pad_data->as_f32_bufptr()->at(Index{1, 0, 3, 2, 0, 2}), 10.0f);
-
- ASSERT_EQ(locomotiv::annot_domain(pad), loco::Domain::Tensor);
+ ASSERT_EQ(loco::DataType::FLOAT32, pad_data->dtype());
+ ASSERT_EQ(Shape({4, 1, 6, 5, 1, 5}), *(pad_data->shape()));
+
+ ASSERT_FLOAT_EQ(1.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 2, 0, 1}));
+ ASSERT_FLOAT_EQ(2.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 2, 0, 2}));
+ ASSERT_FLOAT_EQ(3.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 3, 0, 1}));
+ ASSERT_FLOAT_EQ(4.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 1, 3, 0, 2}));
+ ASSERT_FLOAT_EQ(5.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 2, 0, 1}));
+ ASSERT_FLOAT_EQ(6.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 2, 0, 2}));
+ ASSERT_FLOAT_EQ(7.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 3, 0, 1}));
+ ASSERT_FLOAT_EQ(8.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 2, 3, 0, 2}));
+ ASSERT_FLOAT_EQ(9.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 3, 2, 0, 1}));
+ ASSERT_FLOAT_EQ(10.0f, pad_data->as_f32_bufptr()->at(Index{1, 0, 3, 2, 0, 2}));
+
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(pad));
}
diff --git a/compiler/locomotiv/src/Node/TensorReduce.cpp b/compiler/locomotiv/src/Node/TensorReduce.cpp
index fae7a75c5..a60ebd890 100644
--- a/compiler/locomotiv/src/Node/TensorReduce.cpp
+++ b/compiler/locomotiv/src/Node/TensorReduce.cpp
@@ -121,9 +121,8 @@ namespace locomotiv
void NodeExecution::execute(loco::TensorReduce *node)
{
auto input_data = annot_data(node->input());
- auto input_shape = input_data->shape();
-
validate(input_data, "Input not ready");
+ auto input_shape = input_data->shape();
validate(annot_domain(node->input()) == loco::Domain::Tensor,
"Input domain of TensorReduce is not Tensor");
diff --git a/compiler/locomotiv/src/Node/TensorReduce.test.cpp b/compiler/locomotiv/src/Node/TensorReduce.test.cpp
index 68398cacd..d0e73a248 100644
--- a/compiler/locomotiv/src/Node/TensorReduce.test.cpp
+++ b/compiler/locomotiv/src/Node/TensorReduce.test.cpp
@@ -60,12 +60,12 @@ TEST(NodeExecution_Fixed_Reduce_Mean, f32_0)
auto kShape = Shape{1, 1, 2};
auto reduce_data = locomotiv::annot_data(reduce_node);
ASSERT_NE(reduce_data, nullptr);
- ASSERT_EQ(reduce_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(reduce_data->shape()), kShape);
- ASSERT_FLOAT_EQ(reduce_data->as_f32_bufptr()->at(Index{0, 0, 0}), 3.3f);
- ASSERT_FLOAT_EQ(reduce_data->as_f32_bufptr()->at(Index{0, 0, 1}), 4.4f);
+ ASSERT_EQ(loco::DataType::FLOAT32, reduce_data->dtype());
+ ASSERT_EQ(kShape, *(reduce_data->shape()));
+ ASSERT_FLOAT_EQ(3.3f, reduce_data->as_f32_bufptr()->at(Index{0, 0, 0}));
+ ASSERT_FLOAT_EQ(4.4f, reduce_data->as_f32_bufptr()->at(Index{0, 0, 1}));
- ASSERT_EQ(locomotiv::annot_domain(reduce_node), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(reduce_node));
}
TEST(NodeExecution_Fixed_Reduce_Mean, f32_1)
@@ -96,9 +96,9 @@ TEST(NodeExecution_Fixed_Reduce_Mean, f32_1)
auto kShape = Shape{1, 1, 1};
auto reduce_data = locomotiv::annot_data(reduce_node);
ASSERT_NE(reduce_data, nullptr);
- ASSERT_EQ(reduce_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(reduce_data->shape()), kShape);
- ASSERT_FLOAT_EQ(reduce_data->as_f32_bufptr()->at(Index{0, 0, 0}), 3.85f);
+ ASSERT_EQ(loco::DataType::FLOAT32, reduce_data->dtype());
+ ASSERT_EQ(kShape, *(reduce_data->shape()));
+ ASSERT_FLOAT_EQ(3.85f, reduce_data->as_f32_bufptr()->at(Index{0, 0, 0}));
- ASSERT_EQ(locomotiv::annot_domain(reduce_node), loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, locomotiv::annot_domain(reduce_node));
}
diff --git a/compiler/locomotiv/src/Node/TransposedConv2D.test.cpp b/compiler/locomotiv/src/Node/TransposedConv2D.test.cpp
index bd955a06b..ef759f51b 100644
--- a/compiler/locomotiv/src/Node/TransposedConv2D.test.cpp
+++ b/compiler/locomotiv/src/Node/TransposedConv2D.test.cpp
@@ -101,10 +101,10 @@ void run_test(const float *ifm, const float *ker, const float *expected_ofm, con
for (nncc::core::ADT::tensor::IndexEnumerator e{ofm_shape}; e.valid(); e.advance())
{
const auto &ind = e.current();
- ASSERT_FLOAT_EQ(conv2d_result->as_f32_bufptr()->at(ind), ofm_overlay.at(ind));
+ ASSERT_FLOAT_EQ(ofm_overlay.at(ind), conv2d_result->as_f32_bufptr()->at(ind));
}
- ASSERT_EQ(locomotiv::annot_domain(tr_conv2d), loco::Domain::Feature);
+ ASSERT_EQ(loco::Domain::Feature, locomotiv::annot_domain(tr_conv2d));
}
} // namespace
diff --git a/compiler/locomotiv/src/NodeData.test.cpp b/compiler/locomotiv/src/NodeData.test.cpp
index b1c9832d5..65bd3e1a8 100644
--- a/compiler/locomotiv/src/NodeData.test.cpp
+++ b/compiler/locomotiv/src/NodeData.test.cpp
@@ -35,9 +35,9 @@ TEST(NodeData, as_s32_buffer_wrapper)
auto data = locomotiv::make_data(buf);
- ASSERT_EQ(data->dtype(), loco::DataType::S32);
- ASSERT_EQ(*(data->shape()), shape);
- ASSERT_EQ(data->as_s32_bufptr()->at(Index{0}), 42);
+ ASSERT_EQ(loco::DataType::S32, data->dtype());
+ ASSERT_EQ(shape, *(data->shape()));
+ ASSERT_EQ(42, data->as_s32_bufptr()->at(Index{0}));
}
TEST(NodeData, as_f32_buffer_wrapper)
@@ -48,7 +48,7 @@ TEST(NodeData, as_f32_buffer_wrapper)
auto data = locomotiv::make_data(buf);
- ASSERT_EQ(data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(data->shape()), shape);
- ASSERT_FLOAT_EQ(data->as_f32_bufptr()->at(Index{0}), 3.14f);
+ ASSERT_EQ(loco::DataType::FLOAT32, data->dtype());
+ ASSERT_EQ(shape, *(data->shape()));
+ ASSERT_FLOAT_EQ(3.14f, data->as_f32_bufptr()->at(Index{0}));
}
diff --git a/compiler/locomotiv/src/NodeDataImpl.test.cpp b/compiler/locomotiv/src/NodeDataImpl.test.cpp
index b85956063..3fb0cc264 100644
--- a/compiler/locomotiv/src/NodeDataImpl.test.cpp
+++ b/compiler/locomotiv/src/NodeDataImpl.test.cpp
@@ -39,7 +39,7 @@ TEST(NodeDataImpl, as_annotation)
auto g = loco::make_graph();
auto node = g->nodes()->create<loco::Pull>();
- ASSERT_EQ(locomotiv::annot_data(node), nullptr);
+ ASSERT_EQ(nullptr, locomotiv::annot_data(node));
// Set annotation
locomotiv::annot_data(node, std::move(data));
@@ -48,11 +48,11 @@ TEST(NodeDataImpl, as_annotation)
const locomotiv::NodeData *obtained = locomotiv::annot_data(node);
ASSERT_NE(obtained, nullptr);
- ASSERT_EQ(obtained->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(obtained->shape()), shape);
- ASSERT_FLOAT_EQ(obtained->as_f32_bufptr()->at(Index{0}), 3.14f);
+ ASSERT_EQ(loco::DataType::FLOAT32, obtained->dtype());
+ ASSERT_EQ(shape, *(obtained->shape()));
+ ASSERT_FLOAT_EQ(3.14f, obtained->as_f32_bufptr()->at(Index{0}));
// Erase annotation
locomotiv::erase_annot_data(node);
- ASSERT_EQ(locomotiv::annot_data(node), nullptr);
+ ASSERT_EQ(nullptr, locomotiv::annot_data(node));
}
diff --git a/compiler/locomotiv/src/NodeDomain.test.cpp b/compiler/locomotiv/src/NodeDomain.test.cpp
index 9cfcf2eb8..87c8135e5 100644
--- a/compiler/locomotiv/src/NodeDomain.test.cpp
+++ b/compiler/locomotiv/src/NodeDomain.test.cpp
@@ -22,16 +22,16 @@ TEST(NodeDomain, as_annotation)
{
loco::Pull node;
- ASSERT_EQ(locomotiv::annot_domain(&node), loco::Domain::Unknown);
+ ASSERT_EQ(loco::Domain::Unknown, locomotiv::annot_domain(&node));
// Set annotation
locomotiv::annot_domain(&node, loco::Domain::Tensor);
// Get annotation
const loco::Domain obtained = locomotiv::annot_domain(&node);
- ASSERT_EQ(obtained, loco::Domain::Tensor);
+ ASSERT_EQ(loco::Domain::Tensor, obtained);
// Erase annotation
locomotiv::erase_annot_domain(&node);
- ASSERT_EQ(locomotiv::annot_domain(&node), loco::Domain::Unknown);
+ ASSERT_EQ(loco::Domain::Unknown, locomotiv::annot_domain(&node));
}
diff --git a/compiler/locomotiv/src/NodeExecution.cpp b/compiler/locomotiv/src/NodeExecution.cpp
index e532b5af6..2a8697181 100644
--- a/compiler/locomotiv/src/NodeExecution.cpp
+++ b/compiler/locomotiv/src/NodeExecution.cpp
@@ -72,9 +72,9 @@ void NodeExecution::eltwise_unary(loco::Node *node, const UnaryFunc &f)
auto input_node = node->arg(0);
auto input_domain = annot_domain(input_node);
auto input_data = annot_data(input_node);
+ validate(input_data, "Input is not ready");
auto input_dtype = input_data->dtype();
- validate(input_data, "Input is not ready");
validate(input_domain != loco::Domain::Unknown, "Input domain is unknown");
auto output_node = node;
diff --git a/compiler/locomotiv/src/Session.test.cpp b/compiler/locomotiv/src/Session.test.cpp
index 6d4a2414f..b73e4fa8b 100644
--- a/compiler/locomotiv/src/Session.test.cpp
+++ b/compiler/locomotiv/src/Session.test.cpp
@@ -57,8 +57,8 @@ TEST(Session, graph_IO_size)
// Make session
locomotiv::Session s(g.get());
- ASSERT_EQ(s.input_size(), inputs);
- ASSERT_EQ(s.output_size(), outputs);
+ ASSERT_EQ(inputs, s.input_size());
+ ASSERT_EQ(outputs, s.output_size());
}
TEST(Session, set_input)
@@ -173,9 +173,9 @@ TEST(Session, inference_identity)
auto output_data = s.get_output(0);
ASSERT_NE(output_data, nullptr);
- ASSERT_EQ(output_data->dtype(), loco::DataType::FLOAT32);
- ASSERT_EQ(*(output_data->shape()), Shape{1});
- ASSERT_EQ(output_data->as_f32_bufptr()->at(Index{0}), 3.14f);
+ ASSERT_EQ(loco::DataType::FLOAT32, output_data->dtype());
+ ASSERT_EQ(Shape{1}, *(output_data->shape()));
+ ASSERT_EQ(3.14f, output_data->as_f32_bufptr()->at(Index{0}));
}
}
@@ -234,63 +234,63 @@ TEST(Session, session_for_subgraph)
{
// Session to get t1 only
locomotiv::Session s(g.get(), {t1});
- ASSERT_EQ(s.output_size(), 1);
- ASSERT_EQ(s.get_output_node(0), dynamic_cast<loco::Node *>(t1));
+ ASSERT_EQ(1, s.output_size());
+ ASSERT_EQ(dynamic_cast<loco::Node *>(t1), s.get_output_node(0));
s.infer();
auto t1_data = s.get_output(0);
ASSERT_NE(t1_data, nullptr);
- ASSERT_EQ(*(t1_data->shape()), Shape{2});
+ ASSERT_EQ(Shape{2}, *(t1_data->shape()));
auto t1_buf = t1_data->as_f32_bufptr();
- ASSERT_EQ(t1_buf->at({0}), 0.1f);
- ASSERT_EQ(t1_buf->at({1}), 0.2f);
+ ASSERT_EQ(0.1f, t1_buf->at({0}));
+ ASSERT_EQ(0.2f, t1_buf->at({1}));
}
{
// Session to get t2 only
locomotiv::Session s(g.get(), {t2});
- ASSERT_EQ(s.output_size(), 1);
- ASSERT_EQ(s.get_output_node(0), dynamic_cast<loco::Node *>(t2));
+ ASSERT_EQ(1, s.output_size());
+ ASSERT_EQ(dynamic_cast<loco::Node *>(t2), s.get_output_node(0));
s.infer();
auto t2_data = s.get_output(0);
ASSERT_NE(t2_data, nullptr);
- ASSERT_EQ(*(t2_data->shape()), Shape{2});
+ ASSERT_EQ(Shape{2}, *(t2_data->shape()));
auto t2_buf = t2_data->as_f32_bufptr();
- ASSERT_EQ(t2_buf->at({0}), 0.3f);
- ASSERT_EQ(t2_buf->at({1}), 0.4f);
+ ASSERT_EQ(0.3f, t2_buf->at({0}));
+ ASSERT_EQ(0.4f, t2_buf->at({1}));
}
{
// Session to get t2 and push
locomotiv::Session s(g.get(), {t2, push});
- ASSERT_EQ(s.output_size(), 2);
- ASSERT_EQ(s.get_output_node(0), dynamic_cast<loco::Node *>(t2));
- ASSERT_EQ(s.get_output_node(1), dynamic_cast<loco::Node *>(push));
+ ASSERT_EQ(2, s.output_size());
+ ASSERT_EQ(dynamic_cast<loco::Node *>(t2), s.get_output_node(0));
+ ASSERT_EQ(dynamic_cast<loco::Node *>(push), s.get_output_node(1));
s.infer();
auto t2_data = s.get_output(0);
ASSERT_NE(t2_data, nullptr);
- ASSERT_EQ(*(t2_data->shape()), Shape{2});
+ ASSERT_EQ(Shape{2}, *(t2_data->shape()));
auto t2_buf = t2_data->as_f32_bufptr();
- ASSERT_EQ(t2_buf->at({0}), 0.3f);
- ASSERT_EQ(t2_buf->at({1}), 0.4f);
+ ASSERT_EQ(0.3f, t2_buf->at({0}));
+ ASSERT_EQ(0.4f, t2_buf->at({1}));
auto push_data = s.get_output(1);
ASSERT_NE(push_data, nullptr);
- ASSERT_EQ(*(push_data->shape()), Shape{4});
+ ASSERT_EQ(Shape{4}, *(push_data->shape()));
auto push_buf = push_data->as_f32_bufptr();
- ASSERT_EQ(push_buf->at({0}), 0.1f);
- ASSERT_EQ(push_buf->at({1}), 0.2f);
- ASSERT_EQ(push_buf->at({2}), 0.3f);
- ASSERT_EQ(push_buf->at({3}), 0.4f);
+ ASSERT_EQ(0.1f, push_buf->at({0}));
+ ASSERT_EQ(0.2f, push_buf->at({1}));
+ ASSERT_EQ(0.3f, push_buf->at({2}));
+ ASSERT_EQ(0.4f, push_buf->at({3}));
}
}
@@ -321,19 +321,19 @@ TEST(Session, ctor_by_range)
auto constgen_data = s.get_output(0);
ASSERT_NE(constgen_data, nullptr);
- ASSERT_EQ(*(constgen_data->shape()), Shape{2});
+ ASSERT_EQ(Shape{2}, *(constgen_data->shape()));
auto constgen_buf = constgen_data->as_f32_bufptr();
- ASSERT_EQ(constgen_buf->at({0}), 0.1f);
- ASSERT_EQ(constgen_buf->at({1}), -0.1f);
+ ASSERT_EQ(0.1f, constgen_buf->at({0}));
+ ASSERT_EQ(-0.1f, constgen_buf->at({1}));
auto push_data = s.get_output(1);
ASSERT_NE(push_data, nullptr);
- ASSERT_EQ(*(push_data->shape()), Shape{2});
+ ASSERT_EQ(Shape{2}, *(push_data->shape()));
auto push_buf = push_data->as_f32_bufptr();
- ASSERT_EQ(push_buf->at({0}), 0.1f);
- ASSERT_EQ(push_buf->at({1}), 0.0f);
+ ASSERT_EQ(0.1f, push_buf->at({0}));
+ ASSERT_EQ(0.0f, push_buf->at({1}));
}
// Below here is internal test for locomotiv, i.e. not public usage of locomotiv
@@ -363,17 +363,17 @@ TEST(Session, dtor)
s.set_input(0, std::move(data));
auto data_annotated = locomotiv::annot_data(pull);
- ASSERT_EQ(data_annotated, nullptr);
+ ASSERT_EQ(nullptr, data_annotated);
auto user_data_annotated = locomotiv::user_data(pull);
ASSERT_NE(user_data_annotated, nullptr);
auto domain_annotated = locomotiv::annot_domain(pull);
- ASSERT_EQ(domain_annotated, loco::Domain::Unknown);
+ ASSERT_EQ(loco::Domain::Unknown, domain_annotated);
}
auto data_annotated = locomotiv::annot_data(pull);
- ASSERT_EQ(data_annotated, nullptr);
+ ASSERT_EQ(nullptr, data_annotated);
auto user_data_annotated = locomotiv::user_data(pull);
- ASSERT_EQ(user_data_annotated, nullptr);
+ ASSERT_EQ(nullptr, user_data_annotated);
auto domain_annotated = locomotiv::annot_domain(pull);
- ASSERT_EQ(domain_annotated, loco::Domain::Unknown);
+ ASSERT_EQ(loco::Domain::Unknown, domain_annotated);
}
diff --git a/compiler/locop/requires.cmake b/compiler/locop/requires.cmake
new file mode 100644
index 000000000..d314ae55d
--- /dev/null
+++ b/compiler/locop/requires.cmake
@@ -0,0 +1 @@
+require("pp")
diff --git a/compiler/locop/src/CanonicalNodeSummaryBuilder.cpp b/compiler/locop/src/CanonicalNodeSummaryBuilder.cpp
index b962f490b..61d9e8ae7 100644
--- a/compiler/locop/src/CanonicalNodeSummaryBuilder.cpp
+++ b/compiler/locop/src/CanonicalNodeSummaryBuilder.cpp
@@ -71,9 +71,7 @@ std::string opname(const loco::Node *node)
{
if (node->dialect() == loco::CanonicalDialect::get())
{
- auto canonical_node = dynamic_cast<const loco::CanonicalNode *>(node);
-
- assert(canonical_node != nullptr);
+ auto canonical_node = loco::must_cast<const loco::CanonicalNode *>(node);
switch (canonical_node->opcode())
{
@@ -288,8 +286,7 @@ bool CanonicalNodeSummaryBuilder::build(const loco::Node *node, locop::NodeSumma
return false;
}
- auto canonical_node = dynamic_cast<const loco::CanonicalNode *>(node);
- assert(canonical_node != nullptr);
+ auto canonical_node = loco::must_cast<const loco::CanonicalNode *>(node);
out = canonical_node_desc(*_tbl, canonical_node);
return true;
}
diff --git a/compiler/locop/src/FormattedGraph.cpp b/compiler/locop/src/FormattedGraph.cpp
index 84de1e888..bf4175768 100644
--- a/compiler/locop/src/FormattedGraph.cpp
+++ b/compiler/locop/src/FormattedGraph.cpp
@@ -67,6 +67,9 @@ std::string str(const loco::DataType &dtype)
case loco::DataType::FLOAT64:
return "FLOAT64";
+ case loco::DataType::BOOL:
+ return "BOOL";
+
default:
break;
};
diff --git a/compiler/logo/include/logo/DeadNodeQueryService.h b/compiler/logo/include/logo/DeadNodeQueryService.h
new file mode 100644
index 000000000..e74a4bc58
--- /dev/null
+++ b/compiler/logo/include/logo/DeadNodeQueryService.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LOGO_DEAD_NODE_QUERY_SERVICE_H__
+#define __LOGO_DEAD_NODE_QUERY_SERVICE_H__
+
+#include <loco.h>
+#include <loco/IR/DialectService.h>
+
+namespace logo
+{
+
+struct DeadNodeQueryService : public ::loco::DialectService
+{
+ virtual ~DeadNodeQueryService() = default;
+ /// @brief Check if the node is dead node
+ virtual bool isDeadNode(loco::Node *node) = 0;
+};
+
+} // namespace logo
+
+#endif // __LOGO_DEAD_NODE_QUERY_SERVICE_H__
diff --git a/compiler/logo/include/logo/RemoveDeadNodeWithQueryPass.h b/compiler/logo/include/logo/RemoveDeadNodeWithQueryPass.h
new file mode 100644
index 000000000..de0867117
--- /dev/null
+++ b/compiler/logo/include/logo/RemoveDeadNodeWithQueryPass.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LOGO_REMOVE_DEAD_NODE_WITH_QUERY_PASS_H__
+#define __LOGO_REMOVE_DEAD_NODE_WITH_QUERY_PASS_H__
+
+#include <logo/Pass.h>
+
+namespace logo
+{
+
+struct RemoveDeadNodeWithQueryPass final : public Pass
+{
+ const char *name(void) const final { return "RemoveDeadNodeWithQueryPass"; }
+
+ bool run(loco::Graph *g);
+};
+
+} // namespace logo
+
+#endif // __LOGO_REMOVE_DEAD_NODE_WITH_QUERY_PASS_H__
diff --git a/compiler/logo/src/Passes/ConstantFoldingPass.test.cpp b/compiler/logo/src/Passes/ConstantFoldingPass.test.cpp
index 824027762..b9c4942c4 100644
--- a/compiler/logo/src/Passes/ConstantFoldingPass.test.cpp
+++ b/compiler/logo/src/Passes/ConstantFoldingPass.test.cpp
@@ -82,7 +82,7 @@ TEST(ConstantFolding, const_relu_to_const)
}
auto push = logo::test::find_first_node_by_type<loco::Push>(graph.get());
- auto const_gen = dynamic_cast<loco::ConstGen *>(push->from());
+ auto const_gen = loco::must_cast<loco::ConstGen *>(push->from());
ASSERT_NE(const_gen, nullptr);
ASSERT_EQ(const_gen->size<loco::DataType::FLOAT32>(), 2);
@@ -168,7 +168,7 @@ TEST(ConstantFolding, const_relu_to_concat)
}
auto push = logo::test::find_first_node_by_type<loco::Push>(graph.get());
- auto const_gen = dynamic_cast<loco::ConstGen *>(push->from());
+ auto const_gen = loco::must_cast<loco::ConstGen *>(push->from());
ASSERT_NE(const_gen, nullptr);
ASSERT_EQ(const_gen->size<loco::DataType::FLOAT32>(), 4);
diff --git a/compiler/logo/src/Passes/RemoveDeadNodeWithQueryPass.cpp b/compiler/logo/src/Passes/RemoveDeadNodeWithQueryPass.cpp
new file mode 100644
index 000000000..5c745212a
--- /dev/null
+++ b/compiler/logo/src/Passes/RemoveDeadNodeWithQueryPass.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <logo/RemoveDeadNodeWithQueryPass.h>
+#include <logo/DeadNodeQueryService.h>
+
+#include <loco/IR/Algorithm.h>
+#include <loco/IR/CanonicalDialect.h>
+#include <loco/IR/CanonicalNode.h>
+
+#include <set>
+
+namespace logo
+{
+
+bool RemoveDeadNodeWithQueryPass::run(loco::Graph *g)
+{
+ // Let's enumerate nodes required to compute output nodes
+ auto active_nodes = loco::active_nodes(loco::output_nodes(g));
+
+ // List dead(= non-active) nodes candidates
+ std::set<loco::Node *> candidates;
+
+ for (auto node : loco::all_nodes(g))
+ {
+ if (active_nodes.find(node) == active_nodes.end())
+ {
+ candidates.insert(node);
+ }
+ }
+
+ // Find the nodes that should not be dead node in candidates
+ for (auto node : candidates)
+ {
+ if (auto service = node->dialect()->service<DeadNodeQueryService>())
+ {
+ if (!service->isDeadNode(node))
+ {
+ candidates.erase(node);
+ }
+ }
+ }
+
+ for (auto node : candidates)
+ {
+ node->drop();
+ }
+
+ for (auto node : candidates)
+ {
+ g->nodes()->destroy(node);
+ }
+
+ return candidates.size() > 0;
+}
+
+} // namespace logo
diff --git a/compiler/logo/src/Passes/RemoveForwardNodePass.cpp b/compiler/logo/src/Passes/RemoveForwardNodePass.cpp
index c951cfac4..966b270f6 100644
--- a/compiler/logo/src/Passes/RemoveForwardNodePass.cpp
+++ b/compiler/logo/src/Passes/RemoveForwardNodePass.cpp
@@ -47,7 +47,7 @@ bool RemoveForwardNodePass::run(loco::Graph *g)
{
if (node->dialect() == loco::CanonicalDialect::get())
{
- auto canonical_node = dynamic_cast<loco::CanonicalNode *>(node);
+ auto canonical_node = loco::must_cast<loco::CanonicalNode *>(node);
canonical_node->accept(&collector);
}
}
diff --git a/compiler/logo/src/Passes/ReorderDecodePass.cpp b/compiler/logo/src/Passes/ReorderDecodePass.cpp
index 724db5780..863f180e7 100644
--- a/compiler/logo/src/Passes/ReorderDecodePass.cpp
+++ b/compiler/logo/src/Passes/ReorderDecodePass.cpp
@@ -37,49 +37,54 @@ bool isReLU(const loco::Node *node)
return node->opnum() == static_cast<uint32_t>(loco::CanonicalOpcode::ReLU);
}
-} // namespace
-
-namespace logo
-{
-
-bool ReorderDecodePass<loco::TensorBiasAdd>::run(loco::Graph *g)
+// Update queue
+class Collector final : public loco::CanonicalNodeMutableVisitor<void>
{
- std::queue<loco::FeatureDecode *> q;
-
- // Update queue
- class Collector final : public loco::CanonicalNodeMutableVisitor<void>
+public:
+ Collector(std::queue<loco::FeatureDecode *> *out) : _out{out}
{
- public:
- Collector(std::queue<loco::FeatureDecode *> *out) : _out{out}
- {
- // DO NOTHING
- }
+ // DO NOTHING
+ }
- void visit(loco::FeatureDecode *node) final
+ void visit(loco::FeatureDecode *node) final
+ {
+ if (node->input() != nullptr)
{
- if (node->input() != nullptr)
- {
- _out->push(node);
- }
+ _out->push(node);
}
+ }
- void visit(loco::Node *) final { return; }
+ void visit(loco::Node *) final { return; }
- private:
- // TODO This definition should be revised to support other decode operations
- std::queue<loco::FeatureDecode *> *_out;
- };
+private:
+ // TODO This definition should be revised to support other decode operations
+ std::queue<loco::FeatureDecode *> *_out;
+};
+void gather_candidates(loco::Graph *g, std::queue<loco::FeatureDecode *> &q)
+{
Collector collector{&q};
for (auto node : loco::all_nodes(g))
{
if (node->dialect() == loco::CanonicalDialect::get())
{
- auto canonical_node = dynamic_cast<loco::CanonicalNode *>(node);
+ auto canonical_node = loco::must_cast<loco::CanonicalNode *>(node);
canonical_node->accept(&collector);
}
}
+}
+
+} // namespace
+
+namespace logo
+{
+
+bool ReorderDecodePass<loco::TensorBiasAdd>::run(loco::Graph *g)
+{
+ std::queue<loco::FeatureDecode *> q;
+
+ gather_candidates(g, q);
bool changed = false;
@@ -125,9 +130,7 @@ bool ReorderDecodePass<loco::TensorBiasAdd>::run(loco::Graph *g)
// Q. Is it better to create an independent transform for this rewriting rule?
if (isTensorBiasAdd(u))
{
- auto old_badd = dynamic_cast<loco::TensorBiasAdd *>(u);
-
- assert(old_badd != nullptr);
+ auto old_badd = loco::must_cast<loco::TensorBiasAdd *>(u);
/**
* Let us consider the following example:
@@ -182,40 +185,7 @@ bool ReorderDecodePass<loco::ReLU>::run(loco::Graph *g)
{
std::queue<loco::FeatureDecode *> q;
- // Update queue
- class Collector final : public loco::CanonicalNodeMutableVisitor<void>
- {
- public:
- Collector(std::queue<loco::FeatureDecode *> *out) : _out{out}
- {
- // DO NOTHING
- }
-
- void visit(loco::FeatureDecode *node) final
- {
- if (node->input() != nullptr)
- {
- _out->push(node);
- }
- }
-
- void visit(loco::Node *) final { return; }
-
- private:
- // TODO This definition should be revised to support other decode operations
- std::queue<loco::FeatureDecode *> *_out;
- };
-
- Collector collector{&q};
-
- for (auto node : loco::all_nodes(g))
- {
- if (node->dialect() == loco::CanonicalDialect::get())
- {
- auto canonical_node = dynamic_cast<loco::CanonicalNode *>(node);
- canonical_node->accept(&collector);
- }
- }
+ gather_candidates(g, q);
bool changed = false;
diff --git a/compiler/logo/src/Passes/ResolveDuplicateReshapePass.cpp b/compiler/logo/src/Passes/ResolveDuplicateReshapePass.cpp
index d3c74cb77..94ff6291d 100644
--- a/compiler/logo/src/Passes/ResolveDuplicateReshapePass.cpp
+++ b/compiler/logo/src/Passes/ResolveDuplicateReshapePass.cpp
@@ -61,7 +61,7 @@ bool is_duplicate_reshape(loco::Node *node)
*/
void remap_input(loco::FixedReshape *reshape)
{
- auto input_reshape = dynamic_cast<loco::FixedReshape *>(reshape->input());
+ auto input_reshape = loco::must_cast<loco::FixedReshape *>(reshape->input());
auto volume = [](loco::FixedReshape *node) {
uint32_t vol = 1;
@@ -94,7 +94,7 @@ bool ResolveDuplicateReshapePass::run(loco::Graph *graph)
{
if (is_duplicate_reshape(node))
{
- auto node_as_reshape = dynamic_cast<loco::FixedReshape *>(node);
+ auto node_as_reshape = loco::must_cast<loco::FixedReshape *>(node);
remap_input(node_as_reshape);
diff --git a/compiler/logo/src/Passes/SimplifyDomainConversionPass.cpp b/compiler/logo/src/Passes/SimplifyDomainConversionPass.cpp
index 9b7a8d1c7..0bda85b6f 100644
--- a/compiler/logo/src/Passes/SimplifyDomainConversionPass.cpp
+++ b/compiler/logo/src/Passes/SimplifyDomainConversionPass.cpp
@@ -411,7 +411,7 @@ bool SimplifyDomainConversionPass::run(loco::Graph *g)
{
if (node->dialect() == loco::CanonicalDialect::get())
{
- auto canonical_node = dynamic_cast<loco::CanonicalNode *>(node);
+ auto canonical_node = loco::must_cast<loco::CanonicalNode *>(node);
canonical_node->accept(&collector);
}
}
diff --git a/compiler/logo/src/Passes/SimplifyDomainConversionPass.test.cpp b/compiler/logo/src/Passes/SimplifyDomainConversionPass.test.cpp
index 6bd93c1b2..9a05763b4 100644
--- a/compiler/logo/src/Passes/SimplifyDomainConversionPass.test.cpp
+++ b/compiler/logo/src/Passes/SimplifyDomainConversionPass.test.cpp
@@ -227,7 +227,7 @@ TEST(SimplifyDomainConversionPass, FilterEncode_FilterDecode_equal_perms)
ASSERT_EQ(loco::output_nodes(graph.get()).size(), 1);
loco::Node *output_node = loco::output_nodes(graph.get())[0];
- auto forward = dynamic_cast<loco::Forward *>(output_node->arg(0));
+ auto forward = loco::must_cast<loco::Forward *>(output_node->arg(0));
ASSERT_NE(forward, nullptr);
auto const_gen = dynamic_cast<loco::ConstGen *>(forward->arg(0));
ASSERT_NE(const_gen, nullptr);
diff --git a/compiler/luci-interpreter/CMakeLists.txt b/compiler/luci-interpreter/CMakeLists.txt
new file mode 100644
index 000000000..33fdc52aa
--- /dev/null
+++ b/compiler/luci-interpreter/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(LUCI_INTERPRETER_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include")
+set(LUCI_INTERPRETER_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/src")
+
+add_subdirectory(src)
diff --git a/compiler/luci-interpreter/include/luci_interpreter/Interpreter.h b/compiler/luci-interpreter/include/luci_interpreter/Interpreter.h
new file mode 100644
index 000000000..7a14bf6f8
--- /dev/null
+++ b/compiler/luci-interpreter/include/luci_interpreter/Interpreter.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_INTERPRETER_H
+#define LUCI_INTERPRETER_INTERPRETER_H
+
+#include "luci_interpreter/core/Tensor.h"
+
+#include <luci/IR/Nodes/CircleInput.h>
+#include <luci/IR/Nodes/CircleOutput.h>
+
+#include <luci/IR/Module.h>
+
+#include <memory>
+#include <vector>
+#include <unordered_map>
+
+namespace luci_interpreter
+{
+
+class ExecutionObserver
+{
+public:
+ virtual ~ExecutionObserver();
+
+ // Called when the value of a tensor has been updated during execution.
+ virtual void postTensorWrite(const luci::CircleNode *node, const Tensor *tensor);
+
+ // Called before / after executing an operator.
+ // Note that these methods are not called for auxiliary operators (CircleInput, CircleOutput,
+ // CircleConst and Circle*Out).
+ virtual void preOperatorExecute(const luci::CircleNode *node);
+ virtual void postOperatorExecute(const luci::CircleNode *node);
+};
+
+class Interpreter
+{
+public:
+ explicit Interpreter(const luci::Module *module);
+
+ ~Interpreter();
+
+ void writeInputTensor(const luci::CircleInput *input_node, const void *data, size_t data_size);
+
+ void readOutputTensor(const luci::CircleOutput *output_node, void *data, size_t data_size);
+
+ void interpret();
+
+ void attachObserver(ExecutionObserver *observer);
+
+  const Tensor *getTensor(const loco::Node *node) { auto it = _node_to_tensor.find(node); return it == _node_to_tensor.end() ? nullptr : it->second; }
+
+private:
+ std::unique_ptr<class RuntimeModule> _runtime_module;
+
+ // Observer functionality support.
+ std::unique_ptr<struct RuntimeToIR> _runtime_to_ir;
+ std::unordered_map<const loco::Node *, Tensor *> _node_to_tensor;
+ std::unique_ptr<class EventNotifier> _event_notifier;
+ std::vector<ExecutionObserver *> _observers;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_INTERPRETER_H
diff --git a/compiler/luci-interpreter/include/luci_interpreter/core/DataType.h b/compiler/luci-interpreter/include/luci_interpreter/core/DataType.h
new file mode 100644
index 000000000..27bf719b5
--- /dev/null
+++ b/compiler/luci-interpreter/include/luci_interpreter/core/DataType.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_CORE_DATATYPE_H
+#define LUCI_INTERPRETER_CORE_DATATYPE_H
+
+#include <loco/IR/DataType.h>
+#include <loco/IR/DataTypeTraits.h>
+
+#include <cstddef>
+
+namespace luci_interpreter
+{
+
+using DataType = loco::DataType;
+
+template <DataType DT> using DataTypeImpl = loco::DataTypeImpl<DT>;
+
+inline size_t getDataTypeSize(DataType data_type) { return loco::size(data_type); }
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_CORE_DATATYPE_H
diff --git a/compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h b/compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h
new file mode 100644
index 000000000..998789882
--- /dev/null
+++ b/compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_CORE_TENSOR_H
+#define LUCI_INTERPRETER_CORE_TENSOR_H
+
+#include "luci_interpreter/core/DataType.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace luci_interpreter
+{
+
+class Shape
+{
+public:
+ explicit Shape(int rank) : _dims(rank, 0) {}
+
+ Shape(std::initializer_list<int32_t> dims) : _dims(dims.begin(), dims.end()) {}
+
+ int num_dims() const { return _dims.size(); }
+
+ int32_t dim(int i) const
+ {
+ assert(i >= 0 && i < static_cast<int>(_dims.size()));
+ return _dims[i];
+ }
+
+ int32_t &dim(int i)
+ {
+ assert(i >= 0 && i < static_cast<int>(_dims.size()));
+ return _dims[i];
+ }
+
+ int32_t num_elements() const
+ {
+ int32_t result = 1;
+ for (const int32_t dim : _dims)
+ {
+ result *= dim;
+ }
+ return result;
+ }
+
+ bool operator==(const Shape &other) const { return _dims == other._dims; }
+
+ bool operator!=(const Shape &other) const { return !operator==(other); }
+
+private:
+ std::vector<int32_t> _dims;
+};
+
+// Tensor affine quantization parameters.
+//
+// The relationship between real and quantized values:
+// real_value = (quantized_value - zero_point) * scale
+//
+// In per-tensor case, 'scale' and 'zero_point' are one element each.
+// In per-channel case, 'scale' and 'zero_point' are N elements each, where N is the size
+// of the quantized dimension.
+//
+// Note that due to historical and performance reasons, per-tensor quantization uses unsigned
+// integer types, while per-channel uses signed types assuming 'zero_point' == 0.
+//
+// TODO Add 'quantized_dimension' field for per-channel case when IR provides it.
+struct AffineQuantization
+{
+ std::vector<float> scale;
+ std::vector<int32_t> zero_point;
+};
+
+class Tensor
+{
+public:
+ Tensor(DataType element_type, Shape shape, AffineQuantization quantization, std::string name);
+
+ DataType element_type() const { return _element_type; }
+
+ const Shape &shape() const { return _shape; }
+
+ float scale() const
+ {
+ assert(_quantization.scale.size() == 1);
+ return _quantization.scale[0];
+ }
+
+ float zero_point() const
+ {
+ assert(_quantization.zero_point.size() == 1);
+ return _quantization.zero_point[0];
+ }
+
+ template <typename T> const T *data() const { return reinterpret_cast<const T *>(_data.get()); }
+
+ template <typename T> T *data() { return reinterpret_cast<T *>(_data.get()); }
+
+ const std::string &name() const { return _name; }
+
+ void readData(void *data_ptr, size_t data_size) const;
+
+ void writeData(const void *data_ptr, size_t data_size);
+
+ void resize(const Shape &new_shape);
+
+private:
+ DataType _element_type;
+ Shape _shape;
+ AffineQuantization _quantization;
+ std::unique_ptr<uint8_t[]> _data;
+ std::string _name;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_CORE_TENSOR_H
diff --git a/compiler/luci-interpreter/requires.cmake b/compiler/luci-interpreter/requires.cmake
new file mode 100644
index 000000000..f411f387a
--- /dev/null
+++ b/compiler/luci-interpreter/requires.cmake
@@ -0,0 +1 @@
+require(luci)
diff --git a/compiler/luci-interpreter/src/CMakeLists.txt b/compiler/luci-interpreter/src/CMakeLists.txt
new file mode 100644
index 000000000..6a66f1425
--- /dev/null
+++ b/compiler/luci-interpreter/src/CMakeLists.txt
@@ -0,0 +1,35 @@
+nnas_find_package(TensorFlowSource EXACT 2.1.0 QUIET)
+nnas_find_package(TensorFlowGEMMLowpSource EXACT 2.1.0 QUIET)
+nnas_find_package(TensorFlowEigenSource EXACT 2.1.0 QUIET)
+
+if (NOT TensorFlowSource_FOUND)
+ message(STATUS "Skipping luci-interpreter: TensorFlow not found")
+ return()
+endif ()
+
+if (NOT TensorFlowGEMMLowpSource_FOUND)
+ message(STATUS "Skipping luci-interpreter: gemmlowp not found")
+ return()
+endif ()
+
+if (NOT TensorFlowEigenSource_FOUND)
+ message(STATUS "Skipping luci-interpreter: Eigen not found")
+ return()
+endif ()
+
+add_subdirectory(core)
+add_subdirectory(kernels)
+add_subdirectory(loader)
+
+set(SOURCES
+ "${LUCI_INTERPRETER_INCLUDE_DIR}/luci_interpreter/Interpreter.h"
+ Interpreter.cpp)
+
+add_library(luci_interpreter SHARED ${SOURCES})
+target_include_directories(luci_interpreter PUBLIC "${LUCI_INTERPRETER_INCLUDE_DIR}")
+target_include_directories(luci_interpreter PRIVATE "${LUCI_INTERPRETER_SOURCE_DIR}")
+target_link_libraries(luci_interpreter
+ PUBLIC luci_lang luci_interpreter_loader luci_interpreter_core
+ PRIVATE nncc_common)
+
+install(TARGETS luci_interpreter DESTINATION lib)
diff --git a/compiler/luci-interpreter/src/Interpreter.cpp b/compiler/luci-interpreter/src/Interpreter.cpp
new file mode 100644
index 000000000..639ffc1f0
--- /dev/null
+++ b/compiler/luci-interpreter/src/Interpreter.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci_interpreter/Interpreter.h"
+
+#include "loader/ModuleLoader.h"
+
+#include <algorithm>
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace
+{
+
+class EventNotifierImpl final : public EventNotifier
+{
+public:
+ EventNotifierImpl(const RuntimeToIR &runtime_to_ir,
+ const std::vector<ExecutionObserver *> &observers)
+ : _runtime_to_ir(runtime_to_ir), _observers(observers)
+ {
+ }
+
+ void postTensorWrite(const Tensor *tensor) override
+ {
+ assert(tensor != nullptr);
+ for (const auto &observer : _observers)
+ {
+ observer->postTensorWrite(_runtime_to_ir.tensor_to_node.at(tensor), tensor);
+ }
+ }
+
+ void preOperatorExecute(const Kernel *kernel) override
+ {
+ assert(kernel != nullptr);
+ for (const auto &observer : _observers)
+ {
+ observer->preOperatorExecute(_runtime_to_ir.kernel_to_node.at(kernel));
+ }
+ }
+
+ void postOperatorExecute(const Kernel *kernel) override
+ {
+ assert(kernel != nullptr);
+ for (const auto &observer : _observers)
+ {
+ observer->postOperatorExecute(_runtime_to_ir.kernel_to_node.at(kernel));
+ }
+ }
+
+private:
+ const RuntimeToIR &_runtime_to_ir;
+ const std::vector<ExecutionObserver *> &_observers;
+};
+
+} // namespace
+
+Interpreter::Interpreter(const luci::Module *module)
+{
+ _runtime_to_ir = std::make_unique<RuntimeToIR>();
+ _event_notifier = std::make_unique<EventNotifierImpl>(*_runtime_to_ir, _observers);
+ _runtime_module = std::make_unique<RuntimeModule>(_event_notifier.get());
+ ModuleLoader loader(module, _runtime_module.get(), *_runtime_to_ir, _node_to_tensor);
+ loader.load();
+}
+
+Interpreter::~Interpreter() = default;
+
+void Interpreter::writeInputTensor(const luci::CircleInput *input_node, const void *data,
+ size_t data_size)
+{
+ Tensor *tensor = _runtime_module->getInputTensors()[input_node->index()];
+ if (tensor == nullptr)
+ {
+ const std::string &name = input_node->name();
+ throw std::runtime_error("Cannot find tensor for input node named \"" + name + "\".");
+ }
+ if (data != nullptr)
+ tensor->writeData(data, data_size);
+}
+
+void Interpreter::readOutputTensor(const luci::CircleOutput *output_node, void *data,
+ size_t data_size)
+{
+ Tensor *tensor = _runtime_module->getOutputTensors()[output_node->index()];
+ if (tensor == nullptr)
+ {
+ const std::string &name = output_node->name();
+ throw std::runtime_error("Cannot find tensor for output node named \"" + name + "\".");
+ }
+ if (data != nullptr)
+ tensor->readData(data, data_size);
+}
+
+void Interpreter::interpret() { _runtime_module->execute(); }
+
+void Interpreter::attachObserver(ExecutionObserver *observer)
+{
+ if (std::find(_observers.cbegin(), _observers.cend(), observer) != _observers.cend())
+ throw std::runtime_error("Observer is already attached.");
+ _observers.push_back(observer);
+}
+
+ExecutionObserver::~ExecutionObserver() = default;
+
+void ExecutionObserver::postTensorWrite(const luci::CircleNode *, const Tensor *) {}
+
+void ExecutionObserver::preOperatorExecute(const luci::CircleNode *) {}
+
+void ExecutionObserver::postOperatorExecute(const luci::CircleNode *) {}
+
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/core/CMakeLists.txt b/compiler/luci-interpreter/src/core/CMakeLists.txt
new file mode 100644
index 000000000..e576dbd94
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/CMakeLists.txt
@@ -0,0 +1,17 @@
+set(SOURCES
+ "${LUCI_INTERPRETER_INCLUDE_DIR}/luci_interpreter/core/DataType.h"
+ "${LUCI_INTERPRETER_INCLUDE_DIR}/luci_interpreter/core/Tensor.h"
+ EventNotifier.h
+ Kernel.h
+ KernelParams.h
+ RuntimeGraph.h
+ RuntimeGraph.cpp
+ RuntimeModule.h
+ Tensor.cpp)
+
+add_library(luci_interpreter_core STATIC ${SOURCES})
+set_target_properties(luci_interpreter_core PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(luci_interpreter_core PUBLIC "${LUCI_INTERPRETER_INCLUDE_DIR}")
+target_include_directories(luci_interpreter_core PUBLIC "${LUCI_INTERPRETER_SOURCE_DIR}")
+target_link_libraries(luci_interpreter_core PUBLIC luci_lang)
+target_link_libraries(luci_interpreter_core PRIVATE nncc_common)
diff --git a/compiler/luci-interpreter/src/core/EventNotifier.h b/compiler/luci-interpreter/src/core/EventNotifier.h
new file mode 100644
index 000000000..5c4fbd3be
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/EventNotifier.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_CORE_EVENTNOTIFIER_H
+#define LUCI_INTERPRETER_CORE_EVENTNOTIFIER_H
+
+namespace luci_interpreter
+{
+
+class Tensor;
+class Kernel;
+// Used at execution stage to tell the interpreter that the runtime state has changed in some way.
+class EventNotifier
+{
+public:
+ virtual ~EventNotifier() = default;
+
+ virtual void postTensorWrite(const Tensor *tensor) = 0;
+ virtual void preOperatorExecute(const Kernel *kernel) = 0;
+ virtual void postOperatorExecute(const Kernel *kernel) = 0;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_CORE_EVENTNOTIFIER_H
diff --git a/compiler/luci-interpreter/src/core/Kernel.h b/compiler/luci-interpreter/src/core/Kernel.h
new file mode 100644
index 000000000..5f5efb219
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/Kernel.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_CORE_KERNEL_H
+#define LUCI_INTERPRETER_CORE_KERNEL_H
+
+#include "luci_interpreter/core/Tensor.h"
+
+#include <vector>
+
+namespace luci_interpreter
+{
+
+// Base class for all kernels.
+class Kernel
+{
+protected:
+ Kernel(std::vector<const Tensor *> inputs, std::vector<Tensor *> outputs)
+ : _inputs(std::move(inputs)), _outputs(std::move(outputs))
+ {
+ }
+
+public:
+ virtual ~Kernel() = default;
+
+ std::vector<const Tensor *> getInputTensors() const { return _inputs; }
+ std::vector<Tensor *> getOutputTensors() const { return _outputs; }
+
+ // Configures the kernel.
+ // This function is currently called once for each kernel during interpreter construction,
+ // which makes it a convenient place for preparing (resizing) output tensors.
+ virtual void configure() = 0;
+
+ // Executes the kernel.
+ virtual void execute() const = 0;
+
+protected:
+ // NOTE Prefer not to use these in derived classes.
+ const std::vector<const Tensor *> _inputs;
+ const std::vector<Tensor *> _outputs;
+};
+
+// Base class for kernels with parameters.
+template <typename Params> class KernelWithParams : public Kernel
+{
+protected:
+ KernelWithParams(std::vector<const Tensor *> inputs, std::vector<Tensor *> outputs,
+ const Params &params)
+ : Kernel(std::move(inputs), std::move(outputs)), _params(params)
+ {
+ }
+
+public:
+ const Params &params() const { return _params; }
+
+protected:
+ const Params _params;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_CORE_KERNEL_H
diff --git a/compiler/luci-interpreter/src/core/KernelParams.h b/compiler/luci-interpreter/src/core/KernelParams.h
new file mode 100644
index 000000000..a32e0d4a5
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/KernelParams.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_CORE_KERNELPARAMS_H
+#define LUCI_INTERPRETER_CORE_KERNELPARAMS_H
+
+#include <luci/IR/AttrPadding.h>
+#include <luci/IR/AttrFusedActFunc.h>
+#include <luci_interpreter/core/DataType.h>
+
+#include <cstdint>
+#include <vector>
+
+namespace luci_interpreter
+{
+
+// Inject commonly used types into `luci_interpreter` namespace for convenience.
+using Activation = luci::FusedActFunc;
+using Padding = luci::Padding;
+
+struct AddParams
+{
+ Activation activation;
+};
+
+struct ArgMaxParams
+{
+ DataType output_type;
+};
+
+struct ConcatenationParams
+{
+ int axis;
+};
+
+struct Conv2DParams
+{
+ Padding padding;
+ int32_t stride_height;
+ int32_t stride_width;
+ int32_t dilation_height_factor;
+ int32_t dilation_width_factor;
+ Activation activation;
+};
+
+struct DepthwiseConv2DParams
+{
+ Padding padding;
+ int32_t depth_multiplier; // TODO Remove, as it can be calculated.
+ int32_t stride_height;
+ int32_t stride_width;
+ int32_t dilation_height_factor;
+ int32_t dilation_width_factor;
+ Activation activation;
+};
+
+struct FullyConnectedParams
+{
+ Activation activation;
+};
+
+struct L2NormParams
+{
+ Activation activation;
+};
+
+struct LeakyReluParams
+{
+ float alpha;
+};
+
+struct LocalResponseNormalizationParams
+{
+ int32_t radius;
+ float bias;
+ float alpha;
+ float beta;
+};
+
+struct MulParams
+{
+ Activation activation;
+};
+
+struct Pool2DParams
+{
+ Padding padding;
+ int32_t filter_height;
+ int32_t filter_width;
+ int32_t stride_height;
+ int32_t stride_width;
+ Activation activation;
+};
+
+struct ReducerParams
+{
+ bool keep_dims;
+};
+
+struct SpaceToDepthParams
+{
+ int block_size;
+};
+
+struct SoftmaxParams
+{
+ float beta;
+};
+
+struct StridedSliceParams
+{
+ int32_t begin_mask;
+ int32_t end_mask;
+ int32_t ellipsis_mask;
+ int32_t new_axis_mask;
+ int32_t shrink_axis_mask;
+};
+
+struct SqueezeParams
+{
+ std::vector<int32_t> squeeze_dims;
+};
+
+struct TransposeConvParams
+{
+ Padding padding;
+ int32_t stride_height;
+ int32_t stride_width;
+};
+
+struct UnpackParams
+{
+ int axis;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_CORE_KERNELPARAMS_H
diff --git a/compiler/luci-interpreter/src/core/RuntimeGraph.cpp b/compiler/luci-interpreter/src/core/RuntimeGraph.cpp
new file mode 100644
index 000000000..06f0fed15
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/RuntimeGraph.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "core/RuntimeGraph.h"
+
+#include "core/RuntimeModule.h"
+
+#include <algorithm>
+
+namespace luci_interpreter
+{
+
+Tensor *RuntimeGraph::addTensor(std::unique_ptr<Tensor> &&tensor)
+{
+ assert(tensor != nullptr);
+ _tensors.push_back(std::move(tensor));
+ return _tensors.back().get();
+}
+
+void RuntimeGraph::setInputTensors(const std::vector<Tensor *> &input_tensors)
+{
+ assert(std::all_of(input_tensors.cbegin(), input_tensors.cend(),
+ [](Tensor *tensor) { return tensor != nullptr; }));
+ _input_tensors = input_tensors;
+}
+
+void RuntimeGraph::setOutputTensors(const std::vector<Tensor *> &output_tensors)
+{
+ assert(std::all_of(output_tensors.cbegin(), output_tensors.cend(),
+ [](Tensor *tensor) { return tensor != nullptr; }));
+ _output_tensors = output_tensors;
+}
+
+void RuntimeGraph::addKernel(std::unique_ptr<Kernel> &&kernel)
+{
+ assert(kernel != nullptr);
+ _kernels.push_back(std::move(kernel));
+}
+
+void RuntimeGraph::execute() const
+{
+ EventNotifier *event_notifier = _owning_module->getEventNotifier();
+
+ // Notify the observers that the input tensors have changed.
+ if (event_notifier != nullptr)
+ {
+ for (const Tensor *input_tensor : getInputTensors())
+ {
+ event_notifier->postTensorWrite(input_tensor);
+ }
+ }
+
+ for (const auto &kernel : _kernels)
+ {
+ if (event_notifier != nullptr)
+ {
+ event_notifier->preOperatorExecute(kernel.get());
+ }
+
+ // TODO The `configure` method should only be called if the outputs of an operator need to be
+ // resized.
+ kernel->configure();
+ kernel->execute();
+
+ if (event_notifier != nullptr)
+ {
+ event_notifier->postOperatorExecute(kernel.get());
+ }
+
+ for (const Tensor *tensor : kernel->getOutputTensors())
+ {
+ if (event_notifier != nullptr)
+ {
+ event_notifier->postTensorWrite(tensor);
+ }
+ }
+ }
+}
+
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/core/RuntimeGraph.h b/compiler/luci-interpreter/src/core/RuntimeGraph.h
new file mode 100644
index 000000000..6ddbea4e9
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/RuntimeGraph.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_CORE_RUNTIMEGRAPH_H
+#define LUCI_INTERPRETER_CORE_RUNTIMEGRAPH_H
+
+#include "luci_interpreter/core/Tensor.h"
+#include "core/Kernel.h"
+
+#include <memory>
+#include <vector>
+
+namespace luci_interpreter
+{
+
+class RuntimeModule;
+
+class RuntimeGraph
+{
+public:
+ explicit RuntimeGraph(RuntimeModule *owning_module) : _owning_module(owning_module) {}
+
+ Tensor *addTensor(std::unique_ptr<Tensor> &&tensor);
+
+ void setInputTensors(const std::vector<Tensor *> &input_tensors);
+ void setOutputTensors(const std::vector<Tensor *> &output_tensors);
+
+ const std::vector<Tensor *> &getInputTensors() const { return _input_tensors; }
+ const std::vector<Tensor *> &getOutputTensors() const { return _output_tensors; }
+
+ void addKernel(std::unique_ptr<Kernel> &&kernel);
+
+ void execute() const;
+
+private:
+ RuntimeModule *_owning_module;
+ std::vector<std::unique_ptr<Tensor>> _tensors;
+ std::vector<Tensor *> _input_tensors;
+ std::vector<Tensor *> _output_tensors;
+
+ // Kernels in execution order.
+ std::vector<std::unique_ptr<Kernel>> _kernels;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_CORE_RUNTIMEGRAPH_H
diff --git a/compiler/luci-interpreter/src/core/RuntimeModule.h b/compiler/luci-interpreter/src/core/RuntimeModule.h
new file mode 100644
index 000000000..dccc3a173
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/RuntimeModule.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_CORE_RUNTIMEMODULE_H
+#define LUCI_INTERPRETER_CORE_RUNTIMEMODULE_H
+
+#include "core/RuntimeGraph.h"
+#include "core/EventNotifier.h"
+
+#include <memory>
+#include <vector>
+
+namespace luci_interpreter
+{
+
+class RuntimeModule
+{
+public:
+ explicit RuntimeModule(EventNotifier *event_notifier) : _event_notifier(event_notifier) {}
+
+ EventNotifier *getEventNotifier() const { return _event_notifier; }
+
+ RuntimeGraph *addGraph()
+ {
+ _graphs.push_back(std::make_unique<RuntimeGraph>(this));
+ return _graphs.back().get();
+ }
+
+ const std::vector<Tensor *> &getInputTensors() const { return getMainGraph()->getInputTensors(); }
+ const std::vector<Tensor *> &getOutputTensors() const
+ {
+ return getMainGraph()->getOutputTensors();
+ }
+
+ void execute() const { getMainGraph()->execute(); }
+
+private:
+ RuntimeGraph *getMainGraph() const { return _graphs[0].get(); }
+
+ EventNotifier *const _event_notifier;
+ std::vector<std::unique_ptr<RuntimeGraph>> _graphs;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_CORE_RUNTIMEMODULE_H
diff --git a/compiler/luci-interpreter/src/core/Tensor.cpp b/compiler/luci-interpreter/src/core/Tensor.cpp
new file mode 100644
index 000000000..4fe7479e5
--- /dev/null
+++ b/compiler/luci-interpreter/src/core/Tensor.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci_interpreter/core/Tensor.h"
+
+#include <cstring>
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+Tensor::Tensor(DataType element_type, Shape shape, AffineQuantization quantization,
+ std::string name)
+ : _element_type(element_type), _shape(std::move(shape)), _quantization(std::move(quantization)),
+ _name(std::move(name))
+{
+ const size_t element_size = getDataTypeSize(_element_type);
+ const int32_t num_elements = _shape.num_elements();
+ _data = std::make_unique<uint8_t[]>(num_elements * element_size);
+}
+
+void Tensor::readData(void *data_ptr, size_t data_size) const
+{
+ const size_t element_size = getDataTypeSize(element_type());
+ const int32_t num_elements = shape().num_elements();
+ if (data_size != num_elements * element_size)
+ {
+ throw std::invalid_argument("Invalid data size.");
+ }
+ assert(data_ptr != nullptr);
+ std::memcpy(data_ptr, data<void>(), data_size);
+}
+
+void Tensor::writeData(const void *data_ptr, size_t data_size)
+{
+ const size_t element_size = getDataTypeSize(element_type());
+ const int32_t num_elements = shape().num_elements();
+ if (data_size != num_elements * element_size)
+ {
+ throw std::invalid_argument("Invalid data size.");
+ }
+ assert(data_ptr != nullptr);
+ std::memcpy(data<void>(), data_ptr, data_size);
+}
+
+void Tensor::resize(const Shape &new_shape)
+{
+ _shape = new_shape;
+ const size_t element_size = getDataTypeSize(_element_type);
+ const int32_t num_elements = _shape.num_elements();
+ // NOTE: _data can be nullptr for empty tensors
+ _data = std::make_unique<uint8_t[]>(num_elements * element_size);
+}
+
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Add.cpp b/compiler/luci-interpreter/src/kernels/Add.cpp
new file mode 100644
index 000000000..9b9334792
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Add.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Add.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/add.h>
+#include <tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+Add::Add(const Tensor *input1, const Tensor *input2, Tensor *output, const AddParams &params)
+ : KernelWithParams<AddParams>({input1, input2}, {output}, params)
+{
+}
+
+void Add::configure()
+{
+ assert(input1()->element_type() == input2()->element_type());
+ output()->resize(calculateShapeForBroadcast(input1()->shape(), input2()->shape()));
+}
+
+void Add::execute() const
+{
+ switch (input1()->element_type())
+ {
+ case DataType::FLOAT32:
+ evalFloat();
+ break;
+ case DataType::U8:
+ evalQuantized();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void Add::evalFloat() const
+{
+ float activation_min{};
+ float activation_max{};
+ calculateActivationRange(_params.activation, &activation_min, &activation_max);
+
+ tflite::ArithmeticParams params{};
+ params.float_activation_min = activation_min;
+ params.float_activation_max = activation_max;
+
+ const bool need_broadcast = tflite::reference_ops::ProcessBroadcastShapes(
+ getTensorShape(input1()), getTensorShape(input2()), &params);
+
+ if (need_broadcast)
+ {
+ tflite::reference_ops::BroadcastAdd4DSlow(
+ params, getTensorShape(input1()), getTensorData<float>(input1()), getTensorShape(input2()),
+ getTensorData<float>(input2()), getTensorShape(output()), getTensorData<float>(output()));
+ }
+ else
+ {
+ tflite::reference_ops::Add(params, getTensorShape(input1()), getTensorData<float>(input1()),
+ getTensorShape(input2()), getTensorData<float>(input2()),
+ getTensorShape(output()), getTensorData<float>(output()));
+ }
+}
+
+void Add::evalQuantized() const
+{
+ const auto input1_scale = static_cast<double>(input1()->scale());
+ const auto input2_scale = static_cast<double>(input2()->scale());
+ const auto output_scale = static_cast<double>(output()->scale());
+
+ const int left_shift = 20;
+ const double twice_max_input_scale = 2 * std::max(input1_scale, input2_scale);
+ const double real_input1_multiplier = input1_scale / twice_max_input_scale;
+ const double real_input2_multiplier = input2_scale / twice_max_input_scale;
+ const double real_output_multiplier = twice_max_input_scale / ((1 << left_shift) * output_scale);
+
+ int32_t input1_multiplier{}, input2_multiplier{}, output_multiplier{};
+ int input1_shift{}, input2_shift{}, output_shift{};
+ quantizeMultiplierSmallerThanOneExp(real_input1_multiplier, &input1_multiplier, &input1_shift);
+ quantizeMultiplierSmallerThanOneExp(real_input2_multiplier, &input2_multiplier, &input2_shift);
+ quantizeMultiplierSmallerThanOneExp(real_output_multiplier, &output_multiplier, &output_shift);
+
+ int32_t activation_min{};
+ int32_t activation_max{};
+ calculateActivationRangeQuantized(_params.activation, output(), &activation_min, &activation_max);
+
+ tflite::ArithmeticParams params{};
+ params.left_shift = left_shift;
+ // The kernel expects inputs' zero points to be negated.
+ params.input1_offset = -input1()->zero_point(); // Note the '-'.
+ params.input1_multiplier = input1_multiplier;
+ params.input1_shift = input1_shift;
+ params.input2_offset = -input2()->zero_point(); // Note the '-'.
+ params.input2_multiplier = input2_multiplier;
+ params.input2_shift = input2_shift;
+ params.output_offset = output()->zero_point();
+ params.output_multiplier = output_multiplier;
+ params.output_shift = output_shift;
+ params.quantized_activation_min = activation_min;
+ params.quantized_activation_max = activation_max;
+
+ const bool need_broadcast = tflite::reference_ops::ProcessBroadcastShapes(
+ getTensorShape(input1()), getTensorShape(input2()), &params);
+
+ if (need_broadcast)
+ {
+ tflite::reference_ops::BroadcastAdd4DSlow(
+ params, getTensorShape(input1()), getTensorData<uint8_t>(input1()),
+ getTensorShape(input2()), getTensorData<uint8_t>(input2()), getTensorShape(output()),
+ getTensorData<uint8_t>(output()));
+ }
+ else
+ {
+ tflite::reference_ops::Add(params, getTensorShape(input1()), getTensorData<uint8_t>(input1()),
+ getTensorShape(input2()), getTensorData<uint8_t>(input2()),
+ getTensorShape(output()), getTensorData<uint8_t>(output()));
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Add.h b/compiler/luci-interpreter/src/kernels/Add.h
new file mode 100644
index 000000000..a1f7e0406
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Add.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_ADD_H
+#define LUCI_INTERPRETER_KERNELS_ADD_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Add : public KernelWithParams<AddParams>
+{
+public:
+ Add(const Tensor *input1, const Tensor *input2, Tensor *output, const AddParams &params);
+
+ const Tensor *input1() const { return _inputs[0]; }
+ const Tensor *input2() const { return _inputs[1]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+ void evalQuantized() const;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_ADD_H
diff --git a/compiler/luci-interpreter/src/kernels/Add.test.cpp b/compiler/luci-interpreter/src/kernels/Add.test.cpp
new file mode 100644
index 000000000..54e1cc672
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Add.test.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Add.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+// for quantized Add, the error shouldn't exceed step
+float GetTolerance(float min, float max)
+{
+ float kQuantizedStep = (max - min) / 255.0;
+ return kQuantizedStep;
+}
+
+TEST(AddTest, Uint8)
+{
+ std::initializer_list<int32_t> base_shape = {2, 3, 1, 2};
+ std::initializer_list<float> base_data = {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
+ 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
+ std::initializer_list<int32_t> test_shapes[] = {
+ {1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
+ std::initializer_list<float> test_data = {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
+ std::initializer_list<int32_t> output_shapes[] = {
+ {2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
+ std::vector<std::vector<float>> output_data = {
+ {-0.1f, 2.6f, -0.7f, 2.8f, 0.7f, 3.0f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
+ 1.0f, -0.8f, 0.4f, -0.6f, 1.8f, -0.2f, 1.4f, 3.0f, 0.8f, 3.0f, 2.2f, 3.0f,
+ -1.4f, 0.3f, -2.0f, 0.5f, -0.6f, 0.9f, 0.9f, -1.9f, 0.3f, -1.7f, 1.7f, -1.3f},
+ {-0.1f, 2.6f, 0.5f, 1.0f, 1.8f, -0.2f, 1.4f, 3.0f, -2.0f, 0.5f, 1.7f, -1.3f},
+ {-0.1f, 2.5f, 0.0f, 2.6f, -0.7f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
+ 1.0f, -0.9f, 1.1f, -0.8f, 0.4f, -1.5f, 1.7f, 3.0f, 2.2f, 3.0f, 2.1f, 3.0f,
+ -1.1f, 0.5f, -0.6f, 1.0f, -0.7f, 0.9f, 1.2f, -1.7f, 1.7f, -1.2f, 1.6f, -1.3f},
+ {-0.1f, 2.5f, 1.2f, 0.8f, 0.4f, -1.5f, 1.7f, 3.0f, -0.6f, 1.0f, 1.6f, -1.3f}};
+ float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
+ std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-3.f, 3.f);
+ for (int i = 0; i < output_data.size(); i++)
+ {
+ Tensor input1_tensor{
+ getElementType<uint8_t>(), base_shape, {{quant_param.first}, {quant_param.second}}, ""};
+ Tensor input2_tensor{
+ getElementType<uint8_t>(), test_shapes[i], {{quant_param.first}, {quant_param.second}}, ""};
+ std::vector<uint8_t> quantized_input1_value =
+ quantize<uint8_t>(base_data, quant_param.first, quant_param.second);
+ std::vector<uint8_t> quantized_input2_value =
+ quantize<uint8_t>(test_data, quant_param.first, quant_param.second);
+ input1_tensor.writeData(quantized_input1_value.data(),
+ quantized_input1_value.size() * sizeof(uint8_t));
+ input2_tensor.writeData(quantized_input2_value.data(),
+ quantized_input2_value.size() * sizeof(uint8_t));
+ Tensor output_tensor =
+ makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
+
+ AddParams params{};
+ params.activation = Activation::NONE;
+
+ Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor),
+ output_tensor.scale(), output_tensor.zero_point()),
+ ElementsAreArray(ArrayFloatNear(output_data[i], kQuantizedTolerance)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
+ }
+ // Re-run with exchanged inputs.
+ for (int i = 0; i < output_data.size(); i++)
+ {
+ Tensor input1_tensor{
+ getElementType<uint8_t>(), test_shapes[i], {{quant_param.first}, {quant_param.second}}, ""};
+ Tensor input2_tensor{
+ getElementType<uint8_t>(), base_shape, {{quant_param.first}, {quant_param.second}}, ""};
+ std::vector<uint8_t> quantized_input1_value =
+ quantize<uint8_t>(test_data, quant_param.first, quant_param.second);
+ std::vector<uint8_t> quantized_input2_value =
+ quantize<uint8_t>(base_data, quant_param.first, quant_param.second);
+ input1_tensor.writeData(quantized_input1_value.data(),
+ quantized_input1_value.size() * sizeof(uint8_t));
+ input2_tensor.writeData(quantized_input2_value.data(),
+ quantized_input2_value.size() * sizeof(uint8_t));
+ Tensor output_tensor =
+ makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
+
+ AddParams params{};
+ params.activation = Activation::NONE;
+
+ Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor),
+ output_tensor.scale(), output_tensor.zero_point()),
+ ElementsAreArray(ArrayFloatNear(output_data[i], kQuantizedTolerance)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
+ }
+}
+
+TEST(AddTest, Float)
+{
+ Shape base_shape = {2, 3, 1, 2};
+ std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
+ std::vector<std::vector<float>> test_outputs = {
+ {0.0f, 2.6f, 0.0f, 2.8f, 0.7f, 3.2f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
+ 1.0f, 0.0f, 0.4f, 0.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.8f, 3.3f, 2.2f, 3.7f,
+ 0.0f, 0.3f, 0.0f, 0.5f, 0.0f, 0.9f, 0.9f, 0.0f, 0.3f, 0.0f, 1.7f, 0.0f},
+ {0.0f, 2.6f, 0.5f, 1.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.0f, 0.5f, 1.7f, 0.0f},
+ {0.0f, 2.5f, 0.0f, 2.6f, 0.0f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
+ 1.0f, 0.0f, 1.1f, 0.0f, 0.4f, 0.0f, 1.7f, 3.3f, 2.2f, 3.8f, 2.1f, 3.7f,
+ 0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.9f, 1.2f, 0.0f, 1.7f, 0.0f, 1.6f, 0.0f},
+ {0.0f, 2.5f, 1.2f, 0.8f, 0.4f, 0.0f, 1.7f, 3.3f, 0.0f, 1.0f, 1.6f, 0.0f}};
+ std::vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
+ 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
+ std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
+ for (size_t i = 0; i < test_shapes.size(); ++i)
+ {
+ Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
+ Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ AddParams params{};
+ params.activation = Activation::RELU;
+
+ Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
+ << "With shape number " << i;
+ }
+ // Re-run with exchanged inputs.
+ for (size_t i = 0; i < test_shapes.size(); ++i)
+ {
+ Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
+ Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ AddParams params{};
+ params.activation = Activation::RELU;
+
+ Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
+ << "With shape number " << i;
+ }
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/ArgMax.cpp b/compiler/luci-interpreter/src/kernels/ArgMax.cpp
new file mode 100644
index 000000000..5c464ed09
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/ArgMax.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/ArgMax.h"
+#include "kernels/Utils.h"
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+ArgMax::ArgMax(const Tensor *input, const Tensor *axis, Tensor *output, const ArgMaxParams &params)
+ : KernelWithParams<ArgMaxParams>({input, axis}, {output}, params)
+{
+}
+
+void ArgMax::configure()
+{
+ assert(axis()->element_type() == DataType::S32 || axis()->element_type() == DataType::S64);
+ assert(input()->shape().num_dims() >= 1);
+ const Shape &input_shape = input()->shape();
+ const int num_dims = input_shape.num_dims();
+ Shape output_shape(num_dims - 1);
+
+ // If axis value is negative, then update by adding input_shape's num_dims.
+ // If updated value also negative, then assert.
+ assert(axis()->shape().num_elements() == 1);
+ int axis_value = getTensorData<int32_t>(axis())[0];
+ if (axis_value < 0)
+ axis_value = axis_value + num_dims;
+ assert(axis_value >= 0);
+
+ int j = 0;
+ for (int i = 0; i < num_dims; i++)
+ {
+ if (i == axis_value)
+ continue;
+ output_shape.dim(j++) = input_shape.dim(i);
+ }
+
+ assert(output()->element_type() == _params.output_type);
+
+ output()->resize(output_shape);
+}
+
+void ArgMax::execute() const
+{
+
+#define TF_LITE_ARG_MAX(data_type, axis_type, output_type) \
+ tflite::optimized_ops::ArgMinMax(getTensorShape(input()), getTensorData<data_type>(input()), \
+ getTensorData<axis_type>(axis()), getTensorShape(output()), \
+ getTensorData<output_type>(output()), \
+ std::greater<data_type>())
+ if (axis()->element_type() == DataType::S32)
+ {
+ switch (_params.output_type)
+ {
+ case DataType::S32:
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ TF_LITE_ARG_MAX(float, int32_t, int32_t);
+ break;
+ case DataType::U8:
+ TF_LITE_ARG_MAX(uint8_t, int32_t, int32_t);
+ break;
+ default:
+ throw std::runtime_error("Unsupported input type.");
+ }
+ break;
+ case DataType::S64:
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ TF_LITE_ARG_MAX(float, int32_t, int64_t);
+ break;
+ case DataType::U8:
+ TF_LITE_ARG_MAX(uint8_t, int32_t, int64_t);
+ break;
+ default:
+ throw std::runtime_error("Unsupported input type.");
+ }
+ break;
+ default:
+ throw std::runtime_error("Unsupported output type.");
+ }
+ }
+ else
+ {
+ switch (_params.output_type)
+ {
+ case DataType::S32:
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ TF_LITE_ARG_MAX(float, int64_t, int32_t);
+ break;
+ case DataType::U8:
+ TF_LITE_ARG_MAX(uint8_t, int64_t, int32_t);
+ break;
+ default:
+ throw std::runtime_error("Unsupported input type.");
+ }
+ break;
+ case DataType::S64:
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ TF_LITE_ARG_MAX(float, int64_t, int64_t);
+ break;
+ case DataType::U8:
+ TF_LITE_ARG_MAX(uint8_t, int64_t, int64_t);
+ break;
+ default:
+ throw std::runtime_error("Unsupported input type.");
+ }
+ break;
+ default:
+ throw std::runtime_error("Unsupported output type.");
+ }
+ }
+#undef TF_LITE_ARG_MAX
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/ArgMax.h b/compiler/luci-interpreter/src/kernels/ArgMax.h
new file mode 100644
index 000000000..c851b5891
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/ArgMax.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_ARGMAX_H
+#define LUCI_INTERPRETER_KERNELS_ARGMAX_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class ArgMax : public KernelWithParams<ArgMaxParams>
+{
+public:
+ ArgMax(const Tensor *input, const Tensor *axis, Tensor *output, const ArgMaxParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ const Tensor *axis() const { return _inputs[1]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_ARGMAX_H
diff --git a/compiler/luci-interpreter/src/kernels/ArgMax.test.cpp b/compiler/luci-interpreter/src/kernels/ArgMax.test.cpp
new file mode 100644
index 000000000..5ac3b2f7a
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/ArgMax.test.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/ArgMax.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+template <typename T1, typename T2>
+void Check(std::initializer_list<int32_t> input_shape,
+ std::initializer_list<int32_t> dimension_shape,
+ std::initializer_list<int32_t> output_shape, std::initializer_list<T1> input_data,
+ std::initializer_list<int32_t> dimension_data, std::initializer_list<T2> output_data)
+{
+
+ Tensor input_tensor{getElementType<T1>(), input_shape, {}, ""};
+ input_tensor.writeData(input_data.begin(), input_data.size() * sizeof(T1));
+ Tensor dimension_tensor{DataType::S32, dimension_shape, {}, ""};
+ dimension_tensor.writeData(dimension_data.begin(), dimension_data.size() * sizeof(int32_t));
+
+ Tensor output_tensor = makeOutputTensor(getElementType<T2>());
+
+ ArgMaxParams params{};
+ params.output_type = getElementType<T2>();
+ ArgMax kernel(&input_tensor, &dimension_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<T2>(output_tensor), ::testing::ElementsAreArray(output_data));
+ EXPECT_THAT(extractTensorShape(output_tensor), output_shape);
+}
+
+template <typename T> class ArgMaxTest : public ::testing::Test
+{
+};
+
+using DataTypes = ::testing::Types<float, uint8_t>;
+TYPED_TEST_CASE(ArgMaxTest, DataTypes);
+
+TYPED_TEST(ArgMaxTest, Simple)
+{
+ Check<TypeParam, int32_t>(/*input_shape=*/{1, 1, 1, 4}, /*dimension_shape=*/{},
+ /*output_shape=*/{1, 1, 1},
+ /*input_data=*/
+ {
+ 1, 9, 7, 3,
+ },
+ /*dimension_data=*/{3}, /*output_data=*/{1});
+ Check<TypeParam, int64_t>(/*input_shape=*/{1, 1, 1, 4}, /*dimension_shape=*/{},
+ /*output_shape=*/{1, 1, 1},
+ /*input_data=*/
+ {
+ 1, 9, 7, 3,
+ },
+ /*dimension_data=*/{3}, /*output_data=*/{1});
+}
+
+TYPED_TEST(ArgMaxTest, MultiDimensions)
+{
+ Check<TypeParam, int32_t>(/*input_shape=*/{1, 1, 2, 4}, /*dimension_shape=*/{},
+ /*output_shape=*/{1, 1, 2},
+ /*input_data=*/
+ {
+ 1, 2, 7, 8, 1, 9, 7, 3,
+ },
+ /*dimension_data=*/{3}, /*output_data=*/{3, 1});
+ Check<TypeParam, int64_t>(/*input_shape=*/{1, 1, 2, 4}, /*dimension_shape=*/{},
+ /*output_shape=*/{1, 1, 2},
+ /*input_data=*/
+ {
+ 1, 2, 7, 8, 1, 9, 7, 3,
+ },
+ /*dimension_data=*/{3}, /*output_data=*/{3, 1});
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/AveragePool2D.cpp b/compiler/luci-interpreter/src/kernels/AveragePool2D.cpp
new file mode 100644
index 000000000..6d1b8ead4
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/AveragePool2D.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/AveragePool2D.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/pooling.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+AveragePool2D::AveragePool2D(const Tensor *input, Tensor *output, const Pool2DParams &params)
+ : KernelWithParams<Pool2DParams>({input}, {output}, params)
+{
+}
+
+void AveragePool2D::configure()
+{
+ const Shape &input_shape = input()->shape();
+
+ const int32_t batches = input_shape.dim(0);
+ const int32_t input_height = input_shape.dim(1);
+ const int32_t input_width = input_shape.dim(2);
+ const int32_t depth = input_shape.dim(3);
+
+ const int32_t output_height = computeOutputSize(_params.padding, input_height,
+ _params.filter_height, _params.stride_height);
+ const int32_t output_width =
+ computeOutputSize(_params.padding, input_width, _params.filter_width, _params.stride_width);
+
+ _padding_height =
+ computePadding(_params.stride_height, 1, input_height, _params.filter_height, output_height);
+ _padding_width =
+ computePadding(_params.stride_width, 1, input_width, _params.filter_width, output_width);
+
+ output()->resize({batches, output_height, output_width, depth});
+}
+
+void AveragePool2D::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ evalFloat();
+ break;
+ case DataType::U8:
+ evalQuantized();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void AveragePool2D::evalFloat() const
+{
+ float activation_min{};
+ float activation_max{};
+ calculateActivationRange(_params.activation, &activation_min, &activation_max);
+
+ tflite::PoolParams params{};
+ params.padding_values.height = _padding_height;
+ params.padding_values.width = _padding_width;
+ params.stride_height = _params.stride_height;
+ params.stride_width = _params.stride_width;
+ params.filter_height = _params.filter_height;
+ params.filter_width = _params.filter_width;
+ params.float_activation_min = activation_min;
+ params.float_activation_max = activation_max;
+
+ tflite::reference_ops::AveragePool(params, getTensorShape(input()), getTensorData<float>(input()),
+ getTensorShape(output()), getTensorData<float>(output()));
+}
+
+void AveragePool2D::evalQuantized() const
+{
+ int32_t activation_min{};
+ int32_t activation_max{};
+ calculateActivationRangeQuantized(_params.activation, output(), &activation_min, &activation_max);
+
+ tflite::PoolParams params{};
+ params.padding_values.height = _padding_height;
+ params.padding_values.width = _padding_width;
+ params.stride_height = _params.stride_height;
+ params.stride_width = _params.stride_width;
+ params.filter_height = _params.filter_height;
+ params.filter_width = _params.filter_width;
+ params.quantized_activation_min = activation_min;
+ params.quantized_activation_max = activation_max;
+
+ tflite::reference_ops::AveragePool(params, getTensorShape(input()),
+ getTensorData<uint8_t>(input()), getTensorShape(output()),
+ getTensorData<uint8_t>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/AveragePool2D.h b/compiler/luci-interpreter/src/kernels/AveragePool2D.h
new file mode 100644
index 000000000..91f212b3a
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/AveragePool2D.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_AVERAGEPOOL2D_H
+#define LUCI_INTERPRETER_KERNELS_AVERAGEPOOL2D_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class AveragePool2D : public KernelWithParams<Pool2DParams>
+{
+public:
+ AveragePool2D(const Tensor *input, Tensor *output, const Pool2DParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+ void evalQuantized() const;
+
+private:
+ int32_t _padding_height{};
+ int32_t _padding_width{};
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_AVERAGEPOOL2D_H
diff --git a/compiler/luci-interpreter/src/kernels/AveragePool2D.test.cpp b/compiler/luci-interpreter/src/kernels/AveragePool2D.test.cpp
new file mode 100644
index 000000000..7160e49e9
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/AveragePool2D.test.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/AveragePool2D.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(AveragePool2DTest, Float)
+{
+ Shape input_shape{1, 3, 5, 1};
+ std::vector<float> input_data{
+ -4, -3, -2, -1, 0, //
+ 1, 2, 3, 4, 5, //
+ 6, 7, 8, 9, 10, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.filter_height = 2;
+ params.filter_width = 3;
+ params.stride_height = 1;
+ params.stride_width = 2;
+ params.activation = Activation::RELU6;
+
+ AveragePool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 0, 1.5, //
+ 4.5, 6, //
+ };
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 2, 1}));
+}
+
+TEST(AveragePool2DTest, Uint8_0)
+{
+ std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-15.9375f, 15.9375f);
+ Tensor input_tensor{DataType::U8, {1, 2, 4, 1}, {{quant_param.first}, {quant_param.second}}, ""};
+ Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second);
+
+ std::vector<uint8_t> quant_input = quantize<uint8_t>(
+ {
+ 0, -6, 12, 4, //
+ -3, -2, 10, 7, //
+ },
+ quant_param.first, quant_param.second);
+ input_tensor.writeData(quant_input.data(), quant_input.size() * sizeof(uint8_t));
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 2;
+ params.stride_width = 2;
+ params.activation = Activation::RELU6;
+
+ AveragePool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(dequantize(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+ output_tensor.zero_point()),
+ ElementsAreArray(ArrayFloatNear({0.0, 6.0})));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 1, 2, 1}));
+}
+
+TEST(AveragePool2DTest, Uint8_1)
+{
+ std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-15.9375f, 15.9375f);
+ Tensor input_tensor{DataType::U8, {1, 2, 4, 1}, {{quant_param.first}, {quant_param.second}}, ""};
+ Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second);
+
+ std::vector<uint8_t> quant_input = quantize<uint8_t>(
+ {
+ 0, 6, 12, 4, //
+ 3, 2, 10, 7, //
+ },
+ quant_param.first, quant_param.second);
+ input_tensor.writeData(quant_input.data(), quant_input.size() * sizeof(uint8_t));
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 2;
+ params.stride_width = 2;
+ params.activation = Activation::RELU6;
+
+ AveragePool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(dequantize(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+ output_tensor.zero_point()),
+ ElementsAreArray(ArrayFloatNear({2.75, 6.0})));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 1, 2, 1}));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/CMakeLists.txt b/compiler/luci-interpreter/src/kernels/CMakeLists.txt
new file mode 100644
index 000000000..fe3623135
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/CMakeLists.txt
@@ -0,0 +1,106 @@
+find_package(Threads REQUIRED)
+nnas_find_package(GTest REQUIRED)
+
+set(SOURCES
+ Add.h
+ Add.cpp
+ ArgMax.h
+ ArgMax.cpp
+ AveragePool2D.h
+ AveragePool2D.cpp
+ Concatenation.h
+ Concatenation.cpp
+ Conv2D.h
+ Conv2D.cpp
+ DepthwiseConv2D.h
+ DepthwiseConv2D.cpp
+ Elu.h
+ Elu.cpp
+ FullyConnected.h
+ FullyConnected.cpp
+ If.h
+ If.cpp
+ L2Normalize.h
+ L2Normalize.cpp
+ L2Pool2D.h
+ L2Pool2D.cpp
+ LeakyRelu.h
+ LeakyRelu.cpp
+ LocalResponseNormalization.h
+ LocalResponseNormalization.cpp
+ Logistic.h
+ Logistic.cpp
+ MaxPool2D.h
+ MaxPool2D.cpp
+ Mean.h
+ Mean.cpp
+ Mul.h
+ Mul.cpp
+ Pad.h
+ Pad.cpp
+ Reshape.h
+ Reshape.cpp
+ Softmax.h
+ Softmax.cpp
+ SpaceToDepth.h
+ SpaceToDepth.cpp
+ Split.h
+ Split.cpp
+ StridedSlice.h
+ StridedSlice.cpp
+ Squeeze.h
+ Squeeze.cpp
+ Transpose.h
+ Transpose.cpp
+ TransposeConv.h
+ TransposeConv.cpp
+ Unpack.h
+ Unpack.cpp)
+
+list(APPEND SOURCES Utils.h Utils.cpp)
+
+add_library(luci_interpreter_kernels STATIC ${SOURCES})
+set_target_properties(luci_interpreter_kernels PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(luci_interpreter_kernels PUBLIC ${LUCI_INTERPRETER_SOURCE_DIR})
+target_include_directories(luci_interpreter_kernels SYSTEM PRIVATE
+ "${TensorFlowGEMMLowpSource_DIR}"
+ "${TensorFlowEigenSource_DIR}"
+ "${TensorFlowSource_DIR}")
+target_link_libraries(luci_interpreter_kernels
+ PUBLIC luci_interpreter_core
+ PRIVATE nncc_common Threads::Threads)
+
+
+set(TEST_SOURCES
+ Add.test.cpp
+ ArgMax.test.cpp
+ AveragePool2D.test.cpp
+ Concatenation.test.cpp
+ Conv2D.test.cpp
+ DepthwiseConv2D.test.cpp
+ Elu.test.cpp
+ FullyConnected.test.cpp
+ If.test.cpp
+ L2Normalize.test.cpp
+ L2Pool2D.test.cpp
+ LeakyRelu.test.cpp
+ LocalResponseNormalization.test.cpp
+ Logistic.test.cpp
+ MaxPool2D.test.cpp
+ Mean.test.cpp
+ Mul.test.cpp
+ Pad.test.cpp
+ Reshape.test.cpp
+ Softmax.test.cpp
+ SpaceToDepth.test.cpp
+ Split.test.cpp
+ StridedSlice.test.cpp
+ Squeeze.test.cpp
+ Transpose.test.cpp
+ TransposeConv.test.cpp
+ Unpack.test.cpp)
+
+list(APPEND TEST_SOURCES TestUtils.h TestUtils.cpp)
+
+GTest_AddTest(luci_interpreter_kernels_test ${TEST_SOURCES})
+target_link_libraries(luci_interpreter_kernels_test luci_interpreter_kernels)
diff --git a/compiler/luci-interpreter/src/kernels/Concatenation.cpp b/compiler/luci-interpreter/src/kernels/Concatenation.cpp
new file mode 100644
index 000000000..812ab7609
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Concatenation.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Concatenation.h"
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+Concatenation::Concatenation(std::vector<const Tensor *> inputs, Tensor *output,
+ const ConcatenationParams &params)
+ : KernelWithParams<ConcatenationParams>(std::move(inputs), {output}, params)
+{
+}
+
+void Concatenation::configure()
+{
+ const int num_inputs = _inputs.size();
+ assert(num_inputs > 0);
+ const Tensor *t0 = _inputs[0];
+
+ int axis = _params.axis;
+ if (axis < 0)
+ axis += t0->shape().num_dims();
+ assert(axis >= 0 && axis < t0->shape().num_dims());
+
+ int32_t sum_axis = t0->shape().dim(axis);
+ for (int i = 1; i < num_inputs; ++i)
+ {
+ const Tensor *tensor = _inputs[i];
+ assert(tensor->element_type() == t0->element_type());
+ assert(tensor->shape().num_dims() == t0->shape().num_dims());
+ for (int d = 0; d < t0->shape().num_dims(); ++d)
+ {
+ if (d == axis)
+ {
+ sum_axis += tensor->shape().dim(axis);
+ }
+ else
+ {
+ assert(tensor->shape().dim(d) == t0->shape().dim(d));
+ }
+ }
+ }
+
+ Shape output_shape = t0->shape();
+ output_shape.dim(axis) = sum_axis;
+
+ // TODO S8 type needs more checking: quantization parameters of all input tensors and the output
+ // tensor should be the same. Note that there is no such requirement for U8 type.
+ if (t0->element_type() == DataType::S8)
+ throw std::runtime_error("Unsupported type.");
+
+ output()->resize(output_shape);
+}
+
+void Concatenation::execute() const
+{
+ switch (_inputs[0]->element_type())
+ {
+ case DataType::FLOAT32:
+ evalGeneric<float>();
+ break;
+ case DataType::U8:
+ evalQuantized();
+ break;
+ case DataType::S8:
+ evalGeneric<int8_t>();
+ break;
+ case DataType::S32:
+ evalGeneric<int32_t>();
+ break;
+ case DataType::S64:
+ evalGeneric<int64_t>();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+template <typename T> void Concatenation::evalGeneric() const
+{
+ int axis = _params.axis;
+ if (axis < 0)
+ axis += output()->shape().num_dims();
+
+ VectorOfTensors<T, true> inputs(_inputs);
+ tflite::ConcatenationParams params{};
+ params.axis = axis;
+ params.inputs_count = _inputs.size();
+ tflite::reference_ops::Concatenation(params, inputs.shapes(), inputs.data(),
+ getTensorShape(output()), getTensorData<T>(output()));
+}
+
+void Concatenation::evalQuantized() const
+{
+ int axis = _params.axis;
+ if (axis < 0)
+ axis += output()->shape().num_dims();
+
+ VectorOfQuantizedTensors<true> inputs(_inputs);
+ tflite::ConcatenationParams params{};
+ params.axis = axis;
+ params.input_zeropoint = inputs.zero_point();
+ params.input_scale = inputs.scale();
+ params.inputs_count = _inputs.size();
+ params.output_zeropoint = output()->zero_point();
+ params.output_scale = output()->scale();
+
+ tflite::reference_ops::ConcatenationWithScaling(params, inputs.shapes(), inputs.data(),
+ getTensorShape(output()),
+ getTensorData<uint8_t>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Concatenation.h b/compiler/luci-interpreter/src/kernels/Concatenation.h
new file mode 100644
index 000000000..b48c8ed1e
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Concatenation.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_CONCATENATION_H
+#define LUCI_INTERPRETER_KERNELS_CONCATENATION_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Concatenation : public KernelWithParams<ConcatenationParams>
+{
+public:
+ Concatenation(std::vector<const Tensor *> inputs, Tensor *output,
+ const ConcatenationParams &params);
+
+ const Tensor *input(int index) const { return _inputs[index]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ template <typename T> void evalGeneric() const;
+ void evalQuantized() const;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_CONCATENATION_H
diff --git a/compiler/luci-interpreter/src/kernels/Concatenation.test.cpp b/compiler/luci-interpreter/src/kernels/Concatenation.test.cpp
new file mode 100644
index 000000000..d9a7097d0
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Concatenation.test.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Concatenation.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(ConcatenationTest, Float)
+{
+ std::vector<float> input1_data{1, 2, 3, 4, 5, 6};
+ std::vector<float> input2_data{7, 8, 9, 10, 11, 12};
+ Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({2, 3}, input1_data);
+ Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>({2, 3}, input2_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+ ConcatenationParams params{};
+
+ // Try different 'axis' and expect different results.
+ {
+ params.axis = 0;
+
+ Concatenation kernel({&input1_tensor, &input2_tensor}, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})));
+ }
+ {
+ params.axis = -2; // Same as '0'.
+
+ Concatenation kernel({&input1_tensor, &input2_tensor}, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})));
+ }
+ {
+ params.axis = 1;
+
+ Concatenation kernel({&input1_tensor, &input2_tensor}, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear({1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12})));
+ }
+ {
+ params.axis = -1; // Same as '1'.
+
+ Concatenation kernel({&input1_tensor, &input2_tensor}, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear({1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12})));
+ }
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Conv2D.cpp b/compiler/luci-interpreter/src/kernels/Conv2D.cpp
new file mode 100644
index 000000000..60e6134ab
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Conv2D.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Conv2D.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+
+#include <stdexcept>
+#include <thread>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+Conv2D::Conv2D(const Tensor *input, const Tensor *filter, const Tensor *bias, Tensor *output,
+ const Conv2DParams &params)
+ : KernelWithParams<Conv2DParams>({input, filter, bias}, {output}, params)
+{
+}
+
+void Conv2D::configure()
+{
+ // TensorFlow Lite (as of v2.2.0) supports the following combinations of types:
+ // | input filter bias output |
+ // ----+---------------------------+
+ // (1) | float float float float |
+ // (2) | float int8 float float | hybrid
+ // (3) | uint8 uint8 int32 uint8 | quantized
+ // (4) | int8 int8 int32 int8 | quantized per channel
+ //
+ // We only support (1) and (3) for now.
+ if (input()->element_type() == DataType::FLOAT32 && filter()->element_type() == DataType::FLOAT32)
+ {
+ assert(bias() == nullptr || bias()->element_type() == DataType::FLOAT32);
+ }
+ else if (input()->element_type() == DataType::U8 && filter()->element_type() == DataType::U8)
+ {
+ assert(bias() == nullptr || bias()->element_type() == DataType::S32);
+ }
+ else
+ {
+ throw std::runtime_error("Unsupported type.");
+ }
+ assert(output()->element_type() == input()->element_type());
+
+ const Shape &input_shape = input()->shape();
+ const Shape &filter_shape = filter()->shape();
+ assert(input_shape.num_dims() == 4 && filter_shape.num_dims() == 4);
+
+ const int32_t batches = input_shape.dim(0);
+ const int32_t input_height = input_shape.dim(1);
+ const int32_t input_width = input_shape.dim(2);
+ const int32_t output_depth = filter_shape.dim(0);
+ const int32_t filter_height = filter_shape.dim(1);
+ const int32_t filter_width = filter_shape.dim(2);
+ assert(filter_shape.dim(3) == input_shape.dim(3));
+
+ assert(bias() == nullptr ||
+ (bias()->shape().num_dims() == 1 && bias()->shape().dim(0) == output_depth));
+
+ const int32_t output_height =
+ computeOutputSize(_params.padding, input_height, filter_height, _params.stride_height,
+ _params.dilation_height_factor);
+ const int32_t output_width =
+ computeOutputSize(_params.padding, input_width, filter_width, _params.stride_width,
+ _params.dilation_width_factor);
+
+ _padding_height = computePadding(_params.stride_height, _params.dilation_height_factor,
+ input_height, filter_height, output_height);
+ _padding_width = computePadding(_params.stride_width, _params.dilation_width_factor, input_width,
+ filter_width, output_width);
+
+ output()->resize({batches, output_height, output_width, output_depth});
+
+ // Allocate tensor for Im2Col, if needed.
+ // The checks here should be aligned with the actual implementation.
+ const bool need_dilated_im2col =
+ _params.dilation_height_factor != 1 || _params.dilation_width_factor != 1;
+ const bool need_non_dilated_im2col = _params.stride_height != 1 || _params.stride_width != 1 ||
+ filter_height != 1 || filter_width != 1;
+ const bool need_im2col = need_dilated_im2col || need_non_dilated_im2col;
+ if (need_im2col)
+ {
+ const int input_depth = input_shape.dim(3);
+ Shape im2col_shape{batches, output_height, output_width,
+ input_depth * filter_height * filter_width};
+ _im2col =
+ std::make_unique<Tensor>(input()->element_type(), im2col_shape, AffineQuantization{}, "");
+ }
+}
+
+void Conv2D::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ if (filter()->element_type() == DataType::FLOAT32)
+ {
+ evalFloat();
+ break;
+ }
+ throw std::runtime_error("Unsupported type.");
+ case DataType::U8:
+ evalQuantized();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void Conv2D::evalFloat() const
+{
+ float activation_min{};
+ float activation_max{};
+ calculateActivationRange(_params.activation, &activation_min, &activation_max);
+
+ tflite::ConvParams params{};
+ params.padding_values.height = _padding_height;
+ params.padding_values.width = _padding_width;
+ params.stride_height = _params.stride_height;
+ params.stride_width = _params.stride_width;
+ params.dilation_height_factor = _params.dilation_height_factor;
+ params.dilation_width_factor = _params.dilation_width_factor;
+ params.float_activation_min = activation_min;
+ params.float_activation_max = activation_max;
+
+ tflite::optimized_ops::Conv(params, getTensorShape(input()), getTensorData<float>(input()),
+ getTensorShape(filter()), getTensorData<float>(filter()),
+ getTensorShape(bias()), getTensorData<float>(bias()),
+ getTensorShape(output()), getTensorData<float>(output()),
+ getTensorShape(_im2col.get()), getTensorData<float>(_im2col.get()));
+}
+
+void Conv2D::evalQuantized() const
+{
+ const auto input_scale = static_cast<double>(input()->scale());
+ const auto filter_scale = static_cast<double>(filter()->scale());
+ const auto output_scale = static_cast<double>(output()->scale());
+
+ const double real_multiplier = input_scale * filter_scale / output_scale;
+ int32_t output_multiplier{};
+ int output_shift{};
+ quantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
+
+ int32_t activation_min{};
+ int32_t activation_max{};
+ calculateActivationRangeQuantized(_params.activation, output(), &activation_min, &activation_max);
+
+ tflite::ConvParams params{};
+ params.padding_values.height = _padding_height;
+ params.padding_values.width = _padding_width;
+ params.stride_height = _params.stride_height;
+ params.stride_width = _params.stride_width;
+ params.dilation_height_factor = _params.dilation_height_factor;
+ params.dilation_width_factor = _params.dilation_width_factor;
+ // The kernel expects input and filter zero points to be negated.
+ params.input_offset = -input()->zero_point(); // Note the '-'.
+ params.weights_offset = -filter()->zero_point(); // Note the '-'.
+ params.output_offset = output()->zero_point();
+ params.output_multiplier = output_multiplier;
+ params.output_shift = output_shift;
+ params.quantized_activation_min = activation_min;
+ params.quantized_activation_max = activation_max;
+
+ // TODO This should only be done once (although it takes only a few microseconds).
+ // Also, the user should be able to adjust the number of threads.
+ auto gemmlowp_context = std::make_unique<gemmlowp::GemmContext>();
+ gemmlowp_context->set_max_num_threads(static_cast<int>(std::thread::hardware_concurrency()));
+
+ tflite::optimized_ops::Conv(
+ params, getTensorShape(input()), getTensorData<uint8_t>(input()), getTensorShape(filter()),
+ getTensorData<uint8_t>(filter()), getTensorShape(bias()), getTensorData<int32_t>(bias()),
+ getTensorShape(output()), getTensorData<uint8_t>(output()), getTensorShape(_im2col.get()),
+ getTensorData<uint8_t>(_im2col.get()), gemmlowp_context.get());
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Conv2D.h b/compiler/luci-interpreter/src/kernels/Conv2D.h
new file mode 100644
index 000000000..69e309852
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Conv2D.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_CONV2D_H
+#define LUCI_INTERPRETER_KERNELS_CONV2D_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+#include <memory>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Conv2D : public KernelWithParams<Conv2DParams>
+{
+public:
+ Conv2D(const Tensor *input, const Tensor *filter, const Tensor *bias, Tensor *output,
+ const Conv2DParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ const Tensor *filter() const { return _inputs[1]; }
+ const Tensor *bias() const { return _inputs[2]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+ void evalQuantized() const;
+
+private:
+ std::unique_ptr<Tensor> _im2col;
+ int32_t _padding_height{};
+ int32_t _padding_width{};
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_CONV2D_H
diff --git a/compiler/luci-interpreter/src/kernels/Conv2D.test.cpp b/compiler/luci-interpreter/src/kernels/Conv2D.test.cpp
new file mode 100644
index 000000000..ef9ace903
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Conv2D.test.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Conv2D.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(Conv2DTest, Float)
+{
+ Shape input_shape{1, 4, 3, 2};
+ Shape filter_shape{2, 2, 2, 2};
+ Shape bias_shape{2};
+ std::vector<float> input_data{
+ 1, 2, 3, 4, 5, 6, // row = 0
+ 7, 8, 9, 10, 11, 12, // row = 1
+ 13, 14, 15, 16, 17, 18, // row = 2
+ 19, 20, 21, 22, 23, 24, // row = 3
+ };
+ std::vector<float> filter_data{
+ 1, 2, -3, -4, // out = 0, row = 0
+ -5, 6, -7, 8, // out = 1, row = 0
+ 4, -2, 3, -1, // out = 0, row = 1
+ -8, -6, 7, 5, // out = 1, row = 1
+ };
+ std::vector<float> bias_data{1, 2};
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor filter_tensor = makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data);
+ Tensor bias_tensor = makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Conv2DParams params{};
+ params.padding = Padding::VALID;
+ params.stride_height = 2;
+ params.stride_width = 1;
+ params.dilation_height_factor = 1;
+ params.dilation_width_factor = 1;
+ params.activation = Activation::RELU;
+
+ Conv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 11, 16, 7, 20, // row = 0
+ 0, 40, 0, 44, // row = 1
+ };
+ std::vector<int32_t> ref_output_shape{1, 2, 2, 2};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+TEST(Conv2DTest, FloatCheck)
+{
+ Shape input_shape{2, 2, 4, 1};
+ Shape filter_shape{3, 2, 2, 1};
+ Shape bias_shape{3};
+ std::vector<float> input_data{
+ // First batch
+ 1, 1, 1, 1, // row = 1
+ 2, 2, 2, 2, // row = 2
+ // Second batch
+ 1, 2, 3, 4, // row = 1
+ 1, 2, 3, 4, // row = 2
+ };
+ std::vector<float> filter_data{
+ 1, 2, 3, 4, // first 2x2 filter
+ -1, 1, -1, 1, // second 2x2 filter
+ -1, -1, 1, 1, // third 2x2 filter
+ };
+ std::vector<float> bias_data{1, 2, 3};
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor filter_tensor = makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data);
+ Tensor bias_tensor = makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Conv2DParams params{};
+ params.padding = Padding::VALID;
+ params.stride_height = 2;
+ params.stride_width = 2;
+ params.dilation_height_factor = 1;
+ params.dilation_width_factor = 1;
+ params.activation = Activation::NONE;
+
+ Conv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 18, 2, 5, // first batch, left
+ 18, 2, 5, // first batch, right
+ 17, 4, 3, // second batch, left
+ 37, 4, 3, // second batch, right
+ };
+ std::vector<int32_t> ref_output_shape{2, 1, 2, 3};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+TEST(Conv2DTest, Uint8)
+{
+ std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(-63.5, 64);
+ std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
+ Shape bias_shape = {3};
+ Tensor input_tensor{
+ DataType::U8, {2, 2, 4, 1}, {{input_quant_param.first}, {input_quant_param.second}}, ""};
+ Tensor filter_tensor{
+ DataType::U8, {3, 2, 2, 1}, {{input_quant_param.first}, {input_quant_param.second}}, ""};
+ Tensor bias_tensor{
+ DataType::S32, bias_shape, {{input_quant_param.first * input_quant_param.first}, {0}}, ""};
+ Tensor output_tensor =
+ makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
+ std::vector<uint8_t> quantized_input = quantize<uint8_t>(
+ {
+ // First batch
+ 1, 1, 1, 1, // row = 1
+ 2, 2, 2, 2, // row = 2
+ // Second batch
+ 1, 2, 3, 4, // row = 1
+ 1, 2, 3, 4, // row = 2
+ },
+ input_quant_param.first, input_quant_param.second);
+ std::vector<uint8_t> quantized_filter = quantize<uint8_t>(
+ {
+ 1, 2, 3, 4, // first 2x2 filter
+ -1, 1, -1, 1, // second 2x2 filter
+ -1, -1, 1, 1, // third 2x2 filter
+ },
+ input_quant_param.first, input_quant_param.second);
+ std::vector<int32_t> bias_data =
+ quantize<int32_t>({1, 2, 3}, input_quant_param.first * input_quant_param.first, 0);
+ input_tensor.writeData(quantized_input.data(), quantized_input.size() * sizeof(uint8_t));
+ filter_tensor.writeData(quantized_filter.data(), quantized_filter.size() * sizeof(uint8_t));
+ bias_tensor.writeData(bias_data.data(), bias_data.size() * sizeof(int32_t));
+
+ Conv2DParams params{};
+ params.padding = Padding::VALID;
+ params.stride_height = 2;
+ params.stride_width = 2;
+ params.dilation_height_factor = 1;
+ params.dilation_width_factor = 1;
+ params.activation = Activation::NONE;
+
+ Conv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 18, 2, 5, // first batch, left
+ 18, 2, 5, // first batch, right
+ 17, 4, 3, // second batch, left
+ 37, 4, 3, // second batch, right
+ };
+ std::vector<int32_t> ref_output_shape{2, 1, 2, 3};
+ EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor),
+ output_quant_param.first, output_quant_param.second),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.cpp b/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.cpp
new file mode 100644
index 000000000..b01a5e086
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/DepthwiseConv2D.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h>
+#include <tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+DepthwiseConv2D::DepthwiseConv2D(const Tensor *input, const Tensor *filter, const Tensor *bias,
+ Tensor *output, const DepthwiseConv2DParams &params)
+ : KernelWithParams<DepthwiseConv2DParams>({input, filter, bias}, {output}, params)
+{
+}
+
+void DepthwiseConv2D::configure()
+{
+ // TensorFlow Lite (as of v2.2.0) supports the following combinations of types:
+ // | input filter bias output |
+ // ----+---------------------------+
+ // (1) | float float float float |
+ // (2) | float int8 float float | hybrid
+ // (3) | uint8 uint8 int32 uint8 | quantized
+ // (4) | int8 int8 int32 int8 | quantized per channel
+ // (5) | int16 int8 int64 int16 | quantized per channel 16x8
+ //
+ // We only support (1) and (3) for now.
+ if (input()->element_type() == DataType::FLOAT32 && filter()->element_type() == DataType::FLOAT32)
+ {
+ assert(bias() == nullptr || bias()->element_type() == DataType::FLOAT32);
+ }
+ else if (input()->element_type() == DataType::U8 && filter()->element_type() == DataType::U8)
+ {
+ assert(bias() == nullptr || bias()->element_type() == DataType::S32);
+ }
+ else
+ {
+ throw std::runtime_error("Unsupported type.");
+ }
+ assert(output()->element_type() == input()->element_type());
+
+ const Shape &input_shape = input()->shape();
+ const Shape &filter_shape = filter()->shape();
+ assert(input_shape.num_dims() == 4 && filter_shape.num_dims() == 4);
+
+ const int32_t batches = input_shape.dim(0);
+ const int32_t input_height = input_shape.dim(1);
+ const int32_t input_width = input_shape.dim(2);
+ // Filter format: [1, H, W, O].
+ assert(filter_shape.dim(0) == 1);
+ const int32_t filter_height = filter_shape.dim(1);
+ const int32_t filter_width = filter_shape.dim(2);
+ const int32_t channels_out = filter_shape.dim(3);
+
+ assert(bias() == nullptr ||
+ (bias()->shape().num_dims() == 1 && bias()->shape().dim(0) == channels_out));
+
+ const int32_t output_height =
+ computeOutputSize(_params.padding, input_height, filter_height, _params.stride_height,
+ _params.dilation_height_factor);
+ const int32_t output_width =
+ computeOutputSize(_params.padding, input_width, filter_width, _params.stride_width,
+ _params.dilation_width_factor);
+
+ _padding_height = computePadding(_params.stride_height, _params.dilation_height_factor,
+ input_height, filter_height, output_height);
+ _padding_width = computePadding(_params.stride_width, _params.dilation_width_factor, input_width,
+ filter_width, output_width);
+
+ output()->resize({batches, output_height, output_width, channels_out});
+}
+
+void DepthwiseConv2D::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ if (filter()->element_type() == DataType::FLOAT32)
+ {
+ evalFloat();
+ break;
+ }
+ throw std::runtime_error("Unsupported type.");
+ case DataType::U8:
+ evalQuantized();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void DepthwiseConv2D::evalFloat() const
+{
+ float activation_min{};
+ float activation_max{};
+ calculateActivationRange(_params.activation, &activation_min, &activation_max);
+
+ tflite::DepthwiseParams params{};
+ params.padding_values.height = _padding_height;
+ params.padding_values.width = _padding_width;
+ params.stride_height = _params.stride_height;
+ params.stride_width = _params.stride_width;
+ params.dilation_height_factor = _params.dilation_height_factor;
+ params.dilation_width_factor = _params.dilation_width_factor;
+ params.depth_multiplier = _params.depth_multiplier;
+ params.float_activation_min = activation_min;
+ params.float_activation_max = activation_max;
+
+ tflite::reference_ops::DepthwiseConv(
+ params, getTensorShape(input()), getTensorData<float>(input()), getTensorShape(filter()),
+ getTensorData<float>(filter()), getTensorShape(bias()), getTensorData<float>(bias()),
+ getTensorShape(output()), getTensorData<float>(output()));
+}
+
+void DepthwiseConv2D::evalQuantized() const
+{
+ const auto input_scale = static_cast<double>(input()->scale());
+ const auto filter_scale = static_cast<double>(filter()->scale());
+ const auto output_scale = static_cast<double>(output()->scale());
+
+ const double real_multiplier = input_scale * filter_scale / output_scale;
+ int32_t output_multiplier{};
+ int output_shift{};
+ quantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
+
+ int32_t activation_min{};
+ int32_t activation_max{};
+ calculateActivationRangeQuantized(_params.activation, output(), &activation_min, &activation_max);
+
+ tflite::DepthwiseParams params{};
+ params.padding_values.height = _padding_height;
+ params.padding_values.width = _padding_width;
+ params.stride_height = _params.stride_height;
+ params.stride_width = _params.stride_width;
+ params.dilation_height_factor = _params.dilation_height_factor;
+ params.dilation_width_factor = _params.dilation_width_factor;
+ params.depth_multiplier = _params.depth_multiplier;
+ // The kernel expects input and filter zero points to be negated.
+ params.input_offset = -input()->zero_point(); // Note the '-'.
+ params.weights_offset = -filter()->zero_point(); // Note the '-'.
+ params.output_offset = output()->zero_point();
+ params.output_multiplier = output_multiplier;
+ params.output_shift = output_shift;
+ params.quantized_activation_min = activation_min;
+ params.quantized_activation_max = activation_max;
+
+ tflite::reference_ops::DepthwiseConv(
+ params, getTensorShape(input()), getTensorData<uint8_t>(input()), getTensorShape(filter()),
+ getTensorData<uint8_t>(filter()), getTensorShape(bias()), getTensorData<int32_t>(bias()),
+ getTensorShape(output()), getTensorData<uint8_t>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.h b/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.h
new file mode 100644
index 000000000..62f4bff0e
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_DEPTHWISECONV2D_H
+#define LUCI_INTERPRETER_KERNELS_DEPTHWISECONV2D_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class DepthwiseConv2D : public KernelWithParams<DepthwiseConv2DParams>
+{
+public:
+ DepthwiseConv2D(const Tensor *input, const Tensor *filter, const Tensor *bias, Tensor *output,
+ const DepthwiseConv2DParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ const Tensor *filter() const { return _inputs[1]; }
+ const Tensor *bias() const { return _inputs[2]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+ void evalQuantized() const;
+
+private:
+ int32_t _padding_height{};
+ int32_t _padding_width{};
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_DEPTHWISECONV2D_H
diff --git a/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.test.cpp b/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.test.cpp
new file mode 100644
index 000000000..a9b43d864
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/DepthwiseConv2D.test.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/DepthwiseConv2D.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(DepthwiseConv2DTest, Float)
+{
+ Shape input_shape{1, 4, 2, 2};
+ Shape filter_shape{1, 2, 2, 4};
+ Shape bias_shape{4};
+ std::vector<float> input_data{
+ 1, 2, 7, 8, //
+ 3, 4, 9, 10, //
+ 5, 6, 11, 12, //
+ 13, 14, 15, 16, //
+ };
+ std::vector<float> filter_data{
+ 1, 2, 3, 4, //
+ -9, 10, -11, 12, //
+ 5, 6, 7, 8, //
+ 13, -14, 15, -16, //
+ };
+ std::vector<float> bias_data{1, 2, 3, 4};
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor filter_tensor = makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data);
+ Tensor bias_tensor = makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ DepthwiseConv2DParams params{};
+ params.padding = Padding::VALID;
+ params.depth_multiplier = 2;
+ params.stride_height = 2;
+ params.stride_width = 1;
+ params.dilation_height_factor = 1;
+ params.dilation_width_factor = 1;
+ params.activation = Activation::RELU;
+
+ DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 71, 0, 99, 0, //
+ 167, 0, 227, 28, //
+ };
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 1, 4}));
+}
+
+TEST(DepthwiseConv2DTest, Uint8)
+{
+ std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(-63.5, 64);
+ std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
+
+ Tensor input_tensor{
+ DataType::U8, {1, 3, 2, 2}, {{input_quant_param.first}, {input_quant_param.second}}, ""};
+ Tensor filter_tensor{
+ DataType::U8, {1, 2, 2, 4}, {{input_quant_param.first}, {input_quant_param.second}}, ""};
+ Tensor bias_tensor{
+ DataType::S32, {4}, {{input_quant_param.first * input_quant_param.first}, {0}}, ""};
+ Tensor output_tensor =
+ makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
+
+ std::vector<uint8_t> quant_input = quantize<uint8_t>(
+ {
+ 1, 2, 7, 8, // column 1
+ 3, 4, 9, 10, // column 2
+ 5, 6, 11, 12, // column 3
+ },
+ input_quant_param.first, input_quant_param.second);
+ std::vector<uint8_t> quant_filter = quantize<uint8_t>(
+ {
+ 1, 2, 3, 4, //
+ -9, 10, -11, 12, //
+ 5, 6, 7, 8, //
+ 13, -14, 15, -16, //
+ },
+ input_quant_param.first, input_quant_param.second);
+ std::vector<int32_t> quant_bias =
+ quantize<int32_t>({1, 2, 3, 4}, input_quant_param.first * input_quant_param.first, 0);
+
+ input_tensor.writeData(quant_input.data(), quant_input.size() * sizeof(uint8_t));
+ filter_tensor.writeData(quant_filter.data(), quant_filter.size() * sizeof(uint8_t));
+ bias_tensor.writeData(quant_bias.data(), quant_bias.size() * sizeof(int32_t));
+
+ DepthwiseConv2DParams params{};
+ params.padding = Padding::VALID;
+ params.depth_multiplier = 2;
+ params.stride_height = 1;
+ params.stride_width = 1;
+ params.dilation_height_factor = 1;
+ params.dilation_width_factor = 1;
+ params.activation = Activation::NONE;
+
+ DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 71, -34, 99, -20, //
+ 91, -26, 127, -4, //
+ };
+ EXPECT_THAT(dequantize(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+ output_tensor.zero_point()),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 1, 4}));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Elu.cpp b/compiler/luci-interpreter/src/kernels/Elu.cpp
new file mode 100644
index 000000000..5de4a1f3b
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Elu.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Elu.h"
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+Elu::Elu(const Tensor *input, Tensor *output) : Kernel({input}, {output}) {}
+
+void Elu::configure()
+{
+ assert(input()->element_type() == output()->element_type());
+ output()->resize(input()->shape());
+}
+
+void Elu::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ tflite::optimized_ops::Elu(getTensorShape(input()), getTensorData<float>(input()),
+ getTensorShape(output()), getTensorData<float>(output()));
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Elu.h b/compiler/luci-interpreter/src/kernels/Elu.h
new file mode 100644
index 000000000..c844ab57f
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Elu.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_ELU_H
+#define LUCI_INTERPRETER_KERNELS_ELU_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Elu : public Kernel
+{
+public:
+ Elu(const Tensor *input, Tensor *output);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_ELU_H
diff --git a/compiler/luci-interpreter/src/kernels/Elu.test.cpp b/compiler/luci-interpreter/src/kernels/Elu.test.cpp
new file mode 100644
index 000000000..52444cbea
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Elu.test.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Elu.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> output_shape,
+ std::initializer_list<float> input_data, std::initializer_list<float> output_data)
+{
+ Tensor input_tensor{DataType::FLOAT32, input_shape, {}, ""};
+ input_tensor.writeData(input_data.begin(), input_data.size() * sizeof(float));
+
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Elu kernel(&input_tensor, &output_tensor);
+ kernel.configure();
+ kernel.execute();
+
+ (void)output_shape;
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ::testing::ElementsAreArray(ArrayFloatNear(output_data)));
+}
+
+TEST(EluTest, SimpleElu)
+{
+ Check(
+ /*input_shape=*/{1, 2, 4, 1}, /*output_shape=*/{1, 2, 4, 1},
+ /*input_data=*/
+ {
+ 0, -6, 2, -4, //
+ 3, -2, 10, -0.1, //
+ },
+ /*output_data=*/
+ {
+ 0.0, -0.997521, 2.0, -0.981684, //
+ 3.0, -0.864665, 10.0, -0.0951626, //
+ });
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/FullyConnected.cpp b/compiler/luci-interpreter/src/kernels/FullyConnected.cpp
new file mode 100644
index 000000000..6529c5e77
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/FullyConnected.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/FullyConnected.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/fully_connected.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+FullyConnected::FullyConnected(const Tensor *input, const Tensor *weights, const Tensor *bias,
+ Tensor *output, const FullyConnectedParams &params)
+ : KernelWithParams<FullyConnectedParams>({input, weights, bias}, {output}, params)
+{
+}
+
+void FullyConnected::configure()
+{
+ if (weights()->element_type() != DataType::FLOAT32)
+ throw std::runtime_error("Unsupported type.");
+
+ assert(input()->element_type() == DataType::FLOAT32);
+ assert(weights()->element_type() == DataType::FLOAT32);
+ assert(bias() == nullptr || bias()->element_type() == DataType::FLOAT32);
+
+ const Shape &input_shape = input()->shape();
+ const Shape &weights_shape = weights()->shape();
+
+ assert(weights_shape.num_dims() == 2);
+ assert(bias() == nullptr || bias()->shape().num_elements() == weights_shape.dim(0));
+
+ assert(input_shape.num_elements() % weights_shape.dim(1) == 0);
+ const int32_t batch_size = input_shape.num_elements() / weights_shape.dim(1);
+ const int32_t num_units = weights_shape.dim(0);
+
+ output()->resize({batch_size, num_units});
+}
+
+void FullyConnected::execute() const { evalFloat(); }
+
+void FullyConnected::evalFloat() const
+{
+ float activation_min{};
+ float activation_max{};
+ calculateActivationRange(_params.activation, &activation_min, &activation_max);
+
+ tflite::FullyConnectedParams params{};
+ params.float_activation_min = activation_min;
+ params.float_activation_max = activation_max;
+ params.weights_format = tflite::FullyConnectedWeightsFormat::kDefault;
+
+ tflite::reference_ops::FullyConnected(
+ params, getTensorShape(input()), getTensorData<float>(input()), getTensorShape(weights()),
+ getTensorData<float>(weights()), getTensorShape(bias()), getTensorData<float>(bias()),
+ getTensorShape(output()), getTensorData<float>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/FullyConnected.h b/compiler/luci-interpreter/src/kernels/FullyConnected.h
new file mode 100644
index 000000000..2e3174c74
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/FullyConnected.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_FULLYCONNECTED_H
+#define LUCI_INTERPRETER_KERNELS_FULLYCONNECTED_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class FullyConnected : public KernelWithParams<FullyConnectedParams>
+{
+public:
+ FullyConnected(const Tensor *input, const Tensor *weights, const Tensor *bias, Tensor *output,
+ const FullyConnectedParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ const Tensor *weights() const { return _inputs[1]; }
+ const Tensor *bias() const { return _inputs[2]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_FULLYCONNECTED_H
diff --git a/compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp b/compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp
new file mode 100644
index 000000000..8077fcb5c
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/FullyConnected.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(FullyConnectedTest, Float)
+{
+ Shape input_shape{3, 2, 2, 1};
+ std::vector<float> input_data{
+ -3, -5, 5, 4, 9, -2, // batch = 0
+ -3, -2, -4, 9, -8, 1, // batch = 1
+ };
+ Shape weights_shape{3, 6};
+ std::vector<float> weights_data{
+ -3, -7, 4, -4, -6, 4, // unit = 0
+ 3, 5, 2, 3, -3, -8, // unit = 1
+ -3, 7, 4, 9, 0, -5, // unit = 2
+ };
+ Shape bias_shape{3};
+ std::vector<float> bias_data{-1, -5, -8};
+
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor weights_tensor = makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data);
+ Tensor bias_tensor = makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ FullyConnectedParams params{};
+ params.activation = Activation::RELU;
+
+ FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 0, 0, 32, // batch = 0
+ 22, 11, 47, // batch = 1
+ };
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/If.cpp b/compiler/luci-interpreter/src/kernels/If.cpp
new file mode 100644
index 000000000..e6bdee338
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/If.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/If.h"
+
+#include <cstring>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+static std::vector<const Tensor *> joinInputs(const Tensor *cond,
+ const std::vector<const Tensor *> &inputs)
+{
+ std::vector<const Tensor *> result{cond};
+ result.insert(result.cend(), inputs.cbegin(), inputs.cend());
+ return result;
+}
+
+If::If(const Tensor *cond, const std::vector<const Tensor *> &inputs, std::vector<Tensor *> outputs,
+ RuntimeGraph *then_graph, RuntimeGraph *else_graph)
+ : Kernel(joinInputs(cond, inputs), std::move(outputs)), _then_graph(then_graph),
+ _else_graph(else_graph)
+{
+}
+
+void If::configure()
+{
+ assert(cond()->element_type() == DataType::BOOL);
+ assert(cond()->shape().num_elements() == 1);
+
+ for (RuntimeGraph *graph : {_then_graph, _else_graph})
+ {
+ (void)graph;
+ assert(graph->getInputTensors().size() == getInputTensors().size() - 1);
+ assert(graph->getOutputTensors().size() == getOutputTensors().size());
+ }
+}
+
+void If::execute() const
+{
+ const bool cond_value = cond()->data<bool>()[0];
+
+ RuntimeGraph *active_graph = cond_value ? _then_graph : _else_graph;
+ const auto &graph_inputs = active_graph->getInputTensors();
+ const auto &graph_outputs = active_graph->getOutputTensors();
+
+ // Copy kernel inputs to active graph inputs.
+ for (size_t i = 0; i < getInputTensors().size() - 1; ++i)
+ {
+ assert(graph_inputs[i]->element_type() == input(i)->element_type());
+ graph_inputs[i]->resize(input(i)->shape());
+
+ const int32_t num_elements = input(i)->shape().num_elements();
+ const std::size_t element_size = getDataTypeSize(input(i)->element_type());
+ std::memcpy(graph_inputs[i]->data<void>(), input(i)->data<void>(), num_elements * element_size);
+ }
+
+ active_graph->execute();
+
+ // Copy graph outputs to kernel outputs.
+ for (size_t i = 0; i < getOutputTensors().size(); ++i)
+ {
+ assert(graph_outputs[i]->element_type() == output(i)->element_type());
+ output(i)->resize(graph_outputs[i]->shape());
+
+ const int32_t num_elements = output(i)->shape().num_elements();
+ const std::size_t element_size = getDataTypeSize(output(i)->element_type());
+ std::memcpy(output(i)->data<void>(), graph_outputs[i]->data<void>(),
+ num_elements * element_size);
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/If.h b/compiler/luci-interpreter/src/kernels/If.h
new file mode 100644
index 000000000..fa6ab371a
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/If.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_IF_H
+#define LUCI_INTERPRETER_KERNELS_IF_H
+
+#include "core/Kernel.h"
+#include "core/RuntimeGraph.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class If : public Kernel
+{
+public:
+ If(const Tensor *cond, const std::vector<const Tensor *> &inputs, std::vector<Tensor *> outputs,
+ RuntimeGraph *then_graph, RuntimeGraph *else_graph);
+
+ const Tensor *cond() const { return _inputs[0]; }
+ const Tensor *input(int index) const { return _inputs[1 + index]; }
+ Tensor *output(int index) const { return _outputs[index]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ RuntimeGraph *const _then_graph;
+ RuntimeGraph *const _else_graph;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_IF_H
diff --git a/compiler/luci-interpreter/src/kernels/If.test.cpp b/compiler/luci-interpreter/src/kernels/If.test.cpp
new file mode 100644
index 000000000..9b3857ce3
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/If.test.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "core/RuntimeModule.h"
+#include "kernels/Add.h"
+#include "kernels/If.h"
+#include "kernels/Mul.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+RuntimeGraph *buildAddSubgraph(RuntimeModule *module)
+{
+ RuntimeGraph *graph = module->addGraph();
+ Tensor *input1 = graph->addTensor(
+ std::make_unique<Tensor>(DataType::FLOAT32, Shape{}, AffineQuantization{}, ""));
+ Tensor *input2 = graph->addTensor(
+ std::make_unique<Tensor>(DataType::FLOAT32, Shape{}, AffineQuantization{}, ""));
+ Tensor *output = graph->addTensor(
+ std::make_unique<Tensor>(DataType::FLOAT32, Shape{}, AffineQuantization{}, ""));
+
+ graph->setInputTensors({input1, input2});
+ graph->setOutputTensors({output});
+
+ AddParams params{};
+ params.activation = Activation::NONE;
+ graph->addKernel(std::make_unique<Add>(input1, input2, output, params));
+
+ return graph;
+}
+
+RuntimeGraph *buildMulSubgraph(RuntimeModule *module)
+{
+ RuntimeGraph *graph = module->addGraph();
+ Tensor *input1 = graph->addTensor(
+ std::make_unique<Tensor>(DataType::FLOAT32, Shape{}, AffineQuantization{}, ""));
+ Tensor *input2 = graph->addTensor(
+ std::make_unique<Tensor>(DataType::FLOAT32, Shape{}, AffineQuantization{}, ""));
+ Tensor *output = graph->addTensor(
+ std::make_unique<Tensor>(DataType::FLOAT32, Shape{}, AffineQuantization{}, ""));
+
+ graph->setInputTensors({input1, input2});
+ graph->setOutputTensors({output});
+
+ MulParams params{};
+ params.activation = Activation::NONE;
+ graph->addKernel(std::make_unique<Mul>(input1, input2, output, params));
+
+ return graph;
+}
+
+TEST(IfTest, CondTrue)
+{
+ Tensor cond = makeInputTensor<DataType::BOOL>({1}, {true});
+ Tensor input1 = makeInputTensor<DataType::FLOAT32>({2}, {5, 7});
+ Tensor input2 = makeInputTensor<DataType::FLOAT32>({1, 2}, {1, 2});
+ Tensor output = makeOutputTensor(DataType::FLOAT32);
+
+ RuntimeModule module(nullptr);
+ RuntimeGraph *then_graph = buildAddSubgraph(&module);
+ RuntimeGraph *else_graph = buildMulSubgraph(&module);
+
+ If kernel(&cond, {&input1, &input2}, {&output}, then_graph, else_graph);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output), ElementsAreArray(ArrayFloatNear({6, 9})));
+}
+
+TEST(IfTest, CondFalse)
+{
+ Tensor cond = makeInputTensor<DataType::BOOL>({1}, {false});
+ Tensor input1 = makeInputTensor<DataType::FLOAT32>({2}, {5, 7});
+ Tensor input2 = makeInputTensor<DataType::FLOAT32>({1, 2}, {1, 2});
+ Tensor output = makeOutputTensor(DataType::FLOAT32);
+
+ RuntimeModule module(nullptr);
+ RuntimeGraph *then_graph = buildAddSubgraph(&module);
+ RuntimeGraph *else_graph = buildMulSubgraph(&module);
+
+ If kernel(&cond, {&input1, &input2}, {&output}, then_graph, else_graph);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output), ElementsAreArray(ArrayFloatNear({5, 14})));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/L2Normalize.cpp b/compiler/luci-interpreter/src/kernels/L2Normalize.cpp
new file mode 100644
index 000000000..cfa535075
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/L2Normalize.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/L2Normalize.h"
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+L2Normalize::L2Normalize(const Tensor *input, Tensor *output, const L2NormParams &params)
+ : KernelWithParams<L2NormParams>({input}, {output}, params)
+{
+}
+
+void L2Normalize::configure()
+{
+ assert(input()->shape().num_dims() <= 4);
+ assert(output()->element_type() == DataType::FLOAT32 || output()->element_type() == DataType::U8);
+ assert(input()->element_type() == output()->element_type());
+ if (output()->element_type() == DataType::U8)
+ {
+ assert(output()->scale() == (1. / 128.));
+ assert(output()->zero_point() == 128);
+ }
+ assert(params().activation == Activation::NONE);
+ output()->resize(input()->shape());
+}
+
+void L2Normalize::execute() const
+{
+ switch (output()->element_type())
+ {
+ case DataType::FLOAT32:
+ eval<float>(0);
+ break;
+ case DataType::U8:
+ eval<uint8_t>(input()->zero_point());
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+template <typename T> void L2Normalize::eval(int32_t zero_point) const
+{
+ tflite::L2NormalizationParams op_params{};
+ op_params.input_zero_point = zero_point;
+ tflite::optimized_ops::L2Normalization(op_params, getTensorShape(input()),
+ getTensorData<T>(input()), getTensorShape(output()),
+ getTensorData<T>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/L2Normalize.h b/compiler/luci-interpreter/src/kernels/L2Normalize.h
new file mode 100644
index 000000000..6c7dac698
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/L2Normalize.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_L2NORMALIZE_H
+#define LUCI_INTERPRETER_KERNELS_L2NORMALIZE_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class L2Normalize : public KernelWithParams<L2NormParams>
+{
+public:
+ L2Normalize(const Tensor *input, Tensor *output, const L2NormParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ template <typename T> void eval(int32_t zero_point) const;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_L2NORMALIZE_H
diff --git a/compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp b/compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp
new file mode 100644
index 000000000..fad450d66
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "kernels/L2Normalize.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(L2NormalizeTest, Float)
+{
+ std::vector<float> input_data = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>({1, 1, 1, 6}, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ L2NormParams params{};
+ params.activation = Activation::NONE;
+
+ L2Normalize kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{-0.55, 0.3, 0.35, 0.6, -0.35, 0.05};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+}
+
+TEST(L2NormalizeTest, Uint8Quantized)
+{
+ // TODO
+ // Implement GetDequantizedOutput Function.
+ // Create Test for Uint8 Case
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/L2Pool2D.cpp b/compiler/luci-interpreter/src/kernels/L2Pool2D.cpp
new file mode 100644
index 000000000..37a6ddedc
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/L2Pool2D.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/L2Pool2D.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+L2Pool2D::L2Pool2D(const Tensor *input, Tensor *output, const Pool2DParams &params)
+ : KernelWithParams<Pool2DParams>({input}, {output}, params)
+{
+}
+
+void L2Pool2D::configure()
+{
+ assert(input()->shape().num_dims() == 4);
+ assert(input()->element_type() == output()->element_type());
+
+ int batches = input()->shape().dim(0);
+ int height = input()->shape().dim(1);
+ int width = input()->shape().dim(2);
+ int channels_out = input()->shape().dim(3);
+
+ // Matching GetWindowedOutputSize in TensorFlow.
+ auto padding = params().padding;
+ int out_width, out_height;
+ out_width = computeOutputSize(padding, width, params().filter_width, params().stride_width, 1);
+ out_height =
+ computeOutputSize(padding, height, params().filter_height, params().stride_height, 1);
+ _padding_width =
+ computePadding(params().stride_width, 1, width, params().filter_width, out_width);
+ _padding_height =
+ computePadding(params().stride_height, 1, height, params().filter_height, out_height);
+
+ assert(input()->element_type() == DataType::FLOAT32);
+ output()->resize({batches, out_height, out_width, channels_out});
+}
+
+void L2Pool2D::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ float activation_min, activation_max;
+ calculateActivationRange(params().activation, &activation_min, &activation_max);
+ tflite::PoolParams op_params;
+ op_params.stride_height = params().stride_height;
+ op_params.stride_width = params().stride_width;
+ op_params.filter_height = params().filter_height;
+ op_params.filter_width = params().filter_width;
+ op_params.padding_values.height = _padding_height;
+ op_params.padding_values.width = _padding_width;
+ op_params.float_activation_min = activation_min;
+ op_params.float_activation_max = activation_max;
+ tflite::optimized_ops::L2Pool(op_params, getTensorShape(input()),
+ getTensorData<float>(input()), getTensorShape(output()),
+ getTensorData<float>(output()));
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/L2Pool2D.h b/compiler/luci-interpreter/src/kernels/L2Pool2D.h
new file mode 100644
index 000000000..d40f5f478
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/L2Pool2D.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_L2POOL2D_H
+#define LUCI_INTERPRETER_KERNELS_L2POOL2D_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+#include <memory>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class L2Pool2D : public KernelWithParams<Pool2DParams>
+{
+public:
+ L2Pool2D(const Tensor *input, Tensor *output, const Pool2DParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ int32_t _padding_height = 0;
+ int32_t _padding_width = 0;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_L2POOL2D_H
diff --git a/compiler/luci-interpreter/src/kernels/L2Pool2D.test.cpp b/compiler/luci-interpreter/src/kernels/L2Pool2D.test.cpp
new file mode 100644
index 000000000..06bb9388f
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/L2Pool2D.test.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/L2Pool2D.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(L2Pool2DTest, FloatNone)
+{
+ Shape input_shape{1, 2, 4, 1};
+ std::vector<float> input_data{
+ 0, 6, 2, 4, //
+ 3, 2, 10, 7, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.activation = Activation::NONE;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 2;
+ params.stride_width = 2;
+
+ L2Pool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{3.5, 6.5};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ // TODO make a Shape checking of output_tensor.
+}
+
+TEST(L2Pool2DTest, FloatRelu)
+{
+ Shape input_shape{1, 2, 4, 1};
+ std::vector<float> input_data{
+ -1, -6, 2, 4, //
+ -3, -2, 10, 7, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.activation = Activation::RELU;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 2;
+ params.stride_width = 2;
+
+ L2Pool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{3.53553, 6.5};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ // TODO make a Shape checking of output_tensor.
+}
+
+TEST(L2Pool2DTest, FloatRelu1)
+{
+ Shape input_shape{1, 2, 4, 1};
+ std::vector<float> input_data{
+ -0.1, -0.6, 2, 4, //
+ -0.3, -0.2, 10, 7, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.activation = Activation::RELU_N1_TO_1;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 2;
+ params.stride_width = 2;
+
+ L2Pool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{0.353553, 1.0};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ // TODO make a Shape checking of output_tensor.
+}
+
+TEST(L2Pool2DTest, FloatRelu6)
+{
+ Shape input_shape{1, 2, 4, 1};
+ std::vector<float> input_data{
+ -0.1, -0.6, 2, 4, //
+ -0.3, -0.2, 10, 7, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.activation = Activation::RELU6;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 2;
+ params.stride_width = 2;
+
+ L2Pool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{0.353553, 6.0};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ // TODO make a Shape checking of output_tensor.
+}
+
+TEST(L2Pool2DTest, FloatPaddingSame)
+{
+ Shape input_shape{1, 2, 4, 1};
+ std::vector<float> input_data{
+ 0, 6, 2, 4, //
+ 3, 2, 10, 7, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::SAME;
+ params.activation = Activation::NONE;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 2;
+ params.stride_width = 2;
+
+ L2Pool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{3.5, 6.5};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ // TODO make a Shape checking of output_tensor.
+}
+
+TEST(L2Pool2DTest, FloatPaddingSameSlide1)
+{
+ Shape input_shape{1, 2, 4, 1};
+ std::vector<float> input_data{
+ 0, 6, 2, 4, //
+ 3, 2, 10, 7, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::SAME;
+ params.activation = Activation::NONE;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 1;
+ params.stride_width = 1;
+
+ L2Pool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{3.5, 6.0, 6.5, 5.70088, 2.54951, 7.2111, 8.63134, 7.0};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ // TODO make a Shape checking of output_tensor.
+}
+
+TEST(L2Pool2DTest, FloatPaddingValidSlide1)
+{
+ Shape input_shape{1, 2, 4, 1};
+ std::vector<float> input_data{
+ 0, 6, 2, 4, //
+ 3, 2, 10, 7, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pool2DParams params{};
+ params.padding = Padding::VALID;
+ params.activation = Activation::NONE;
+ params.filter_height = 2;
+ params.filter_width = 2;
+ params.stride_height = 1;
+ params.stride_width = 1;
+
+ L2Pool2D kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{3.5, 6.0, 6.5};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ // TODO make a Shape checking of output_tensor.
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/LeakyRelu.cpp b/compiler/luci-interpreter/src/kernels/LeakyRelu.cpp
new file mode 100644
index 000000000..fce01a605
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/LeakyRelu.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/LeakyRelu.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+LeakyRelu::LeakyRelu(const Tensor *input, Tensor *output, const LeakyReluParams &params)
+ : KernelWithParams<LeakyReluParams>({input}, {output}, params)
+{
+}
+
+void LeakyRelu::configure()
+{
+ assert(input()->element_type() == output()->element_type());
+ if (input()->element_type() == DataType::U8)
+ {
+ _q_alpha = static_cast<uint8_t>(std::max<float>(
+ std::numeric_limits<uint8_t>::min(),
+ std::min<float>(std::numeric_limits<uint8_t>::max(),
+ std::round(input()->zero_point() + (params().alpha / input()->scale())))));
+ double real_multiplier = input()->scale() * input()->scale() / output()->scale();
+ quantizeMultiplierSmallerThanOneExp(real_multiplier, &_output_multiplier, &_output_shift);
+ }
+ output()->resize(input()->shape());
+}
+
+void LeakyRelu::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ evalFloat();
+ break;
+ case DataType::U8:
+ evalQuantized();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void LeakyRelu::evalFloat() const
+{
+ tflite::LeakyReluParams op_params{};
+ op_params.alpha = params().alpha;
+ tflite::optimized_ops::LeakyRelu(op_params, getTensorShape(input()),
+ getTensorData<float>(input()), getTensorShape(output()),
+ getTensorData<float>(output()));
+}
+
+void LeakyRelu::evalQuantized() const
+{
+ tflite::LeakyReluParams op_params{};
+ op_params.input_offset = input()->zero_point();
+ op_params.alpha_offset = input()->zero_point();
+ op_params.output_offset = output()->zero_point();
+
+ op_params.output_multiplier = _output_multiplier;
+ op_params.output_shift = _output_shift;
+
+ tflite::reference_ops::QuantizeLeakyRelu(
+ op_params, _q_alpha, getTensorShape(input()), getTensorData<uint8_t>(input()),
+ getTensorShape(output()), getTensorData<uint8_t>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/LeakyRelu.h b/compiler/luci-interpreter/src/kernels/LeakyRelu.h
new file mode 100644
index 000000000..dcc2be93f
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/LeakyRelu.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_LEAKYRELU_H
+#define LUCI_INTERPRETER_KERNELS_LEAKYRELU_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class LeakyRelu : public KernelWithParams<LeakyReluParams>
+{
+public:
+ LeakyRelu(const Tensor *input, Tensor *output, const LeakyReluParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+ void evalQuantized() const;
+
+private:
+ uint8_t _q_alpha = 0;
+ int32_t _output_multiplier = 0;
+ int _output_shift = 0;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_LEAKYRELU_H
diff --git a/compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp b/compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp
new file mode 100644
index 000000000..b0c06e7a3
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/LeakyRelu.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+template <typename T>
+void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> output_shape,
+ std::initializer_list<T> input_data, std::initializer_list<T> output_data, float alpha,
+ DataType element_type)
+{
+ Tensor input_tensor{element_type, input_shape, {}, ""};
+ input_tensor.writeData(input_data.begin(), input_data.size() * sizeof(T));
+
+ Tensor output_tensor = makeOutputTensor(element_type);
+
+ LeakyReluParams params{};
+ params.alpha = alpha;
+
+ LeakyRelu kernel(&input_tensor, &output_tensor, params);
+
+ kernel.configure();
+ kernel.execute();
+
+ (void)output_shape;
+ EXPECT_THAT(extractTensorData<T>(output_tensor), ::testing::ElementsAreArray(output_data));
+}
+
+TEST(LeakReluTest, FloatSimple)
+{
+ Check<float>(/*input_shape=*/{2, 3}, /*output_shape=*/{2, 3}, /*input_data=*/
+ {
+ 0.0f, 1.0f, 3.0f, // Row 1
+ 1.0f, -1.0f, -2.0f, // Row 2
+ },
+ /*output_data=*/
+ {
+ 0.0f, 1.0f, 3.0f, // Row 1
+ 1.0f, -0.5f, -1.0f, // Row 2
+ },
+ /*alpha=*/0.5f, getElementType<float>());
+}
+
+TEST(LeakReluTest, Uint8Simple)
+{
+ // TODO
+ // Implement GetDequantizedOutput Function.
+ // Create Test for Uint8 Case
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.cpp b/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.cpp
new file mode 100644
index 000000000..08efa1d6a
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/LocalResponseNormalization.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+LocalResponseNormalization::LocalResponseNormalization(
+ const Tensor *input, Tensor *output, const LocalResponseNormalizationParams &params)
+ : KernelWithParams<LocalResponseNormalizationParams>({input}, {output}, params)
+{
+}
+
+void LocalResponseNormalization::configure()
+{
+ assert(input()->shape().num_dims() == 4);
+ assert(output()->element_type() == DataType::FLOAT32);
+ assert(input()->element_type() == output()->element_type());
+ output()->resize(input()->shape());
+}
+
+void LocalResponseNormalization::execute() const
+{
+ switch (output()->element_type())
+ {
+ case DataType::FLOAT32:
+ tflite::LocalResponseNormalizationParams op_params;
+ op_params.range = params().radius;
+ op_params.bias = params().bias;
+ op_params.alpha = params().alpha;
+ op_params.beta = params().beta;
+ tflite::optimized_ops::LocalResponseNormalization(
+ op_params, getTensorShape(input()), getTensorData<float>(input()),
+ getTensorShape(output()), getTensorData<float>(output()));
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.h b/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.h
new file mode 100644
index 000000000..60408a104
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_LOCALRESPONSENORMALIZATION_H
+#define LUCI_INTERPRETER_KERNELS_LOCALRESPONSENORMALIZATION_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class LocalResponseNormalization : public KernelWithParams<LocalResponseNormalizationParams>
+{
+public:
+ LocalResponseNormalization(const Tensor *input, Tensor *output,
+ const LocalResponseNormalizationParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_LOCALRESPONSENORMALIZATION_H
diff --git a/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.test.cpp b/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.test.cpp
new file mode 100644
index 000000000..4191bdb29
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/LocalResponseNormalization.test.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/LocalResponseNormalization.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(LocalResponseNormalizationTest, SameAsL2Norm)
+{
+ Tensor input_tensor =
+ makeInputTensor<DataType::FLOAT32>({1, 1, 1, 6}, {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ LocalResponseNormalizationParams params{};
+ params.radius = 20;
+ params.bias = 0.0;
+ params.alpha = 1.0;
+ params.beta = 0.5;
+
+ LocalResponseNormalization kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05})));
+}
+
+TEST(LocalResponseNormalizationTest, WithAlpha)
+{
+ Tensor input_tensor =
+ makeInputTensor<DataType::FLOAT32>({1, 1, 1, 6}, {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ LocalResponseNormalizationParams params{};
+ params.radius = 20;
+ params.bias = 0.0;
+ params.alpha = 4.0;
+ params.beta = 0.5;
+
+ LocalResponseNormalization kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear({-0.275, 0.15, 0.175, 0.3, -0.175, 0.025})));
+}
+
+TEST(LocalResponseNormalizationTest, WithBias)
+{
+ Tensor input_tensor =
+ makeInputTensor<DataType::FLOAT32>({1, 1, 1, 6}, {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ LocalResponseNormalizationParams params{};
+ params.radius = 20;
+ params.bias = 9.0;
+ params.alpha = 4.0;
+ params.beta = 0.5;
+
+ LocalResponseNormalization kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear({-0.22, 0.12, 0.14, 0.24, -0.14, 0.02})));
+}
+
+TEST(LocalResponseNormalizationTest, SmallRadius)
+{
+ Tensor input_tensor =
+ makeInputTensor<DataType::FLOAT32>({1, 1, 1, 6}, {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ LocalResponseNormalizationParams params{};
+ params.radius = 2;
+ params.bias = 9.0;
+ params.alpha = 4.0;
+ params.beta = 0.5;
+
+ LocalResponseNormalization kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(
+ ArrayFloatNear({-0.264926, 0.125109, 0.140112, 0.267261, -0.161788, 0.0244266})));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Logistic.cpp b/compiler/luci-interpreter/src/kernels/Logistic.cpp
new file mode 100644
index 000000000..c7d45615c
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Logistic.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Logistic.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+Logistic::Logistic(const Tensor *input, Tensor *output) : Kernel({input}, {output}) {}
+
+void Logistic::configure()
+{
+ assert(input()->element_type() == output()->element_type());
+ if (input()->element_type() == DataType::U8)
+ {
+ assert(output()->scale() == 1. / 256);
+ populateLookupTable();
+ }
+ output()->resize(input()->shape());
+}
+
+void Logistic::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ evalFloat();
+ break;
+ case DataType::U8:
+ evalQuantized();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void Logistic::evalFloat() const
+{
+ tflite::reference_ops::Logistic(getTensorShape(input()), getTensorData<float>(input()),
+ getTensorShape(output()), getTensorData<float>(output()));
+}
+
+void Logistic::evalQuantized() const
+{
+ const int size = tflite::MatchingFlatSize(getTensorShape(input()), getTensorShape(output()));
+ uint8_t *output_data = getTensorData<uint8_t>(output());
+ const uint8_t *input_data = getTensorData<uint8_t>(input());
+ for (int i = 0; i < size; ++i)
+ {
+ output_data[i] = getTableValue(input_data[i]);
+ }
+}
+
+void Logistic::populateLookupTable()
+{
+ const auto input_scale = static_cast<double>(input()->scale());
+ const auto input_zero_point = static_cast<int32_t>(input()->zero_point());
+ const auto output_scale = static_cast<double>(output()->scale());
+ const auto output_zero_point = static_cast<int32_t>(output()->zero_point());
+ const float inverse_scale = 1 / output_scale;
+ int32_t maxval = std::numeric_limits<uint8_t>::max();
+ int32_t minval = std::numeric_limits<uint8_t>::min();
+ for (int32_t val = minval; val <= maxval; ++val)
+ {
+ const float dequantized = input_scale * (val - input_zero_point);
+ const float transformed = 1.0f / (1.0f + std::exp(-dequantized));
+ const float rescaled = std::round(transformed * inverse_scale);
+ const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
+ setTableValue(static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval)),
+ static_cast<uint8_t>(val));
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Logistic.h b/compiler/luci-interpreter/src/kernels/Logistic.h
new file mode 100644
index 000000000..31de6adf0
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Logistic.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_LOGISTIC_H
+#define LUCI_INTERPRETER_KERNELS_LOGISTIC_H
+
+#include "core/Kernel.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Kernel implementing the LOGISTIC (sigmoid) operation for FLOAT32 and U8
+// tensors. The U8 path evaluates via a 256-entry lookup table (see
+// populateLookupTable).
+class Logistic : public Kernel
+{
+public:
+  Logistic(const Tensor *input, Tensor *output);
+
+  const Tensor *input() const { return _inputs[0]; }
+  Tensor *output() const { return _outputs[0]; }
+
+  void configure() override;
+  void execute() const override;
+
+private:
+  void evalFloat() const;
+  void evalQuantized() const;
+  void populateLookupTable();
+  // No trailing ';' after inline function bodies (avoids empty declarations
+  // and -Wextra-semi warnings).
+  void setTableValue(uint8_t value, uint8_t idx) { _table[idx] = value; }
+  uint8_t getTableValue(uint8_t idx) const { return _table[idx]; }
+
+private:
+  // Quantized sigmoid LUT: index = quantized input byte, value = output byte.
+  uint8_t _table[256]{};
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_LOGISTIC_H
diff --git a/compiler/luci-interpreter/src/kernels/Logistic.test.cpp b/compiler/luci-interpreter/src/kernels/Logistic.test.cpp
new file mode 100644
index 000000000..17456a4a8
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Logistic.test.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Logistic.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(LogisticTest, Float)
+{
+  Shape input_shape{1, 2, 4, 1};
+  std::vector<float> input_data{
+      0, -6, 2,  4, //
+      3, -2, 10, 1, //
+  };
+  Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+  Logistic kernel(&input_tensor, &output_tensor);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{
+      0.5,      0.002473, 0.880797, 0.982014, //
+      0.952574, 0.119203, 0.999955, 0.731059, //
+  };
+  EXPECT_THAT(extractTensorData<float>(output_tensor),
+              ElementsAreArray(ArrayFloatNear(ref_output_data)));
+  // Logistic is element-wise, so the output shape must equal the input shape.
+  std::initializer_list<int32_t> ref_output_shape{1, 2, 4, 1};
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+TEST(LogisticTest, Uint8)
+{
+  // TODO: implement the quantized (U8) test once a GetDequantizedOutput
+  // helper is available in TestUtils.
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/MaxPool2D.cpp b/compiler/luci-interpreter/src/kernels/MaxPool2D.cpp
new file mode 100644
index 000000000..afecf9058
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/MaxPool2D.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/MaxPool2D.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/pooling.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+// Wires the single input/output tensor pair and the pooling parameters into
+// the parameterized kernel base.
+MaxPool2D::MaxPool2D(const Tensor *input, Tensor *output, const Pool2DParams &params)
+    : KernelWithParams<Pool2DParams>({input}, {output}, params)
+{
+}
+
+// Validates operand types/rank, computes the output spatial size and the
+// implicit padding for the configured padding mode, and resizes the output.
+void MaxPool2D::configure()
+{
+  assert(input()->element_type() == output()->element_type());
+  assert(input()->shape().num_dims() == 4); // NHWC layout expected
+  const Shape &input_shape = input()->shape();
+  const int32_t batches = input_shape.dim(0);
+  const int32_t input_height = input_shape.dim(1);
+  const int32_t input_width = input_shape.dim(2);
+  const int32_t depth = input_shape.dim(3);
+
+  const int32_t output_height = computeOutputSize(_params.padding, input_height,
+                                                  _params.filter_height, _params.stride_height);
+  const int32_t output_width =
+      computeOutputSize(_params.padding, input_width, _params.filter_width, _params.stride_width);
+
+  // Dilation is 1 for pooling, hence the literal second argument.
+  _padding_height =
+      computePadding(_params.stride_height, 1, input_height, _params.filter_height, output_height);
+  _padding_width =
+      computePadding(_params.stride_width, 1, input_width, _params.filter_width, output_width);
+
+  output()->resize({batches, output_height, output_width, depth});
+  // Max pooling only selects existing values, so quantization parameters must
+  // be identical on input and output.
+  if (input()->element_type() == DataType::U8 || input()->element_type() == DataType::S8)
+  {
+    assert(input()->scale() == output()->scale());
+    assert(input()->zero_point() == output()->zero_point());
+  }
+}
+
+// Dispatches to the implementation matching the input element type.
+void MaxPool2D::execute() const
+{
+  const DataType element_type = input()->element_type();
+  if (element_type == DataType::FLOAT32)
+  {
+    evalFloat();
+  }
+  else if (element_type == DataType::U8)
+  {
+    evalQuantized();
+  }
+  else
+  {
+    throw std::runtime_error("Unsupported type.");
+  }
+}
+
+// FLOAT32 max pooling via the TFLite reference kernel.
+void MaxPool2D::evalFloat() const
+{
+  float act_min{};
+  float act_max{};
+  calculateActivationRange(_params.activation, &act_min, &act_max);
+
+  // Translate the kernel parameters into the TFLite parameter struct.
+  tflite::PoolParams op_params{};
+  op_params.stride_height = _params.stride_height;
+  op_params.stride_width = _params.stride_width;
+  op_params.filter_height = _params.filter_height;
+  op_params.filter_width = _params.filter_width;
+  op_params.padding_values.height = _padding_height;
+  op_params.padding_values.width = _padding_width;
+  op_params.float_activation_min = act_min;
+  op_params.float_activation_max = act_max;
+
+  tflite::reference_ops::MaxPool(op_params, getTensorShape(input()),
+                                 getTensorData<float>(input()), getTensorShape(output()),
+                                 getTensorData<float>(output()));
+}
+
+// U8 max pooling via the TFLite reference kernel; input and output share
+// quantization parameters (checked in configure()).
+void MaxPool2D::evalQuantized() const
+{
+  int32_t act_min{};
+  int32_t act_max{};
+  calculateActivationRangeQuantized(_params.activation, output(), &act_min, &act_max);
+
+  // Translate the kernel parameters into the TFLite parameter struct.
+  tflite::PoolParams op_params{};
+  op_params.stride_height = _params.stride_height;
+  op_params.stride_width = _params.stride_width;
+  op_params.filter_height = _params.filter_height;
+  op_params.filter_width = _params.filter_width;
+  op_params.padding_values.height = _padding_height;
+  op_params.padding_values.width = _padding_width;
+  op_params.quantized_activation_min = act_min;
+  op_params.quantized_activation_max = act_max;
+
+  tflite::reference_ops::MaxPool(op_params, getTensorShape(input()),
+                                 getTensorData<uint8_t>(input()), getTensorShape(output()),
+                                 getTensorData<uint8_t>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/MaxPool2D.h b/compiler/luci-interpreter/src/kernels/MaxPool2D.h
new file mode 100644
index 000000000..7a59ff022
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/MaxPool2D.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_MAXPOOL2D_H
+#define LUCI_INTERPRETER_KERNELS_MAXPOOL2D_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Kernel implementing MAX_POOL_2D for FLOAT32 and U8 NHWC tensors.
+class MaxPool2D : public KernelWithParams<Pool2DParams>
+{
+public:
+  MaxPool2D(const Tensor *input, Tensor *output, const Pool2DParams &params);
+
+  const Tensor *input() const { return _inputs[0]; }
+  Tensor *output() const { return _outputs[0]; }
+
+  void configure() override;
+  void execute() const override;
+
+private:
+  void evalFloat() const;
+  void evalQuantized() const;
+
+private:
+  // Implicit padding computed in configure() from the padding mode.
+  int32_t _padding_height{};
+  int32_t _padding_width{};
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_MAXPOOL2D_H
diff --git a/compiler/luci-interpreter/src/kernels/MaxPool2D.test.cpp b/compiler/luci-interpreter/src/kernels/MaxPool2D.test.cpp
new file mode 100644
index 000000000..390255d89
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/MaxPool2D.test.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/MaxPool2D.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+// VALID padding, 2x3 filter, strides (1, 2): checks both values and shape.
+TEST(MaxPool2DTest, Float)
+{
+  Shape input_shape{1, 3, 5, 1};
+  std::vector<float> input_data{
+      1,  -1, 0,  -2, 2,  //
+      -7, -6, -5, -4, -3, //
+      5,  4,  3,  6,  7,  //
+  };
+  Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+  Pool2DParams params{};
+  params.padding = Padding::VALID;
+  params.filter_height = 2;
+  params.filter_width = 3;
+  params.stride_height = 1;
+  params.stride_width = 2;
+  params.activation = Activation::RELU6;
+
+  MaxPool2D kernel(&input_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{
+      1, 2, //
+      5, 6, //
+  };
+  std::initializer_list<int32_t> ref_output_shape{1, 2, 2, 1};
+  EXPECT_THAT(extractTensorData<float>(output_tensor),
+              ElementsAreArray(ArrayFloatNear(ref_output_data)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+// Quantized pooling: input is quantized over [-15.9375, 15.9375]; results are
+// dequantized before comparison. RELU6 clamps the -2 max to 0.
+TEST(MaxPool2DTest, Uint8)
+{
+  std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-15.9375, 15.9375);
+  std::vector<float> input_data{
+      0,  -6, 12, 4, //
+      -3, -2, 10, 7, //
+  };
+  Tensor input_tensor{DataType::U8, {1, 2, 4, 1}, {{quant_param.first}, {quant_param.second}}, ""};
+  Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second);
+  std::vector<uint8_t> quantize_input =
+      quantize<uint8_t>(input_data, quant_param.first, quant_param.second);
+  input_tensor.writeData(quantize_input.data(), quantize_input.size() * sizeof(uint8_t));
+
+  Pool2DParams params{};
+  params.padding = Padding::VALID;
+  params.filter_height = 2;
+  params.filter_width = 2;
+  params.stride_height = 2;
+  params.stride_width = 2;
+  params.activation = Activation::RELU6;
+
+  MaxPool2D kernel(&input_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{0.0, 6.0};
+  std::initializer_list<int32_t> ref_output_shape{1, 1, 2, 1};
+  EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+                                  output_tensor.zero_point()),
+              ElementsAreArray(ArrayFloatNear(ref_output_data)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Mean.cpp b/compiler/luci-interpreter/src/kernels/Mean.cpp
new file mode 100644
index 000000000..2394e2c0e
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Mean.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Mean.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Copies 'num_axes' axis indices into the TFLite MeanParams and pads the
+// remaining slots (up to 4) with 1.
+static void resolveAxes(const int *axes_data, int num_axes, tflite::MeanParams *params)
+{
+  params->axis_count = num_axes;
+  int i = 0;
+  for (; i < num_axes; ++i)
+  {
+    params->axis[i] = static_cast<int16>(axes_data[i]);
+  }
+  for (; i < 4; ++i)
+  {
+    params->axis[i] = 1;
+  }
+}
+
+// Returns the number of axes that will be reduced, counting each axis only
+// once even if listed multiple times. Negative axes wrap around.
+static int getAxisReductionCount(const int *axes_data, int num_axes, int input_num_dims)
+{
+  int count = num_axes;
+  for (int i = 0; i < num_axes; ++i)
+  {
+    const int axis_i = axes_data[i] < 0 ? axes_data[i] + input_num_dims : axes_data[i];
+    assert(axis_i >= 0 && axis_i < input_num_dims);
+    for (int j = 0; j < i; ++j)
+    {
+      const int axis_j = axes_data[j] < 0 ? axes_data[j] + input_num_dims : axes_data[j];
+      if (axis_i == axis_j)
+      {
+        // Duplicate axis: it is reduced only once.
+        --count;
+        break;
+      }
+    }
+  }
+  return count;
+}
+
+// Computes the shape of the Mean output for the given reduction axes
+// (negative axes wrap around). With keep_dims the reduced dimensions become
+// 1; otherwise they are removed, with duplicate axes counted only once.
+static Shape getOutputShape(const Shape &input_shape, const int *axes_data, int num_axes,
+                            bool keep_dims)
+{
+  int input_num_dims = input_shape.num_dims();
+  if (input_num_dims == 0)
+  {
+    // Reducing a scalar yields a scalar.
+    return Shape(0);
+  }
+
+  if (keep_dims)
+  {
+    Shape output_shape(input_num_dims);
+    for (int idx = 0; idx < input_num_dims; ++idx)
+    {
+      bool is_axis = false;
+      for (int axis_idx = 0; axis_idx < num_axes; ++axis_idx)
+      {
+        // Match both the positive and the negative spelling of the axis.
+        if (axes_data[axis_idx] == idx || axes_data[axis_idx] + input_num_dims == idx)
+        {
+          is_axis = true;
+          break;
+        }
+      }
+      if (is_axis)
+      {
+        output_shape.dim(idx) = 1;
+      }
+      else
+      {
+        output_shape.dim(idx) = input_shape.dim(idx);
+      }
+    }
+    return output_shape;
+  }
+  else
+  {
+    int num_reduce_axes = getAxisReductionCount(axes_data, num_axes, input_num_dims);
+    Shape output_shape(input_num_dims - num_reduce_axes);
+    int num_skip_axes = 0;
+    for (int idx = 0; idx < input_num_dims; ++idx)
+    {
+      bool is_axis = false;
+      for (int axis_idx = 0; axis_idx < num_axes; ++axis_idx)
+      {
+        if (axes_data[axis_idx] == idx || axes_data[axis_idx] + input_num_dims == idx)
+        {
+          ++num_skip_axes;
+          is_axis = true;
+          break;
+        }
+      }
+      if (!is_axis)
+      {
+        // Kept dimensions shift left past the reduced ones.
+        output_shape.dim(idx - num_skip_axes) = input_shape.dim(idx);
+      }
+    }
+    return output_shape;
+  }
+}
+
+// Registers the value tensor and the reduction-axes tensor as kernel inputs.
+Mean::Mean(const Tensor *input, const Tensor *axes, Tensor *output, const ReducerParams &params)
+    : KernelWithParams<ReducerParams>({input, axes}, {output}, params)
+{
+}
+
+// Validates operands, computes and sets the output shape and, when the
+// generic (non-specialized) reduction path will run, allocates the scratch
+// tensors it needs.
+void Mean::configure()
+{
+  assert(input()->element_type() == output()->element_type());
+  assert(axes()->element_type() == DataType::S32);
+  const Shape &input_shape = input()->shape();
+  int input_num_dims = input_shape.num_dims();
+
+  const auto *axes_data = getTensorData<int32_t>(axes());
+  int num_axes = axes()->shape().num_elements();
+  assert(num_axes <= 4);
+
+  Shape output_shape = getOutputShape(input_shape, axes_data, num_axes, _params.keep_dims);
+  output()->resize(output_shape);
+
+  tflite::MeanParams params{};
+  resolveAxes(axes_data, num_axes, &params);
+  // The specialized 4D mean-over-H,W path (see evalFloat/evalQuantized) needs
+  // no temporaries; every other case uses the generic reduction, which does.
+  const bool need_temporaries =
+      !(_params.keep_dims && input_num_dims == 4 && params.axis_count == 2 &&
+        ((params.axis[0] == 1 && params.axis[1] == 2) ||
+         (params.axis[0] == 2 && params.axis[1] == 1)));
+  if (need_temporaries)
+  {
+    _temp_index =
+        std::make_unique<Tensor>(DataType::S32, Shape(input_num_dims), AffineQuantization{}, "");
+    _resolved_axes =
+        std::make_unique<Tensor>(DataType::S32, Shape(num_axes), AffineQuantization{}, "");
+    // The quantized generic path accumulates into 'int' (evalQuantized reads
+    // _temp_sum via getTensorData<int>), so the sum buffer must be S32 there;
+    // a U8-typed buffer would be 4x too small and the reference kernel would
+    // write past its end.
+    const DataType temp_sum_type =
+        input()->element_type() == DataType::U8 ? DataType::S32 : input()->element_type();
+    _temp_sum =
+        std::make_unique<Tensor>(temp_sum_type, output()->shape(), AffineQuantization{}, "");
+  }
+}
+
+// Dispatches to the implementation matching the input element type.
+void Mean::execute() const
+{
+  const DataType element_type = input()->element_type();
+  if (element_type == DataType::FLOAT32)
+  {
+    evalFloat();
+  }
+  else if (element_type == DataType::U8)
+  {
+    evalQuantized();
+  }
+  else
+  {
+    throw std::runtime_error("Unsupported type.");
+  }
+}
+
+// FLOAT32 mean. Uses the fast TFLite path when reducing exactly the H and W
+// axes of a 4D tensor with keep_dims; otherwise falls back to the generic
+// reduction, which needs the scratch tensors allocated in configure().
+void Mean::evalFloat() const
+{
+  const Shape &input_shape = input()->shape();
+  int input_num_dims = input_shape.num_dims();
+  const auto *axes_data = getTensorData<int32_t>(axes());
+  int num_axes = axes()->shape().num_elements();
+
+  tflite::MeanParams params{};
+  resolveAxes(axes_data, num_axes, &params);
+
+  // Defer to specialized implementation for 4D Mean across axes 1 & 2.
+  // NOTE: this condition must stay in sync with 'need_temporaries' in
+  // configure(), which decides whether the scratch tensors exist.
+  if (_params.keep_dims && input_num_dims == 4 && params.axis_count == 2 &&
+      ((params.axis[0] == 1 && params.axis[1] == 2) ||
+       (params.axis[0] == 2 && params.axis[1] == 1)))
+  {
+    tflite::reference_ops::Mean(params, getTensorShape(input()), getTensorData<float>(input()),
+                                getTensorShape(output()), getTensorData<float>(output()));
+  }
+  else
+  {
+    tflite::reference_ops::Mean(
+        getTensorData<float>(input()), getTensorShape(input()).DimsData(),
+        input()->shape().num_dims(), getTensorData<float>(output()),
+        getTensorShape(output()).DimsData(), output()->shape().num_dims(), axes_data, num_axes,
+        _params.keep_dims, getTensorData<int>(_temp_index.get()),
+        getTensorData<int>(_resolved_axes.get()), getTensorData<float>(_temp_sum.get()));
+  }
+}
+
+// U8 mean. Three paths: (1) fast TFLite 4D mean over H,W with requantization;
+// (2) generic integer reduction when input/output quantization match;
+// (3) QuantizedMeanOrSum when they differ.
+void Mean::evalQuantized() const
+{
+  const Shape &input_shape = input()->shape();
+  int input_num_dims = input_shape.num_dims();
+  const auto *axes_data = getTensorData<int32_t>(axes());
+  int num_axes = axes()->shape().num_elements();
+
+  tflite::MeanParams params{};
+  resolveAxes(axes_data, num_axes, &params);
+
+  // Defer to specialized implementation for 4D Mean across axes 1 & 2.
+  // NOTE: this condition must stay in sync with 'need_temporaries' in
+  // configure(), which decides whether the scratch tensors exist.
+  if (_params.keep_dims && input_num_dims == 4 && params.axis_count == 2 &&
+      ((params.axis[0] == 1 && params.axis[1] == 2) ||
+       (params.axis[0] == 2 && params.axis[1] == 1)))
+  {
+    tflite::reference_ops::Mean(params, getTensorShape(input()), getTensorData<uint8_t>(input()),
+                                input()->zero_point(), input()->scale(), getTensorShape(output()),
+                                getTensorData<uint8_t>(output()), output()->zero_point(),
+                                output()->scale());
+  }
+  else if (input()->zero_point() == output()->zero_point() && input()->scale() == output()->scale())
+  {
+    // NOTE(review): both generic paths below read _temp_sum as int*, but
+    // configure() allocates _temp_sum with the input's element type (U8 here)
+    // — verify the buffer is large enough for int accumulation.
+    tflite::reference_ops::Mean(
+        getTensorData<uint8_t>(input()), getTensorShape(input()).DimsData(),
+        input()->shape().num_dims(), getTensorData<uint8_t>(output()),
+        getTensorShape(output()).DimsData(), output()->shape().num_dims(), axes_data, num_axes,
+        _params.keep_dims, getTensorData<int>(_temp_index.get()),
+        getTensorData<int>(_resolved_axes.get()), getTensorData<int>(_temp_sum.get()));
+  }
+  else
+  {
+    tflite::reference_ops::QuantizedMeanOrSum<>(
+        getTensorData<uint8_t>(input()), input()->zero_point(), input()->scale(),
+        getTensorShape(input()).DimsData(), input()->shape().num_dims(),
+        getTensorData<uint8_t>(output()), output()->zero_point(), output()->scale(),
+        getTensorShape(output()).DimsData(), output()->shape().num_dims(), axes_data, num_axes,
+        _params.keep_dims, getTensorData<int>(_temp_index.get()),
+        getTensorData<int>(_resolved_axes.get()), getTensorData<int>(_temp_sum.get()),
+        /*compute_sum=*/false);
+  }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Mean.h b/compiler/luci-interpreter/src/kernels/Mean.h
new file mode 100644
index 000000000..9cc793c72
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Mean.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_MEAN_H
+#define LUCI_INTERPRETER_KERNELS_MEAN_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+#include <memory>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Kernel implementing the MEAN reduction for FLOAT32 and U8 tensors.
+class Mean : public KernelWithParams<ReducerParams>
+{
+public:
+  Mean(const Tensor *input, const Tensor *axes, Tensor *output, const ReducerParams &params);
+
+  const Tensor *input() const { return _inputs[0]; }
+  const Tensor *axes() const { return _inputs[1]; }
+  Tensor *output() const { return _outputs[0]; }
+
+  void configure() override;
+  void execute() const override;
+
+private:
+  void evalFloat() const;
+  void evalQuantized() const;
+
+private:
+  // Scratch tensors for the generic reduction path; allocated in configure()
+  // only when the specialized 4D mean-over-H,W path cannot be used.
+  std::unique_ptr<Tensor> _temp_index;
+  std::unique_ptr<Tensor> _resolved_axes;
+  std::unique_ptr<Tensor> _temp_sum;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_MEAN_H
diff --git a/compiler/luci-interpreter/src/kernels/Mean.test.cpp b/compiler/luci-interpreter/src/kernels/Mean.test.cpp
new file mode 100644
index 000000000..f4e411ca4
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Mean.test.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Mean.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+// Generic reduction path: 3D input, axes {0, 2}, keep_dims=true.
+TEST(MeanTest, FloatKeepDims)
+{
+  std::vector<float> input_data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                                   9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                                   17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+
+  std::vector<int32_t> axis_data{0, 2};
+  Tensor input_tensor = makeInputTensor<DataType::FLOAT32>({4, 3, 2}, input_data);
+  Tensor axis_tensor = makeInputTensor<DataType::S32>({2}, axis_data);
+  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+  ReducerParams params{};
+  params.keep_dims = true;
+
+  Mean kernel(&input_tensor, &axis_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{10.5, 12.5, 14.5};
+  std::initializer_list<int32_t> ref_output_shape{1, 3, 1};
+  EXPECT_THAT(extractTensorData<float>(output_tensor),
+              ElementsAreArray(ArrayFloatNear(ref_output_data)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+// Specialized fast path: 4D input, axes {1, 2} (H and W), keep_dims=true.
+TEST(MeanTest, FloatKeepDims4DMean)
+{
+  std::vector<float> input_data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                                   9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                                   17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+
+  std::vector<int32_t> axis_data{1, 2};
+  Tensor input_tensor = makeInputTensor<DataType::FLOAT32>({2, 2, 3, 2}, input_data);
+  Tensor axis_tensor = makeInputTensor<DataType::S32>({2}, axis_data);
+  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+  ReducerParams params{};
+  params.keep_dims = true;
+
+  Mean kernel(&input_tensor, &axis_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{6, 7, 18, 19};
+  std::initializer_list<int32_t> ref_output_shape{2, 1, 1, 2};
+  EXPECT_THAT(extractTensorData<float>(output_tensor),
+              ElementsAreArray(ArrayFloatNear(ref_output_data)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+// keep_dims=false with duplicate axes (for rank 3, -3 aliases axis 0):
+// duplicates must be counted once when sizing the output.
+TEST(MeanTest, FloatNotKeepDims)
+{
+  std::vector<float> input_data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                                   9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                                   17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+
+  std::vector<int32_t> axis_data{1, 0, -3, -3};
+  Tensor input_tensor = makeInputTensor<DataType::FLOAT32>({4, 3, 2}, input_data);
+  Tensor axis_tensor = makeInputTensor<DataType::S32>({4}, axis_data);
+  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+  ReducerParams params{};
+  params.keep_dims = false;
+
+  Mean kernel(&input_tensor, &axis_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{12, 13};
+  std::initializer_list<int32_t> ref_output_shape{2};
+  EXPECT_THAT(extractTensorData<float>(output_tensor),
+              ElementsAreArray(ArrayFloatNear(ref_output_data)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+// Quantized mean with keep_dims=true; input/output share quantization, so
+// this exercises the generic integer-reduction path.
+TEST(MeanTest, Uint8KeepDims)
+{
+  float kQuantizedTolerance = getTolerance(-1.0, 1.0, 255);
+  std::vector<float> input_data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-1.0f, 1.0f);
+
+  std::vector<int32_t> axis_data{1};
+  Tensor input_tensor{DataType::U8, {3, 2}, {{quant_param.first}, {quant_param.second}}, ""};
+  Tensor axis_tensor = makeInputTensor<DataType::S32>({1}, axis_data);
+  Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second);
+  std::vector<uint8_t> quantize_input =
+      quantize<uint8_t>(input_data, quant_param.first, quant_param.second);
+  input_tensor.writeData(quantize_input.data(), quantize_input.size() * sizeof(uint8_t));
+
+  ReducerParams params{};
+  params.keep_dims = true;
+
+  Mean kernel(&input_tensor, &axis_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{0.3, 0.35, 0.55};
+  std::initializer_list<int32_t> ref_output_shape{3, 1};
+  EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+                                  output_tensor.zero_point()),
+              ElementsAreArray(ArrayFloatNear(ref_output_data, kQuantizedTolerance)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+// Quantized mean with keep_dims=false over axis 1 of a 1x3x2 input.
+TEST(MeanTest, Uint8NotKeepDims)
+{
+  float kQuantizedTolerance = getTolerance(-1.0, 1.0, 255);
+  std::vector<float> input_data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-1.0f, 1.0f);
+
+  std::vector<int32_t> axis_data{1};
+  Tensor input_tensor{DataType::U8, {1, 3, 2}, {{quant_param.first}, {quant_param.second}}, ""};
+  Tensor axis_tensor = makeInputTensor<DataType::S32>({1}, axis_data);
+  Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second);
+  std::vector<uint8_t> quantize_input =
+      quantize<uint8_t>(input_data, quant_param.first, quant_param.second);
+  input_tensor.writeData(quantize_input.data(), quantize_input.size() * sizeof(uint8_t));
+
+  ReducerParams params{};
+  params.keep_dims = false;
+
+  Mean kernel(&input_tensor, &axis_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<float> ref_output_data{0.4, 0.4};
+  std::initializer_list<int32_t> ref_output_shape{1, 2};
+  EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+                                  output_tensor.zero_point()),
+              ElementsAreArray(ArrayFloatNear(ref_output_data, kQuantizedTolerance)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Mul.cpp b/compiler/luci-interpreter/src/kernels/Mul.cpp
new file mode 100644
index 000000000..a6e721a09
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Mul.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Mul.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Registers both operand tensors as inputs of the parameterized kernel base.
+Mul::Mul(const Tensor *input1, const Tensor *input2, Tensor *output, const MulParams &params)
+    : KernelWithParams<MulParams>({input1, input2}, {output}, params)
+{
+}
+
+// Checks the operand element types match and sets the output shape to the
+// broadcast of the two input shapes. Only FLOAT32 executes (see execute()).
+void Mul::configure()
+{
+  assert(input1()->element_type() == input2()->element_type());
+  output()->resize(calculateShapeForBroadcast(input1()->shape(), input2()->shape()));
+}
+
+// Dispatches on the element type; only FLOAT32 is currently supported.
+void Mul::execute() const
+{
+  if (input1()->element_type() == DataType::FLOAT32)
+  {
+    evalFloat();
+  }
+  else
+  {
+    throw std::runtime_error("Unsupported type.");
+  }
+}
+
+// FLOAT32 multiplication; selects the broadcasting reference kernel when the
+// operand shapes differ.
+void Mul::evalFloat() const
+{
+  float activation_min{};
+  float activation_max{};
+  calculateActivationRange(_params.activation, &activation_min, &activation_max);
+
+  tflite::ArithmeticParams params{};
+  params.float_activation_min = activation_min;
+  params.float_activation_max = activation_max;
+
+  // ProcessBroadcastShapes also fills the broadcast metadata inside 'params'.
+  const bool need_broadcast = tflite::reference_ops::ProcessBroadcastShapes(
+      getTensorShape(input1()), getTensorShape(input2()), &params);
+
+  if (need_broadcast)
+  {
+    tflite::reference_ops::BroadcastMul4DSlow(
+        params, getTensorShape(input1()), getTensorData<float>(input1()), getTensorShape(input2()),
+        getTensorData<float>(input2()), getTensorShape(output()), getTensorData<float>(output()));
+  }
+  else
+  {
+    tflite::reference_ops::Mul(params, getTensorShape(input1()), getTensorData<float>(input1()),
+                               getTensorShape(input2()), getTensorData<float>(input2()),
+                               getTensorShape(output()), getTensorData<float>(output()));
+  }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Mul.h b/compiler/luci-interpreter/src/kernels/Mul.h
new file mode 100644
index 000000000..e46160bcb
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Mul.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_MUL_H
+#define LUCI_INTERPRETER_KERNELS_MUL_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+#include <cstdint>
+#include <vector>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Mul : public KernelWithParams<MulParams>
+{
+public:
+ Mul(const Tensor *input1, const Tensor *input2, Tensor *output, const MulParams &params);
+
+ const Tensor *input1() const { return _inputs[0]; }
+ const Tensor *input2() const { return _inputs[1]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_MUL_H
diff --git a/compiler/luci-interpreter/src/kernels/Mul.test.cpp b/compiler/luci-interpreter/src/kernels/Mul.test.cpp
new file mode 100644
index 000000000..f2255ac3f
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Mul.test.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Mul.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(MulTest, Float)
+{
+ Shape base_shape = {2, 3, 1, 2};
+ std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
+ std::vector<std::vector<float>> test_outputs = {
+ {0.00f, 0.69f, 0.12f, 1.15f, 0.00f, 2.07f, 0.18f, 0.15f, 0.00f, 0.25f, 0.90f, 0.45f,
+ 0.16f, 0.00f, 0.00f, 0.00f, 0.80f, 0.00f, 0.24f, 0.84f, 0.00f, 1.40f, 1.20f, 2.52f,
+ 0.00f, 0.00f, 0.64f, 0.00f, 0.00f, 0.00f, 0.14f, 0.00f, 0.00f, 0.00f, 0.70f, 0.00f},
+ {0.00f, 0.69f, 0.00f, 0.25f, 0.80f, 0.00f, 0.24f, 0.84f, 0.64f, 0.00f, 0.70f, 0.00f},
+ {0.00f, 0.46f, 0.00f, 0.69f, 0.12f, 0.00f, 0.18f, 0.10f, 0.27f, 0.15f, 0.00f, 0.00f,
+ 0.16f, 0.00f, 0.24f, 0.00f, 0.00f, 0.44f, 0.60f, 1.40f, 1.20f, 2.80f, 1.08f, 2.52f,
+ 0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.35f, 0.00f, 0.70f, 0.00f, 0.63f, 0.00f},
+ {0.00f, 0.46f, 0.27f, 0.15f, 0.00f, 0.44f, 0.60f, 1.40f, 0.00f, 0.00f, 0.63f, 0.00f}};
+ std::vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
+ 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
+ std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
+ for (size_t i = 0; i < test_shapes.size(); ++i)
+ {
+ Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
+ Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ MulParams params{};
+ params.activation = Activation::RELU;
+
+ Mul kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
+ << "With shape number " << i;
+ }
+ // Re-run with exchanged inputs.
+ for (size_t i = 0; i < test_shapes.size(); ++i)
+ {
+ Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
+ Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ MulParams params{};
+ params.activation = Activation::RELU;
+
+ Mul kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
+ << "With shape number " << i;
+ }
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Pad.cpp b/compiler/luci-interpreter/src/kernels/Pad.cpp
new file mode 100644
index 000000000..bdf3a2a95
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Pad.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Pad.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+Pad::Pad(const Tensor *input, const Tensor *paddings, Tensor *output)
+ : Kernel({input, paddings}, {output})
+{
+}
+
+void Pad::configure()
+{
+ const Shape &input_shape = input()->shape();
+ const int num_dims = input_shape.num_dims();
+
+ if (num_dims > 4)
+ throw std::runtime_error("Unsupported number of dimensions.");
+
+ assert(output()->element_type() == input()->element_type());
+ assert(paddings()->element_type() == DataType::S32);
+ // Paddings shape should be [N, 2].
+ assert(paddings()->shape().num_dims() == 2);
+ assert(paddings()->shape().dim(0) == num_dims);
+ assert(paddings()->shape().dim(1) == 2);
+
+ Shape output_shape(num_dims);
+ const auto *paddings_data = getTensorData<int32_t>(paddings());
+ for (int i = 0; i < num_dims; ++i)
+ {
+ const int32_t padding_before = paddings_data[i * 2];
+ const int32_t padding_after = paddings_data[i * 2 + 1];
+ assert(padding_before >= 0 && padding_after >= 0);
+ output_shape.dim(i) = input_shape.dim(i) + padding_before + padding_after;
+ }
+
+ output()->resize(output_shape);
+}
+
+void Pad::execute() const
+{
+ const int num_dims = input()->shape().num_dims();
+
+ tflite::PadParams params{};
+ params.left_padding_count = num_dims;
+ params.right_padding_count = num_dims;
+
+ const auto *paddings_data = getTensorData<int32_t>(paddings());
+ for (int i = num_dims - 1; i >= 0; --i)
+ {
+ params.left_padding[i] = paddings_data[i * 2];
+ params.right_padding[i] = paddings_data[i * 2 + 1];
+ }
+
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ {
+ const float pad_value = 0.0f;
+ tflite::reference_ops::Pad(params, getTensorShape(input()), getTensorData<float>(input()),
+ &pad_value, getTensorShape(output()),
+ getTensorData<float>(output()));
+ break;
+ }
+ case DataType::U8:
+ {
+ assert(output()->zero_point() >= std::numeric_limits<uint8_t>::min());
+ assert(output()->zero_point() <= std::numeric_limits<uint8_t>::max());
+ const auto pad_value = static_cast<uint8_t>(output()->zero_point());
+ tflite::reference_ops::Pad(params, getTensorShape(input()), getTensorData<uint8_t>(input()),
+ &pad_value, getTensorShape(output()),
+ getTensorData<uint8_t>(output()));
+ break;
+ }
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Pad.h b/compiler/luci-interpreter/src/kernels/Pad.h
new file mode 100644
index 000000000..e05b47f29
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Pad.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_PAD_H
+#define LUCI_INTERPRETER_KERNELS_PAD_H
+
+#include "core/Kernel.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Pad : public Kernel
+{
+public:
+ Pad(const Tensor *input, const Tensor *paddings, Tensor *output);
+
+ const Tensor *input() const { return _inputs[0]; }
+ const Tensor *paddings() const { return _inputs[1]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_PAD_H
diff --git a/compiler/luci-interpreter/src/kernels/Pad.test.cpp b/compiler/luci-interpreter/src/kernels/Pad.test.cpp
new file mode 100644
index 000000000..15fcd0da3
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Pad.test.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Pad.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+float GetTolerance(float min, float max) { return (max - min) / 255.0; }
+
+TEST(Pad, Uint8)
+{
+ float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
+ std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-1.0f, 1.0f);
+ std::vector<float> input_data{-0.8, 0.2, 0.9, 0.7, 0.1, -0.3};
+ std::vector<int32_t> paddings_data{0, 0, 0, 2, 1, 3, 0, 0};
+ Tensor input_tensor{DataType::U8, {1, 2, 3, 1}, {{quant_param.first}, {quant_param.second}}, ""};
+ Tensor paddings_tensor = makeInputTensor<DataType::S32>({4, 2}, paddings_data);
+ Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second);
+ std::vector<uint8_t> quantize_input =
+ quantize<uint8_t>(input_data, quant_param.first, quant_param.second);
+ input_tensor.writeData(quantize_input.data(), quantize_input.size() * sizeof(uint8_t));
+
+ Pad kernel(&input_tensor, &paddings_tensor, &output_tensor);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ EXPECT_THAT(dequantize(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+ output_tensor.zero_point()),
+ ElementsAreArray(ArrayFloatNear(ref_output_data, kQuantizedTolerance)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 4, 7, 1}));
+}
+
+TEST(Pad, Float)
+{
+ std::vector<float> input_data{1, 2, 3, 4, 5, 6};
+ std::vector<int32_t> paddings_data{1, 0, 0, 2, 0, 3, 0, 0};
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>({1, 2, 3, 1}, input_data);
+ Tensor paddings_tensor = makeInputTensor<DataType::S32>({4, 2}, paddings_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Pad kernel(&input_tensor, &paddings_tensor, &output_tensor);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 4, 5,
+ 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ std::initializer_list<int32_t> ref_output_shape{2, 4, 6, 1};
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Reshape.cpp b/compiler/luci-interpreter/src/kernels/Reshape.cpp
new file mode 100644
index 000000000..d88b5392a
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Reshape.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Reshape.h"
+
+#include <cassert>
+#include <cstring>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+static Shape extractShapeFromTensor(const Tensor *tensor)
+{
+ assert(tensor->element_type() == DataType::S32);
+ Shape shape(tensor->shape().num_elements());
+ const auto *shape_data = tensor->data<int32_t>();
+ for (int i = 0; i < tensor->shape().num_elements(); ++i)
+ {
+ shape.dim(i) = shape_data[i];
+ }
+ return shape;
+}
+
+static void resolveUnknownDimension(const Shape &input_shape, Shape *output_shape)
+{
+ const int32_t num_input_elements = input_shape.num_elements();
+ int32_t num_output_elements = 1;
+ int unknown_dim_index = -1;
+ for (int i = 0; i < output_shape->num_dims(); ++i)
+ {
+ const int32_t value = output_shape->dim(i);
+ if (value == -1)
+ {
+ assert(unknown_dim_index == -1);
+ unknown_dim_index = i;
+ }
+ else
+ {
+ num_output_elements *= value;
+ }
+ }
+ if (unknown_dim_index != -1)
+ {
+ output_shape->dim(unknown_dim_index) = num_input_elements / num_output_elements;
+ num_output_elements *= output_shape->dim(unknown_dim_index);
+ }
+ assert(num_output_elements == num_input_elements);
+}
+
+Reshape::Reshape(const Tensor *input, const Tensor *shape, Tensor *output)
+ : Kernel({input, shape}, {output})
+{
+}
+
+void Reshape::configure()
+{
+ Shape output_shape = extractShapeFromTensor(shape());
+ resolveUnknownDimension(input()->shape(), &output_shape);
+ output()->resize(output_shape);
+}
+
+void Reshape::execute() const
+{
+ const auto *input_data = input()->data<void>();
+ auto *output_data = output()->data<void>();
+
+ const size_t element_size = getDataTypeSize(input()->element_type());
+ const int32_t num_elements = input()->shape().num_elements();
+ std::memcpy(output_data, input_data, num_elements * element_size);
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Reshape.h b/compiler/luci-interpreter/src/kernels/Reshape.h
new file mode 100644
index 000000000..99b947f77
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Reshape.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_RESHAPE_H
+#define LUCI_INTERPRETER_KERNELS_RESHAPE_H
+
+#include "core/Kernel.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Reshape : public Kernel
+{
+public:
+ Reshape(const Tensor *input, const Tensor *shape, Tensor *output);
+
+ const Tensor *input() const { return _inputs[0]; }
+ const Tensor *shape() const { return _inputs[1]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_RESHAPE_H
diff --git a/compiler/luci-interpreter/src/kernels/Reshape.test.cpp b/compiler/luci-interpreter/src/kernels/Reshape.test.cpp
new file mode 100644
index 000000000..7255b8132
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Reshape.test.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Reshape.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+// TODO Test types other than FLOAT32.
+
+TEST(ReshapeTest, Regular)
+{
+ Shape input_shape{1, 2, 2, 3};
+ std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+ Shape shape_shape{2};
+ std::vector<int32_t> shape_data{3, 4};
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor shape_tensor = makeInputTensor<DataType::S32>(shape_shape, shape_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Reshape kernel(&input_tensor, &shape_tensor, &output_tensor);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(input_data)));
+}
+
+TEST(ReshapeTest, UnknownDimension)
+{
+ Shape input_shape{2, 1, 2, 3};
+ std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+ Shape shape_shape{3};
+ std::vector<int32_t> shape_data{2, -1, 2};
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor shape_tensor = makeInputTensor<DataType::S32>(shape_shape, shape_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ Reshape kernel(&input_tensor, &shape_tensor, &output_tensor);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(input_data)));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Softmax.cpp b/compiler/luci-interpreter/src/kernels/Softmax.cpp
new file mode 100644
index 000000000..2fb7f3f2c
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Softmax.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Softmax.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/softmax.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+Softmax::Softmax(const Tensor *input, Tensor *output, const SoftmaxParams &params)
+ : KernelWithParams<SoftmaxParams>({input}, {output}, params)
+{
+}
+
+void Softmax::configure()
+{
+ assert(input()->element_type() == output()->element_type());
+ output()->resize(input()->shape());
+}
+
+void Softmax::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ evalFloat();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void Softmax::evalFloat() const
+{
+ tflite::SoftmaxParams params{};
+ params.beta = _params.beta;
+
+ tflite::reference_ops::Softmax(params, getTensorShape(input()), getTensorData<float>(input()),
+ getTensorShape(output()), getTensorData<float>(output()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Softmax.h b/compiler/luci-interpreter/src/kernels/Softmax.h
new file mode 100644
index 000000000..2e4eda492
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Softmax.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_SOFTMAX_H
+#define LUCI_INTERPRETER_KERNELS_SOFTMAX_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Softmax : public KernelWithParams<SoftmaxParams>
+{
+public:
+ Softmax(const Tensor *input, Tensor *output, const SoftmaxParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_SOFTMAX_H
diff --git a/compiler/luci-interpreter/src/kernels/Softmax.test.cpp b/compiler/luci-interpreter/src/kernels/Softmax.test.cpp
new file mode 100644
index 000000000..2193c3e83
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Softmax.test.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Softmax.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+TEST(SoftmaxTest, Float)
+{
+ Shape input_shape{2, 1, 2, 3};
+ std::vector<float> input_data{
+ 5, -9, 8, //
+ -7, 2, -4, //
+ 1, -2, 9, //
+ 3, -6, -1, //
+ };
+ Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+ SoftmaxParams params{};
+ params.beta = 0.1;
+
+ Softmax kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ std::vector<float> ref_output_data{
+ 0.38514, 0.09497, 0.51989, //
+ 0.20792, 0.51141, 0.28067, //
+ 0.25212, 0.18678, 0.56110, //
+ 0.48149, 0.19576, 0.32275, //
+ };
+ EXPECT_THAT(extractTensorData<float>(output_tensor),
+ ElementsAreArray(ArrayFloatNear(ref_output_data)));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/SpaceToDepth.cpp b/compiler/luci-interpreter/src/kernels/SpaceToDepth.cpp
new file mode 100644
index 000000000..6a5bd7cf8
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/SpaceToDepth.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SpaceToDepth.h"
+#include "Utils.h"
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+SpaceToDepth::SpaceToDepth(const Tensor *input, Tensor *output, const SpaceToDepthParams &params)
+ : KernelWithParams<SpaceToDepthParams>({input}, {output}, params)
+{
+}
+
+void SpaceToDepth::configure()
+{
+ assert(input()->shape().num_dims() == 4);
+ assert(output()->element_type() == DataType::FLOAT32 ||
+ output()->element_type() == DataType::U8 || output()->element_type() == DataType::S8 ||
+ output()->element_type() == DataType::S32 || output()->element_type() == DataType::S64);
+ assert(input()->element_type() == output()->element_type());
+
+ const int block_size = params().block_size;
+ const int32_t input_height = input()->shape().dim(1);
+ const int32_t input_width = input()->shape().dim(2);
+ int32_t output_height = input_height / block_size;
+ int32_t output_width = input_width / block_size;
+
+ assert(input_height == output_height * block_size);
+ assert(input_width == output_width * block_size);
+
+ Shape output_shape(4);
+ output_shape.dim(0) = input()->shape().dim(0);
+ output_shape.dim(1) = output_height;
+ output_shape.dim(2) = output_width;
+ output_shape.dim(3) = input()->shape().dim(3) * block_size * block_size;
+
+ output()->resize(output_shape);
+}
+
+void SpaceToDepth::execute() const
+{
+ tflite::SpaceToDepthParams op_params{};
+ op_params.block_size = params().block_size;
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ tflite::optimized_ops::SpaceToDepth(op_params, getTensorShape(input()),
+ getTensorData<float>(input()), getTensorShape(output()),
+ getTensorData<float>(output()));
+ break;
+ case DataType::U8:
+ tflite::optimized_ops::SpaceToDepth(op_params, getTensorShape(input()),
+ getTensorData<uint8_t>(input()), getTensorShape(output()),
+ getTensorData<uint8_t>(output()));
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/SpaceToDepth.h b/compiler/luci-interpreter/src/kernels/SpaceToDepth.h
new file mode 100644
index 000000000..e66316b11
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/SpaceToDepth.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_SPACETODEPTH_H
+#define LUCI_INTERPRETER_KERNELS_SPACETODEPTH_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+#include <vector>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class SpaceToDepth : public KernelWithParams<SpaceToDepthParams>
+{
+public:
+ SpaceToDepth(const Tensor *input, Tensor *output, const SpaceToDepthParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_SPACETODEPTH_H
diff --git a/compiler/luci-interpreter/src/kernels/SpaceToDepth.test.cpp b/compiler/luci-interpreter/src/kernels/SpaceToDepth.test.cpp
new file mode 100644
index 000000000..e4a0fd642
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/SpaceToDepth.test.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/SpaceToDepth.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+template <typename T> class SpaceToDepthTest : public ::testing::Test
+{
+};
+
+using DataTypes = ::testing::Types<float, uint8_t>;
+TYPED_TEST_CASE(SpaceToDepthTest, DataTypes);
+
+TYPED_TEST(SpaceToDepthTest, SimpleCase)
+{
+ std::vector<TypeParam> input_data{1, 5, 6, 7, 2, 3, 4, 8};
+ Shape input_shape{1, 2, 2, 2};
+ Tensor input_tensor{getElementType<TypeParam>(), input_shape, {{}, {}}, ""};
+ input_tensor.writeData(input_data.data(), input_data.size() * sizeof(TypeParam));
+ std::vector<TypeParam> output_data{1, 5, 6, 7, 2, 3, 4, 8};
+ std::vector<int32_t> output_shape{1, 1, 1, 8};
+ Tensor output_tensor = makeOutputTensor(getElementType<TypeParam>());
+
+ SpaceToDepthParams params{};
+ params.block_size = 2;
+
+ SpaceToDepth kernel(&input_tensor, &output_tensor, params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<TypeParam>(output_tensor),
+ ::testing::ElementsAreArray(output_data));
+ EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Split.cpp b/compiler/luci-interpreter/src/kernels/Split.cpp
new file mode 100644
index 000000000..325b1c22f
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Split.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Split.h"
+
+#include "Utils.h"
+
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Inputs are ordered {axis, input}; 'outputs' holds one tensor per split part.
+Split::Split(const Tensor *axis, const Tensor *input, std::vector<Tensor *> outputs)
+    : Kernel({axis, input}, std::move(outputs))
+{
+}
+
+// Resolves the (possibly negative) axis, checks that the input divides evenly
+// into _outputs.size() slices along it, and resizes every output tensor to
+// the common slice shape.
+void Split::configure()
+{
+  assert(axis()->shape().num_elements() == 1);
+  _axis_value = getTensorData<int32_t>(axis())[0];
+  // A negative axis counts from the back.
+  if (_axis_value < 0)
+    _axis_value += input()->shape().num_dims();
+  assert(_axis_value >= 0 && _axis_value < input()->shape().num_dims());
+
+  const int32_t input_size = input()->shape().dim(_axis_value);
+  assert(input_size % _outputs.size() == 0);
+  const int32_t slice_size = input_size / _outputs.size();
+
+  // All outputs share the input shape except along the split axis.
+  Shape output_shape = input()->shape();
+  output_shape.dim(_axis_value) = slice_size;
+  for (Tensor *output : _outputs)
+  {
+    output->resize(output_shape);
+  }
+}
+
+// Dispatches to the tflite optimized Split implementation for the element
+// type resolved at runtime; unsupported types raise std::runtime_error.
+void Split::execute() const
+{
+  tflite::SplitParams params{};
+  params.num_split = _outputs.size();
+  params.axis = _axis_value;
+
+#define TF_LITE_SPLIT(scalar)                                                                      \
+  {                                                                                                \
+    VectorOfTensors<scalar, false> all_outputs(_outputs);                                          \
+    tflite::optimized_ops::Split(params, getTensorShape(input()), getTensorData<scalar>(input()),  \
+                                 all_outputs.shapes(), all_outputs.data());                        \
+  }
+
+  switch (input()->element_type())
+  {
+    case DataType::FLOAT32:
+      TF_LITE_SPLIT(float);
+      break;
+    case DataType::U8:
+      TF_LITE_SPLIT(uint8_t);
+      break;
+    default:
+      throw std::runtime_error("Unsupported type.");
+  }
+#undef TF_LITE_SPLIT
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Split.h b/compiler/luci-interpreter/src/kernels/Split.h
new file mode 100644
index 000000000..9542b1e56
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Split.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_SPLIT_H
+#define LUCI_INTERPRETER_KERNELS_SPLIT_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Kernel wrapping the circle/tflite Split operation: slices 'input' into
+// equal parts along 'axis' and writes one part per output tensor.
+class Split : public Kernel
+{
+public:
+  Split(const Tensor *axis, const Tensor *input, std::vector<Tensor *> outputs);
+
+  // Convenience accessors over the generic input/output arrays.
+  const Tensor *axis() const { return _inputs[0]; }
+  const Tensor *input() const { return _inputs[1]; }
+  Tensor *output(int index) const { return _outputs[index]; }
+
+  void configure() override;
+  void execute() const override;
+
+private:
+  // Axis resolved to a non-negative value during configure().
+  int32_t _axis_value{};
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_SPLIT_H
diff --git a/compiler/luci-interpreter/src/kernels/Split.test.cpp b/compiler/luci-interpreter/src/kernels/Split.test.cpp
new file mode 100644
index 000000000..11d0b1ea9
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Split.test.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Split.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+// Test helper: builds a Split kernel that slices an 'input_shape' input into
+// 'num_splits' parts along 'axis', runs it, and checks every output against
+// the corresponding row of 'output_data' as well as the expected per-output
+// 'output_shape'.
+// Fix: 'output_shape' was previously accepted but never checked.
+template <typename T>
+void Check(int axis, int num_splits, std::initializer_list<int32_t> input_shape,
+           std::initializer_list<int32_t> output_shape, std::initializer_list<T> input_data,
+           std::vector<std::vector<T>> output_data, DataType element_type)
+{
+  Tensor axis_tensor = makeInputTensor<DataType::S32>({}, {axis});
+  Tensor input_tensor{element_type, input_shape, {}, ""};
+  input_tensor.writeData(input_data.begin(), input_data.size() * sizeof(T));
+
+  std::vector<Tensor> output_tensors;
+  output_tensors.reserve(num_splits);
+  for (int i = 0; i < num_splits; ++i)
+  {
+    output_tensors.emplace_back(makeOutputTensor(element_type));
+  }
+
+  // The kernel takes raw pointers; the Tensors stay alive in output_tensors.
+  std::vector<Tensor *> output_tensor_ptrs(num_splits);
+  for (int i = 0; i < num_splits; ++i)
+  {
+    output_tensor_ptrs[i] = &output_tensors[i];
+  }
+
+  Split kernel(&axis_tensor, &input_tensor, std::move(output_tensor_ptrs));
+  kernel.configure();
+  kernel.execute();
+
+  for (int i = 0; i < num_splits; ++i)
+  {
+    EXPECT_THAT(extractTensorData<T>(output_tensors[i]),
+                ::testing::ElementsAreArray(output_data[i]));
+    // All split outputs share the same shape (see Split::configure).
+    EXPECT_THAT(extractTensorShape(output_tensors[i]),
+                ::testing::ElementsAreArray(output_shape));
+  }
+}
+
+// Typed fixture: each test below runs for float and uint8_t.
+template <typename T> class SplitTest : public ::testing::Test
+{
+};
+
+using DataTypes = ::testing::Types<float, uint8_t>;
+TYPED_TEST_CASE(SplitTest, DataTypes);
+
+// Splits a 2x2x2x2 tensor into two halves along each of the four axes.
+TYPED_TEST(SplitTest, FourDimensional)
+{
+  Check<TypeParam>(/*axis=*/0, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
+                   {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+                   {
+                       {1, 2, 3, 4, 5, 6, 7, 8},        //
+                       {9, 10, 11, 12, 13, 14, 15, 16}, //
+                   },
+                   getElementType<TypeParam>());
+  Check<TypeParam>(
+      /*axis=*/1, /*num_splits=*/2, {2, 2, 2, 2}, {2, 1, 2, 2},
+      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+      {
+          {1, 2, 3, 4, 9, 10, 11, 12},  //
+          {5, 6, 7, 8, 13, 14, 15, 16}, //
+      },
+      getElementType<TypeParam>());
+  Check<TypeParam>(
+      /*axis=*/2, /*num_splits=*/2, {2, 2, 2, 2}, {2, 2, 1, 2},
+      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+      {
+          {1, 2, 5, 6, 9, 10, 13, 14},  //
+          {3, 4, 7, 8, 11, 12, 15, 16}, //
+      },
+      getElementType<TypeParam>());
+  Check<TypeParam>(
+      /*axis=*/3, /*num_splits=*/2, {2, 2, 2, 2}, {2, 2, 2, 1},
+      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+      {
+          {1, 3, 5, 7, 9, 11, 13, 15},  //
+          {2, 4, 6, 8, 10, 12, 14, 16}, //
+      },
+      getElementType<TypeParam>());
+}
+
+// Splitting a length-8 vector into 8 parts yields single-element tensors.
+TYPED_TEST(SplitTest, OneDimensional)
+{
+  Check<TypeParam>(
+      /*axis=*/0, /*num_splits=*/8, {8}, {1}, {1, 2, 3, 4, 5, 6, 7, 8},
+      {{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}}, getElementType<TypeParam>());
+}
+
+// Axis -4 on a rank-4 tensor resolves to axis 0 (see Split::configure).
+TYPED_TEST(SplitTest, NegativeAxis)
+{
+  Check<TypeParam>(
+      /*axis=*/-4, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
+      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+      {
+          {1, 2, 3, 4, 5, 6, 7, 8}, //
+          {9, 10, 11, 12, 13, 14, 15, 16},
+      },
+      getElementType<TypeParam>());
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Squeeze.cpp b/compiler/luci-interpreter/src/kernels/Squeeze.cpp
new file mode 100644
index 000000000..ce43ef789
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Squeeze.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Squeeze.h"
+
+#include "kernels/Utils.h"
+
+#include <cstring>
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+Squeeze::Squeeze(const Tensor *input, Tensor *output, const SqueezeParams &params)
+    : KernelWithParams<SqueezeParams>({input}, {output}, params)
+{
+}
+
+// Computes the output shape by dropping size-1 dimensions: either every
+// size-1 dimension (when params().squeeze_dims is empty) or exactly the
+// requested ones, which must all have size 1. Negative indices count from
+// the back. Inputs of up to 8 dimensions are supported.
+void Squeeze::configure()
+{
+  int input_num_dims = input()->shape().num_dims();
+  int num_squeeze_dims = params().squeeze_dims.size();
+  assert(input_num_dims <= 8);
+  bool should_squeeze[8] = {false};
+  int num_squeezed_dims = 0;
+  if (num_squeeze_dims == 0)
+  {
+    // No explicit dims: squeeze every dimension of size 1.
+    for (int idx = 0; idx < input_num_dims; ++idx)
+    {
+      if (input()->shape().dim(idx) == 1)
+      {
+        should_squeeze[idx] = true;
+        ++num_squeezed_dims;
+      }
+    }
+  }
+  else
+  {
+    for (int idx = 0; idx < num_squeeze_dims; ++idx)
+    {
+      // Normalize negative indices; each requested dim must exist and be 1.
+      int current = params().squeeze_dims[idx] < 0 ? params().squeeze_dims[idx] + input_num_dims
+                                                   : params().squeeze_dims[idx];
+      assert(current >= 0 && current < input_num_dims && input()->shape().dim(current) == 1);
+      // Guard against double-counting when the same dim is listed twice.
+      if (!should_squeeze[current])
+        ++num_squeezed_dims;
+      should_squeeze[current] = true;
+    }
+  }
+  // Surviving dimensions keep their original relative order.
+  Shape output_shape(input_num_dims - num_squeezed_dims);
+  for (int in_idx = 0, out_idx = 0; in_idx < input_num_dims; ++in_idx)
+  {
+    if (!should_squeeze[in_idx])
+    {
+      output_shape.dim(out_idx++) = input()->shape().dim(in_idx);
+    }
+  }
+  output()->resize(output_shape);
+}
+
+// Squeeze never reorders elements, so execution is a flat byte copy.
+void Squeeze::execute() const
+{
+  assert(input()->shape().num_elements() == output()->shape().num_elements());
+
+  const auto *input_data = input()->data<void>();
+  auto *output_data = output()->data<void>();
+  std::memcpy(output_data, input_data,
+              getDataTypeSize(input()->element_type()) * input()->shape().num_elements());
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Squeeze.h b/compiler/luci-interpreter/src/kernels/Squeeze.h
new file mode 100644
index 000000000..687af5158
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Squeeze.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_SQUEEZE_H
+#define LUCI_INTERPRETER_KERNELS_SQUEEZE_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Kernel for the Squeeze operation: removes size-1 dimensions from the input
+// shape (selected via SqueezeParams::squeeze_dims) without touching the data.
+class Squeeze : public KernelWithParams<SqueezeParams>
+{
+public:
+  Squeeze(const Tensor *input, Tensor *output, const SqueezeParams &params);
+
+  const Tensor *input() const { return _inputs[0]; }
+  Tensor *output() const { return _outputs[0]; }
+
+  void configure() override;
+  void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_SQUEEZE_H
diff --git a/compiler/luci-interpreter/src/kernels/Squeeze.test.cpp b/compiler/luci-interpreter/src/kernels/Squeeze.test.cpp
new file mode 100644
index 000000000..3a34284dd
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Squeeze.test.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Squeeze.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+// Test helper: runs Squeeze with the given squeeze_dims and verifies both
+// the output data and the inferred output shape.
+template <typename T>
+void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> output_shape,
+           std::initializer_list<T> input_data, std::initializer_list<T> output_data,
+           DataType element_type, std::vector<int32_t> squeeze_dims)
+{
+  Tensor input_tensor{element_type, input_shape, {}, ""};
+  input_tensor.writeData(input_data.begin(), input_data.size() * sizeof(T));
+  Tensor output_tensor = makeOutputTensor(element_type);
+
+  SqueezeParams params{};
+  for (size_t i = 0; i < squeeze_dims.size(); i++)
+  {
+    params.squeeze_dims.push_back(squeeze_dims.at(i));
+  }
+
+  Squeeze kernel(&input_tensor, &output_tensor, params);
+  kernel.configure();
+  kernel.execute();
+
+  EXPECT_THAT(extractTensorData<T>(output_tensor), ::testing::ElementsAreArray(output_data));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
+}
+
+// Typed fixture: each test runs for float and uint8_t.
+template <typename T> class SqueezeTest : public ::testing::Test
+{
+};
+
+using DataTypes = ::testing::Types<float, uint8_t>;
+TYPED_TEST_CASE(SqueezeTest, DataTypes);
+
+// Squeezing dims {-1, 0} of a 1x24x1 tensor (dims 2 and 0, both size 1)
+// leaves a flat vector of 24 elements in the original order.
+TYPED_TEST(SqueezeTest, TotalTest)
+{
+  Check<TypeParam>(
+      /*input_shape=*/{1, 24, 1}, /*output_shape=*/{24},
+      /*input_data=*/{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+                      13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
+      /*output_data=*/{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+                       13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
+      getElementType<TypeParam>(), {-1, 0});
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/StridedSlice.cpp b/compiler/luci-interpreter/src/kernels/StridedSlice.cpp
new file mode 100644
index 000000000..679485439
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/StridedSlice.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/StridedSlice.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+StridedSlice::StridedSlice(const Tensor *input, const Tensor *begin, const Tensor *end,
+                           const Tensor *strides, Tensor *output, const StridedSliceParams &params)
+    : KernelWithParams<StridedSliceParams>({input, begin, end, strides}, {output}, params)
+{
+}
+
+// Validates inputs (begin/end/strides are rank-1 S32, input rank <= 4,
+// ellipsis/new_axis masks unsupported, quantization must match for U8) and
+// infers the output shape: for each axis, resolve the [begin, end) range
+// under the masks via tflite's Start/StopForAxis and divide by the stride.
+// Axes selected by shrink_axis_mask are dropped from the output.
+void StridedSlice::configure()
+{
+  assert(begin()->shape().num_dims() == 1);
+  assert(end()->shape().num_dims() == 1);
+  assert(strides()->shape().num_dims() == 1);
+  assert(input()->element_type() == output()->element_type());
+  assert(begin()->element_type() == DataType::S32);
+  assert(end()->element_type() == DataType::S32);
+  assert(strides()->element_type() == DataType::S32);
+  assert(input()->shape().num_dims() <= 4);
+  if (params().ellipsis_mask != 0)
+  {
+    throw std::runtime_error("ellipsis_mask is not implemented yet.");
+  }
+  if (params().new_axis_mask != 0)
+  {
+    throw std::runtime_error("new_axis_mask is not implemented yet.");
+  }
+  if (input()->element_type() == DataType::U8)
+  {
+    // Slicing must not requantize: scale and zero point have to match.
+    assert(input()->scale() == output()->scale());
+    assert(input()->zero_point() == output()->zero_point());
+  }
+  // NOTE(review): this op_params setup is duplicated in execute(); a shared
+  // private helper would keep the two copies in sync.
+  tflite::StridedSliceParams op_params{};
+  op_params.start_indices_count = input()->shape().num_dims();
+  op_params.stop_indices_count = input()->shape().num_dims();
+  op_params.strides_count = input()->shape().num_dims();
+
+  for (int i = 0; i < input()->shape().num_dims(); i++)
+  {
+    op_params.start_indices[i] = getTensorData<int32_t>(begin())[i];
+    op_params.stop_indices[i] = getTensorData<int32_t>(end())[i];
+    op_params.strides[i] = getTensorData<int32_t>(strides())[i];
+  }
+  op_params.begin_mask = params().begin_mask;
+  op_params.ellipsis_mask = 0;
+  op_params.end_mask = params().end_mask;
+  op_params.new_axis_mask = 0;
+  op_params.shrink_axis_mask = params().shrink_axis_mask;
+  // Axes are walked from last to first; the collected dims are reversed
+  // back into natural order when output_shape is built below.
+  std::vector<int32_t> output_shape_vector;
+  for (int i = 0; i < input()->shape().num_dims(); i++)
+  {
+    int idx = input()->shape().num_dims() - i - 1;
+    int32_t stride = getTensorData<int32_t>(strides())[idx];
+    assert(stride != 0);
+    int32_t begin = ::tflite::strided_slice::StartForAxis(op_params, getTensorShape(input()), idx);
+    int32_t end =
+        ::tflite::strided_slice::StopForAxis(op_params, getTensorShape(input()), idx, begin);
+
+    // A shrunk axis contributes exactly one element and is then dropped.
+    const bool shrink_axis = params().shrink_axis_mask & (1 << idx);
+    if (shrink_axis)
+    {
+      end = begin + 1;
+    }
+
+    // ceil((end - begin) / stride), clamped at 0 for empty ranges.
+    int32_t dim_shape = std::ceil((end - begin) / static_cast<float>(stride));
+    dim_shape = dim_shape < 0 ? 0 : dim_shape;
+    if (!shrink_axis)
+    {
+      output_shape_vector.push_back(dim_shape);
+    }
+  }
+  Shape output_shape = Shape(output_shape_vector.size());
+  for (size_t i = 0; i < output_shape_vector.size(); i++)
+  {
+    // Undo the reverse-order traversal above.
+    output_shape.dim(i) = output_shape_vector[output_shape_vector.size() - i - 1];
+  }
+  output()->resize(output_shape);
+}
+
+// Rebuilds the tflite slice parameters and dispatches to the reference
+// StridedSlice kernel for the element type resolved at runtime.
+void StridedSlice::execute() const
+{
+  tflite::StridedSliceParams op_params{};
+  op_params.start_indices_count = input()->shape().num_dims();
+  op_params.stop_indices_count = input()->shape().num_dims();
+  op_params.strides_count = input()->shape().num_dims();
+
+  for (int i = 0; i < input()->shape().num_dims(); i++)
+  {
+    op_params.start_indices[i] = getTensorData<int32_t>(begin())[i];
+    op_params.stop_indices[i] = getTensorData<int32_t>(end())[i];
+    op_params.strides[i] = getTensorData<int32_t>(strides())[i];
+  }
+  op_params.begin_mask = params().begin_mask;
+  op_params.ellipsis_mask = 0;
+  op_params.end_mask = params().end_mask;
+  op_params.new_axis_mask = 0;
+  op_params.shrink_axis_mask = params().shrink_axis_mask;
+
+  switch (input()->element_type())
+  {
+    case DataType::FLOAT32:
+      tflite::reference_ops::StridedSlice(op_params, getTensorShape(input()),
+                                          getTensorData<float>(input()), getTensorShape(output()),
+                                          getTensorData<float>(output()));
+      break;
+    case DataType::U8:
+      tflite::reference_ops::StridedSlice(op_params, getTensorShape(input()),
+                                          getTensorData<uint8_t>(input()), getTensorShape(output()),
+                                          getTensorData<uint8_t>(output()));
+      break;
+    default:
+      throw std::runtime_error("Unsupported type.");
+  }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/StridedSlice.h b/compiler/luci-interpreter/src/kernels/StridedSlice.h
new file mode 100644
index 000000000..fc96893a7
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/StridedSlice.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_STRIDEDSLICE_H
+#define LUCI_INTERPRETER_KERNELS_STRIDEDSLICE_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+// Kernel for the StridedSlice operation: extracts a strided sub-tensor of
+// 'input' described by rank-1 begin/end/strides tensors plus the masks in
+// StridedSliceParams. ellipsis_mask and new_axis_mask are not supported.
+class StridedSlice : public KernelWithParams<StridedSliceParams>
+{
+public:
+  StridedSlice(const Tensor *input, const Tensor *begin, const Tensor *end, const Tensor *strides,
+               Tensor *output, const StridedSliceParams &params);
+
+  const Tensor *input() const { return _inputs[0]; }
+  const Tensor *begin() const { return _inputs[1]; }
+  const Tensor *end() const { return _inputs[2]; }
+  const Tensor *strides() const { return _inputs[3]; }
+  Tensor *output() const { return _outputs[0]; }
+
+  void configure() override;
+  void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_STRIDEDSLICE_H
diff --git a/compiler/luci-interpreter/src/kernels/StridedSlice.test.cpp b/compiler/luci-interpreter/src/kernels/StridedSlice.test.cpp
new file mode 100644
index 000000000..5ab06e2ec
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/StridedSlice.test.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/StridedSlice.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+// Slices the first 3x2 plane out of a 2x3x2 float tensor; shrink_axis_mask=1
+// drops the leading dimension, so the output shape is {3, 2}.
+TEST(StridedSliceTest, Float)
+{
+  Shape input_shape{2, 3, 2};
+  std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  Shape begin_shape{3};
+  std::vector<int32_t> begin_data{0, 0, 0};
+  Shape end_shape{3};
+  std::vector<int32_t> end_data{1, 3, 2};
+  Shape strides_shape{3};
+  std::vector<int32_t> strides_data{1, 1, 1};
+  Tensor input_tensor{DataType::FLOAT32, input_shape, {}, ""};
+  Tensor begin_tensor{DataType::S32, begin_shape, {}, ""};
+  Tensor end_tensor{DataType::S32, end_shape, {}, ""};
+  Tensor strides_tensor{DataType::S32, strides_shape, {}, ""};
+  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
+
+  input_tensor.writeData(input_data.data(), input_data.size() * sizeof(float));
+  begin_tensor.writeData(begin_data.data(), begin_data.size() * sizeof(int32_t));
+  end_tensor.writeData(end_data.data(), end_data.size() * sizeof(int32_t));
+  strides_tensor.writeData(strides_data.data(), strides_data.size() * sizeof(int32_t));
+
+  StridedSliceParams params{};
+  params.begin_mask = 0;
+  params.end_mask = 0;
+  params.ellipsis_mask = 0;
+  params.new_axis_mask = 0;
+  params.shrink_axis_mask = 1;
+
+  StridedSlice kernel(&input_tensor, &begin_tensor, &end_tensor, &strides_tensor, &output_tensor,
+                      params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<int32_t> output_shape{3, 2};
+  std::vector<float> output_data{1, 2, 3, 4, 5, 6};
+  EXPECT_THAT(extractTensorData<float>(output_tensor),
+              ElementsAreArray(ArrayFloatNear(output_data)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
+}
+
+// Same slice on quantized uint8 data with identity quantization (scale 1.0,
+// zero point 0); results are compared after dequantization.
+TEST(StridedSliceTest, Uint8)
+{
+  Shape input_shape{2, 3, 2};
+  std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  std::vector<uint8_t> quant_input_data = quantize<uint8_t>(input_data, 1.0f, 0);
+  Shape begin_shape{3};
+  std::vector<int32_t> begin_data{0, 0, 0};
+  Shape end_shape{3};
+  std::vector<int32_t> end_data{1, 3, 2};
+  Shape strides_shape{3};
+  std::vector<int32_t> strides_data{1, 1, 1};
+  Tensor input_tensor{DataType::U8, input_shape, {{1.0f}, {0}}, ""};
+  Tensor begin_tensor{DataType::S32, begin_shape, {}, ""};
+  Tensor end_tensor{DataType::S32, end_shape, {}, ""};
+  Tensor strides_tensor{DataType::S32, strides_shape, {}, ""};
+  Tensor output_tensor = makeOutputTensor(DataType::U8, 1.0f, 0);
+
+  input_tensor.writeData(quant_input_data.data(), quant_input_data.size() * sizeof(uint8_t));
+  begin_tensor.writeData(begin_data.data(), begin_data.size() * sizeof(int32_t));
+  end_tensor.writeData(end_data.data(), end_data.size() * sizeof(int32_t));
+  strides_tensor.writeData(strides_data.data(), strides_data.size() * sizeof(int32_t));
+
+  StridedSliceParams params{};
+  params.begin_mask = 0;
+  params.end_mask = 0;
+  params.ellipsis_mask = 0;
+  params.new_axis_mask = 0;
+  params.shrink_axis_mask = 1;
+
+  StridedSlice kernel(&input_tensor, &begin_tensor, &end_tensor, &strides_tensor, &output_tensor,
+                      params);
+  kernel.configure();
+  kernel.execute();
+
+  std::vector<int32_t> output_shape{3, 2};
+  std::vector<float> output_data{1, 2, 3, 4, 5, 6};
+  EXPECT_THAT(dequantize(extractTensorData<uint8_t>(output_tensor), output_tensor.scale(),
+                         output_tensor.zero_point()),
+              ElementsAreArray(ArrayFloatNear(output_data)));
+  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/TestUtils.cpp b/compiler/luci-interpreter/src/kernels/TestUtils.cpp
new file mode 100644
index 000000000..2c8a6ae78
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/TestUtils.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace testing
+{
+
+using ::testing::FloatNear;
+using ::testing::Matcher;
+
+// Creates an empty-shaped tensor of the given type; the kernel under test is
+// expected to resize it in configure().
+Tensor makeOutputTensor(DataType element_type) { return Tensor(element_type, {}, {}, ""); }
+
+// Same as above, for quantized types that need scale/zero-point.
+Tensor makeOutputTensor(DataType element_type, float scale, int32_t zero_point)
+{
+  return Tensor(element_type, {}, {{scale}, {zero_point}}, "");
+}
+
+// Builds one FloatNear matcher per expected value, for use with
+// ElementsAreArray in approximate float comparisons.
+std::vector<Matcher<float>> ArrayFloatNear(const std::vector<float> &values, float max_abs_error)
+{
+  std::vector<Matcher<float>> matchers;
+  matchers.reserve(values.size());
+  for (const float v : values)
+  {
+    matchers.emplace_back(FloatNear(v, max_abs_error));
+  }
+  return matchers;
+}
+
+// Returns the tensor's dimensions as a plain vector, for shape assertions.
+std::vector<int32_t> extractTensorShape(const Tensor &tensor)
+{
+  std::vector<int32_t> result;
+  int dims = tensor.shape().num_dims();
+  for (int i = 0; i < dims; i++)
+  {
+    result.push_back(tensor.shape().dim(i));
+  }
+  return result;
+}
+
+} // namespace testing
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/TestUtils.h b/compiler/luci-interpreter/src/kernels/TestUtils.h
new file mode 100644
index 000000000..5311a1949
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/TestUtils.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_TESTUTILS_H
+#define LUCI_INTERPRETER_KERNELS_TESTUTILS_H
+
+#include "luci_interpreter/core/Tensor.h"
+
+#include <type_traits>
+
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace testing
+{
+
+// Creates a tensor of static type DT with the given shape and fills it with
+// 'data' (the caller must size 'data' to match 'shape'; not checked here).
+template <DataType DT>
+Tensor makeInputTensor(const Shape &shape, const std::vector<typename DataTypeImpl<DT>::Type> &data)
+{
+  Tensor tensor(DT, shape, {}, "");
+  tensor.writeData(data.data(), data.size() * sizeof(typename DataTypeImpl<DT>::Type));
+  return tensor;
+}
+
+Tensor makeOutputTensor(DataType element_type);
+Tensor makeOutputTensor(DataType element_type, float scale, int32_t zero_point);
+
+std::vector<int32_t> extractTensorShape(const Tensor &tensor);
+
+// Returns the DataType enumerator corresponding to the C++ type T
+// (DataType::Unknown for unsupported types).
+template <typename T> constexpr DataType getElementType()
+{
+  if (std::is_same<T, float>::value)
+    return DataType::FLOAT32;
+  if (std::is_same<T, uint8_t>::value)
+    return DataType::U8;
+  if (std::is_same<T, int32_t>::value)
+    return DataType::S32;
+  if (std::is_same<T, int64_t>::value)
+    return DataType::S64;
+  return DataType::Unknown;
+}
+
+// Copies the tensor's contents into a vector, one entry per shape element.
+template <typename T> std::vector<T> extractTensorData(const Tensor &tensor)
+{
+  const auto *data_ptr = tensor.data<T>();
+  return std::vector<T>(data_ptr, data_ptr + tensor.shape().num_elements());
+}
+
+std::vector<::testing::Matcher<float>> ArrayFloatNear(const std::vector<float> &values,
+                                                      float max_abs_error = 1.0e-5f);
+
+// Quantizes float data as q = round(zero_point + f / scale), clamped to T's
+// representable range. T must be an integral type.
+template <typename T>
+inline std::vector<T> quantize(const std::vector<float> &data, float scale, int32_t zero_point)
+{
+  assert(!std::is_floating_point<T>::value);
+  std::vector<T> q;
+  for (const auto &f : data)
+  {
+    q.push_back(static_cast<T>(std::max<float>(
+        std::numeric_limits<T>::lowest(),
+        std::min<float>(std::numeric_limits<T>::max(), std::round(zero_point + (f / scale))))));
+  }
+  return q;
+}
+
+// Inverse of quantize(): f = scale * (q - zero_point).
+template <typename T>
+inline std::vector<float> dequantize(const std::vector<T> &data, float scale, int32_t zero_point)
+{
+  assert(!std::is_floating_point<T>::value);
+  std::vector<float> f;
+  for (const T &q : data)
+  {
+    f.push_back(scale * (q - zero_point));
+  }
+  return f;
+}
+
+// Chooses an affine quantization (scale, zero_point) for type T so that T's
+// full integer range covers the real range [f_min, f_max], with the zero
+// point nudged to an exact integer. The range must contain 0. Adapted from
+// TensorFlow Lite test utilities.
+template <typename T> std::pair<float, int32_t> quantizationParams(float f_min, float f_max)
+{
+  if (std::is_floating_point<T>::value)
+  {
+    // Floating-point "quantization" is the identity mapping.
+    return {1.0f, 0};
+  }
+  int32_t zero_point = 0;
+  double scale = 0;
+  const T qmin = std::numeric_limits<T>::lowest();
+  const T qmax = std::numeric_limits<T>::max();
+  const double qmin_double = qmin;
+  const double qmax_double = qmax;
+  // 0 should always be a representable value. Let's assume that the initial
+  // min,max range contains 0.
+  assert(f_max >= 0);
+  assert(f_min <= 0);
+  if (f_min == f_max)
+  {
+    // Special case where the min,max range is a point. Should be {0}.
+    assert(f_max == 0);
+    assert(f_min == 0);
+    return {scale, zero_point};
+  }
+
+  // General case.
+  //
+  // First determine the scale.
+  scale = (f_max - f_min) / (qmax_double - qmin_double);
+
+  // Zero-point computation.
+  // First the initial floating-point computation. The zero-point can be
+  // determined from solving an affine equation for any known pair
+  // (real value, corresponding quantized value).
+  // We know two such pairs: (rmin, qmin) and (rmax, qmax).
+  // The arithmetic error on the zero point computed from either pair
+  // will be roughly machine_epsilon * (sum of absolute values of terms)
+  // so we want to use the variant that adds the smaller terms.
+  const double zero_point_from_min = qmin_double - f_min / scale;
+  const double zero_point_from_max = qmax_double - f_max / scale;
+
+  const double zero_point_from_min_error = std::abs(qmin_double) + std::abs(f_min / scale);
+
+  const double zero_point_from_max_error = std::abs(qmax_double) + std::abs(f_max / scale);
+
+  const double zero_point_double = zero_point_from_min_error < zero_point_from_max_error
+                                       ? zero_point_from_min
+                                       : zero_point_from_max;
+
+  // Now we need to nudge the zero point to be an integer
+  // (our zero points are integer, and this is motivated by the requirement
+  // to be able to represent the real value "0" exactly as a quantized value,
+  // which is required in multiple places, for example in Im2col with SAME
+  // padding).
+
+  T nudged_zero_point = 0;
+  if (zero_point_double < qmin_double)
+  {
+    nudged_zero_point = qmin;
+  }
+  else if (zero_point_double > qmax_double)
+  {
+    nudged_zero_point = qmax;
+  }
+  else
+  {
+    nudged_zero_point = static_cast<T>(std::round(zero_point_double));
+  }
+
+  // The zero point should always be in the range of quantized values,
+  // [qmin, qmax].
+  assert(qmax >= nudged_zero_point);
+  assert(qmin <= nudged_zero_point);
+  zero_point = nudged_zero_point;
+  // finally, return the values
+  return {static_cast<float>(scale), zero_point};
+}
+
+// Returns the quantization step size for the given range — a natural
+// tolerance when comparing dequantized results against float references.
+inline float getTolerance(float min, float max, int quantize_steps)
+{
+  return ((max - min) / quantize_steps);
+}
+
+} // namespace testing
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_TESTUTILS_H
diff --git a/compiler/luci-interpreter/src/kernels/Transpose.cpp b/compiler/luci-interpreter/src/kernels/Transpose.cpp
new file mode 100644
index 000000000..8265d9937
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Transpose.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Transpose.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+Transpose::Transpose(const Tensor *input, const Tensor *perm, Tensor *output)
+ : Kernel({input, perm}, {output})
+{
+}
+
+void Transpose::configure()
+{
+ // Transpose op only supports 1D-4D input arrays.
+ int dims = input()->shape().num_dims();
+ const int *perm_data = getTensorData<int32_t>(perm());
+
+ assert(input()->shape().num_dims() <= 4);
+ assert(input()->element_type() == output()->element_type());
+
+ assert(perm()->shape().num_dims() == 1);
+ assert(perm()->shape().dim(0) == dims);
+
+ Shape output_shape(dims);
+ for (int i = 0; i < dims; i++)
+ {
+ assert(perm_data[i] < dims && perm_data[i] >= 0);
+ output_shape.dim(i) = input()->shape().dim(perm_data[i]);
+ }
+
+ output()->resize(output_shape);
+}
+
+void Transpose::execute() const
+{
+ tflite::TransposeParams params{};
+ const int *perm_data = getTensorData<int32_t>(perm());
+ const int size = perm()->shape().dim(0);
+ params.perm_count = size;
+ for (int i = 0; i < size; i++)
+ params.perm[i] = perm_data[i];
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ tflite::reference_ops::Transpose(params, getTensorShape(input()),
+ getTensorData<float>(input()), getTensorShape(output()),
+ getTensorData<float>(output()));
+ break;
+ case DataType::U8:
+ tflite::reference_ops::Transpose(params, getTensorShape(input()),
+ getTensorData<uint8_t>(input()), getTensorShape(output()),
+ getTensorData<uint8_t>(output()));
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Transpose.h b/compiler/luci-interpreter/src/kernels/Transpose.h
new file mode 100644
index 000000000..d6f89c352
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Transpose.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_TRANSPOSE_H
+#define LUCI_INTERPRETER_KERNELS_TRANSPOSE_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Transpose : public Kernel
+{
+public:
+ Transpose(const Tensor *input, const Tensor *perm, Tensor *output);
+
+ const Tensor *input() const { return _inputs[0]; }
+ const Tensor *perm() const { return _inputs[1]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_TRANSPOSE_H
diff --git a/compiler/luci-interpreter/src/kernels/Transpose.test.cpp b/compiler/luci-interpreter/src/kernels/Transpose.test.cpp
new file mode 100644
index 000000000..87e6e2a00
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Transpose.test.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Transpose.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+template <typename T>
+void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> perm_shape,
+ std::initializer_list<int32_t> output_shape, std::initializer_list<T> input_data,
+ std::initializer_list<int32_t> perm_data, std::initializer_list<T> output_data,
+ DataType element_type)
+{
+ Tensor input_tensor{element_type, input_shape, {}, ""};
+ input_tensor.writeData(input_data.begin(), input_data.size() * sizeof(T));
+
+ Tensor perm_tensor{DataType::S32, perm_shape, {}, ""};
+ perm_tensor.writeData(perm_data.begin(), perm_data.size() * sizeof(int32_t));
+ Tensor output_tensor = makeOutputTensor(element_type);
+
+ Transpose kernel(&input_tensor, &perm_tensor, &output_tensor);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<T>(output_tensor), ::testing::ElementsAreArray(output_data));
+}
+
+template <typename T> class TransposeTest : public ::testing::Test
+{
+};
+
+using DataTypes = ::testing::Types<float, uint8_t>;
+TYPED_TEST_CASE(TransposeTest, DataTypes);
+
+TYPED_TEST(TransposeTest, Small3D)
+{
+ Check<TypeParam>(/*input_shape=*/{2, 3, 4}, /*perm_shape=*/{3}, /*output_shape=*/{4, 2, 3},
+ /*input_data=*/{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
+ /*perm_data=*/{2, 0, 1},
+ /*output_data=*/{0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
+ 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23},
+ getElementType<TypeParam>());
+}
+
+TYPED_TEST(TransposeTest, Large4D)
+{
+ Check<TypeParam>(
+ /*input_shape=*/{2, 3, 4, 5}, /*perm_shape=*/{4}, /*output_shape=*/{4, 2, 3, 5},
+ /*input_data=*/{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119},
+ /*perm_data=*/{2, 0, 1, 3},
+ /*output_data=*/{0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
+ 60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
+ 5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
+ 65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
+ 10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
+ 70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
+ 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
+ 75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119},
+ getElementType<TypeParam>());
+}
+
+TYPED_TEST(TransposeTest, Large2D)
+{
+ Check<TypeParam>(
+ /*input_shape=*/{10, 12}, /*perm_shape=*/{2}, /*output_shape=*/{12, 10},
+ /*input_data=*/{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119},
+ /*perm_data=*/{1, 0},
+ /*output_data=*/{0, 12, 24, 36, 48, 60, 72, 84, 96, 108, 1, 13, 25, 37, 49,
+ 61, 73, 85, 97, 109, 2, 14, 26, 38, 50, 62, 74, 86, 98, 110,
+ 3, 15, 27, 39, 51, 63, 75, 87, 99, 111, 4, 16, 28, 40, 52,
+ 64, 76, 88, 100, 112, 5, 17, 29, 41, 53, 65, 77, 89, 101, 113,
+ 6, 18, 30, 42, 54, 66, 78, 90, 102, 114, 7, 19, 31, 43, 55,
+ 67, 79, 91, 103, 115, 8, 20, 32, 44, 56, 68, 80, 92, 104, 116,
+ 9, 21, 33, 45, 57, 69, 81, 93, 105, 117, 10, 22, 34, 46, 58,
+ 70, 82, 94, 106, 118, 11, 23, 35, 47, 59, 71, 83, 95, 107, 119},
+ getElementType<TypeParam>());
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/TransposeConv.cpp b/compiler/luci-interpreter/src/kernels/TransposeConv.cpp
new file mode 100644
index 000000000..46380e2fa
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/TransposeConv.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/TransposeConv.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+TransposeConv::TransposeConv(const Tensor *output_shape, const Tensor *filter, const Tensor *input,
+ Tensor *output, const TransposeConvParams &params)
+ : KernelWithParams<TransposeConvParams>({output_shape, filter, input}, {output}, params)
+{
+}
+
+void TransposeConv::configure()
+{
+ assert(output_shape()->shape().num_dims() == 1);
+ assert(input()->shape().num_dims() == 4);
+ assert(filter()->shape().num_dims() == 4);
+ assert(input()->element_type() == DataType::FLOAT32 || input()->element_type() == DataType::U8);
+ assert(input()->element_type() == output()->element_type());
+ assert(input()->shape().dim(3) == filter()->shape().dim(3));
+ if (input()->element_type() == DataType::U8)
+ {
+ _scratch_tensor =
+ std::make_unique<Tensor>(DataType::S32, output()->shape(), AffineQuantization{}, "");
+ double real_multiplier = 0.0;
+ const double input_product_scale = input()->scale() * filter()->scale();
+ assert(input_product_scale >= 0);
+ real_multiplier = input_product_scale / output()->scale();
+ int exponent;
+ quantizeMultiplier(real_multiplier, &_output_multiplier, &exponent);
+ _output_shift = -exponent;
+ }
+
+ const int num_dims = output_shape()->shape().dim(0);
+ Shape out_shape(num_dims);
+ const auto *shape_data = getTensorData<int32_t>(output_shape());
+ for (int i = 0; i < num_dims; i++)
+ out_shape.dim(i) = shape_data[i];
+ output()->resize(out_shape);
+}
+
+void TransposeConv::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ evalFloat();
+ break;
+ case DataType::U8:
+ evalQuantized();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+void TransposeConv::evalFloat() const
+{
+ const int width = output()->shape().dim(2);
+ const int height = output()->shape().dim(1);
+
+ const int filter_width = filter()->shape().dim(2);
+ const int filter_height = filter()->shape().dim(1);
+
+ int unused_output_height, unused_output_width;
+ unused_output_width =
+ computeOutputSize(params().padding, width, filter_width, params().stride_width, 1);
+ unused_output_height =
+ computeOutputSize(params().padding, height, filter_height, params().stride_height, 1);
+ int32_t offset = 0;
+ tflite::ConvParams op_params{};
+ op_params.padding_type = tflite::PaddingType::kSame;
+ op_params.padding_values.height = computePaddingWithOffset(
+ params().stride_height, 1, height, filter_height, unused_output_height, &offset);
+ op_params.padding_values.height_offset = offset;
+ op_params.padding_values.width = computePaddingWithOffset(
+ params().stride_width, 1, width, filter_width, unused_output_width, &offset);
+ op_params.padding_values.width_offset = offset;
+ op_params.stride_height = params().stride_height;
+ op_params.stride_width = params().stride_width;
+ op_params.output_multiplier = _output_multiplier;
+ tflite::reference_ops::TransposeConv(
+ op_params, getTensorShape(input()), getTensorData<float>(input()), getTensorShape(filter()),
+ getTensorData<float>(filter()), getTensorShape(output()), getTensorData<float>(output()),
+ tflite::RuntimeShape(), (float *)nullptr);
+}
+
+void TransposeConv::evalQuantized() const
+{
+ int32_t input_offset = -input()->zero_point();
+ int32_t filter_offset = -filter()->zero_point();
+ int32_t output_offset = filter()->zero_point();
+ const int width = output()->shape().dim(2);
+ const int height = output()->shape().dim(1);
+
+ const int filter_width = filter()->shape().dim(2);
+ const int filter_height = filter()->shape().dim(1);
+
+ int unused_output_height, unused_output_width;
+ unused_output_width =
+ computeOutputSize(params().padding, width, filter_width, params().stride_width, 1);
+ unused_output_height =
+ computeOutputSize(params().padding, height, filter_height, params().stride_height, 1);
+ int32_t offset = 0;
+ tflite::ConvParams op_params{};
+ op_params.padding_type = tflite::PaddingType::kSame;
+ op_params.padding_values.height = computePaddingWithOffset(
+ params().stride_height, 1, height, filter_height, unused_output_height, &offset);
+ op_params.padding_values.width = computePaddingWithOffset(
+ params().stride_width, 1, width, filter_width, unused_output_width, &offset);
+ op_params.stride_height = params().stride_height;
+ op_params.stride_width = params().stride_width;
+ op_params.input_offset = input_offset;
+ op_params.output_offset = output_offset;
+ op_params.weights_offset = filter_offset;
+ op_params.output_multiplier = _output_multiplier;
+ op_params.output_shift = -_output_shift;
+ op_params.quantized_activation_min = std::numeric_limits<uint8_t>::min();
+ op_params.quantized_activation_max = std::numeric_limits<uint8_t>::max();
+
+ tflite::reference_ops::TransposeConv(
+ op_params, getTensorShape(input()), getTensorData<uint8>(input()), getTensorShape(filter()),
+ getTensorData<uint8>(filter()), getTensorShape(output()), getTensorData<uint8>(output()),
+ tflite::RuntimeShape(), (uint8 *)nullptr, getTensorData<int32_t>(_scratch_tensor.get()));
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/TransposeConv.h b/compiler/luci-interpreter/src/kernels/TransposeConv.h
new file mode 100644
index 000000000..d73e939b7
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/TransposeConv.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_TRANSPOSECONV_H
+#define LUCI_INTERPRETER_KERNELS_TRANSPOSECONV_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class TransposeConv : public KernelWithParams<TransposeConvParams>
+{
+public:
+ TransposeConv(const Tensor *output_shape, const Tensor *filter, const Tensor *input,
+ Tensor *output, const TransposeConvParams &params);
+
+ const Tensor *output_shape() const { return _inputs[0]; }
+ const Tensor *filter() const { return _inputs[1]; }
+ const Tensor *input() const { return _inputs[2]; }
+ Tensor *output() const { return _outputs[0]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ void evalFloat() const;
+ void evalQuantized() const;
+
+private:
+ std::unique_ptr<Tensor> _scratch_tensor;
+
+ // The scaling factor from input to output (aka the 'real multiplier') can
+ // be represented as a fixed point multiplier plus a left shift.
+ int32_t _output_multiplier = 0;
+ int _output_shift = 0;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_TRANSPOSECONV_H
diff --git a/compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp b/compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp
new file mode 100644
index 000000000..3386d3683
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/TransposeConv.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+template <typename T>
+void Check(std::initializer_list<int32_t> output_shape_shape,
+ std::initializer_list<int32_t> weight_shape,
+ std::initializer_list<int32_t> input_data_shape,
+ std::initializer_list<int32_t> output_shape,
+ std::initializer_list<int32_t> output_shape_data, std::initializer_list<T> weight_data,
+ std::initializer_list<T> input_data_data, std::initializer_list<T> output_data,
+ luci::Padding padding, int32_t stride_height, int32_t stride_width,
+ DataType element_type)
+{
+ Tensor output_shape_tensor{element_type, output_shape_shape, {}, ""};
+ output_shape_tensor.writeData(output_shape_data.begin(), output_shape_data.size() * sizeof(T));
+ Tensor weight_tensor{element_type, weight_shape, {}, ""};
+ weight_tensor.writeData(weight_data.begin(), weight_data.size() * sizeof(T));
+ Tensor input_data_tensor{element_type, input_data_shape, {}, ""};
+ input_data_tensor.writeData(input_data_data.begin(), input_data_data.size() * sizeof(T));
+
+ Tensor output_tensor = makeOutputTensor(element_type);
+
+ TransposeConvParams params{};
+ params.padding = padding;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+
+ TransposeConv kernel(&output_shape_tensor, &weight_tensor, &input_data_tensor, &output_tensor,
+ params);
+ kernel.configure();
+ kernel.execute();
+
+ EXPECT_THAT(extractTensorData<T>(output_tensor), ::testing::ElementsAreArray(output_data));
+}
+
+TEST(TransposeConvTest, FloatSimple)
+{
+ Check<float>(
+ /*outputShape_shape=*/{4}, /*weight_shape=*/{1, 3, 3, 1}, /*input_shape=*/{1, 4, 4, 1},
+ /*output_shape=*/{1, 4, 4, 1}, /*outputShape_data=*/{1, 4, 4, 1},
+ /*weight_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9},
+ /*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ /*output_data=*/{29, 62, 83, 75, 99, 192, 237, 198, 207, 372, 417, 330, 263, 446, 485, 365},
+ /*params.padding=*/luci::Padding::SAME, /*stride_height=*/1, /*stride_width=*/1,
+ getElementType<float>());
+}
+
+TEST(TransposeConvTest, FloatTwoFiltersTest)
+{
+ Check<float>(
+ /*outputShape_shape=*/{4}, /*weight_shape=*/{1, 3, 3, 2}, /*input_shape=*/{1, 4, 4, 2},
+ /*output_shape=*/{1, 4, 4, 1}, /*outputShape_data=*/{1, 4, 4, 1},
+ /*weight_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
+ /*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
+ /*output_data=*/{184, 412, 568, 528, 678, 1347, 1689, 1434, 1494, 2715, 3057, 2442, 1968,
+ 3352, 3652, 2760},
+ /*params.padding=*/luci::Padding::SAME, /*stride_height=*/1, /*stride_width=*/1,
+ getElementType<float>());
+}
+
+TEST(TransposeConvTest, Uint8Simple)
+{
+ // TODO
+ // Implement GetDequantizedOutput Function.
+ // Create Test for Uint8 Case
+}
+TEST(TransposeConvTest, Uint8FiltersTest)
+{
+ // TODO
+ // Implement GetDequantizedOutput Function.
+ // Create Test for Uint8 Case
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Unpack.cpp b/compiler/luci-interpreter/src/kernels/Unpack.cpp
new file mode 100644
index 000000000..834b79926
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Unpack.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Unpack.h"
+
+#include "kernels/Utils.h"
+
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+namespace kernels
+{
+
+Unpack::Unpack(const Tensor *input, std::vector<Tensor *> outputs, const UnpackParams &params)
+ : KernelWithParams<UnpackParams>({input}, std::move(outputs), params)
+{
+}
+
+void Unpack::configure()
+{
+ const Shape &input_shape = input()->shape();
+
+ int axis = _params.axis;
+ if (axis < 0)
+ axis += input()->shape().num_dims();
+ assert(axis >= 0 && axis < input_shape.num_dims());
+
+ Shape output_shape(input_shape.num_dims() - 1);
+ int out_index = 0;
+ for (int in_index = 0; in_index < input_shape.num_dims(); ++in_index)
+ {
+ if (in_index != axis)
+ output_shape.dim(out_index++) = input_shape.dim(in_index);
+ }
+
+ for (Tensor *output : _outputs)
+ {
+ assert(output->element_type() == input()->element_type());
+ output->resize(output_shape);
+ }
+}
+
+template <typename T> void Unpack::executeImpl() const
+{
+ tflite::UnpackParams params{};
+ params.axis = _params.axis;
+ params.num_split = _outputs.size();
+ VectorOfTensors<T, false> all_outputs(_outputs);
+ tflite::reference_ops::Unpack<T>(params, getTensorShape(input()), getTensorData<T>(input()),
+ **all_outputs.shapes(), all_outputs.data());
+}
+
+void Unpack::execute() const
+{
+ switch (input()->element_type())
+ {
+ case DataType::FLOAT32:
+ return executeImpl<float>();
+ case DataType::U8:
+ return executeImpl<uint8_t>();
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Unpack.h b/compiler/luci-interpreter/src/kernels/Unpack.h
new file mode 100644
index 000000000..f4a44ecad
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Unpack.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_UNPACK_H
+#define LUCI_INTERPRETER_KERNELS_UNPACK_H
+
+#include "core/Kernel.h"
+#include "core/KernelParams.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+class Unpack : public KernelWithParams<UnpackParams>
+{
+public:
+ Unpack(const Tensor *input, std::vector<Tensor *> outputs, const UnpackParams &params);
+
+ const Tensor *input() const { return _inputs[0]; }
+ Tensor *output(int index) const { return _outputs[index]; }
+
+ void configure() override;
+ void execute() const override;
+
+private:
+ template <typename T> void executeImpl() const;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_UNPACK_H
diff --git a/compiler/luci-interpreter/src/kernels/Unpack.test.cpp b/compiler/luci-interpreter/src/kernels/Unpack.test.cpp
new file mode 100644
index 000000000..f70c5847a
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Unpack.test.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Unpack.h"
+#include "kernels/TestUtils.h"
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+namespace
+{
+
+using namespace testing;
+
+template <typename T>
+void Check(int axis, Shape input_shape, std::initializer_list<T> input_data,
+ const std::vector<std::initializer_list<int32_t>> &exp_output_shape,
+ std::vector<std::initializer_list<T>> exp_output_data)
+{
+ constexpr DataType element_type = getElementType<T>();
+ const int num_outputs = input_shape.dim(axis < 0 ? axis + input_shape.num_dims() : axis);
+
+ Tensor input_tensor = makeInputTensor<element_type>(input_shape, input_data);
+ std::vector<Tensor> output_tensors;
+ output_tensors.reserve(num_outputs);
+ for (int i = 0; i < num_outputs; ++i)
+ {
+ output_tensors.push_back(makeOutputTensor(element_type));
+ }
+
+ std::vector<Tensor *> output_tensor_ptrs(num_outputs);
+ for (int i = 0; i < num_outputs; ++i)
+ {
+ output_tensor_ptrs[i] = &output_tensors[i];
+ }
+
+ UnpackParams params{};
+ params.axis = axis;
+
+ Unpack kernel(&input_tensor, std::move(output_tensor_ptrs), params);
+ kernel.configure();
+ kernel.execute();
+
+ for (int i = 0; i < num_outputs; ++i)
+ {
+ EXPECT_THAT(extractTensorData<T>(output_tensors[i]),
+ ::testing::ElementsAreArray(exp_output_data[i]));
+ }
+}
+
+template <typename T> class UnpackTest : public ::testing::Test
+{
+};
+
+using DataTypes = ::testing::Types<float, uint8_t>;
+TYPED_TEST_CASE(UnpackTest, DataTypes);
+
+TYPED_TEST(UnpackTest, ThreeOutputs)
+{
+ Check<TypeParam>(/*axis=*/0, /*input_shape=*/{3, 2},
+ /*input_data=*/{1, 2, 3, 4, 5, 6},
+ /*exp_output_shape=*/{{2}, {2}, {2}},
+ /*exp_output_data=*/{{1, 2}, {3, 4}, {5, 6}});
+}
+
+TYPED_TEST(UnpackTest, ThreeOutputsAxisOne)
+{
+ Check<TypeParam>(/*axis=*/1, /*input_shape=*/{3, 2},
+ /*input_data=*/{1, 2, 3, 4, 5, 6},
+ /*exp_output_shape=*/{{3}, {3}},
+ /*exp_output_data=*/{{1, 3, 5}, {2, 4, 6}});
+}
+
+TYPED_TEST(UnpackTest, ThreeOutputsNegativeAxisOne)
+{
+ Check<TypeParam>(/*axis=*/-1, /*input_shape=*/{3, 2},
+ /*input_data=*/{1, 2, 3, 4, 5, 6},
+ /*exp_output_shape=*/{{3}, {3}},
+ /*exp_output_data=*/{{1, 3, 5}, {2, 4, 6}});
+}
+
+TYPED_TEST(UnpackTest, ThreeOutputsNegativeAxisTwo)
+{
+ Check<TypeParam>(/*axis=*/-2, /*input_shape=*/{3, 2},
+ /*input_data=*/{1, 2, 3, 4, 5, 6},
+ /*exp_output_shape=*/{{2}, {2}, {2}},
+ /*exp_output_data=*/{{1, 2}, {3, 4}, {5, 6}});
+}
+
+TYPED_TEST(UnpackTest, OneOutput)
+{
+ Check<TypeParam>(/*axis=*/0, /*input_shape=*/{1, 6},
+ /*input_data=*/{1, 2, 3, 4, 5, 6},
+ /*exp_output_shape=*/{{6}},
+ /*exp_output_data=*/{{1, 2, 3, 4, 5, 6}});
+}
+
+TYPED_TEST(UnpackTest, ThreeDimensionsTwoOutputs)
+{
+ Check<TypeParam>(/*axis=*/2, /*input_shape=*/{2, 2, 2},
+ /*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8},
+ /*exp_output_shape=*/{{2, 2}, {2, 2}},
+ /*exp_output_data=*/{{1, 3, 5, 7}, {2, 4, 6, 8}});
+}
+
+TYPED_TEST(UnpackTest, FiveDimensionsTwoOutputs)
+{
+ Check<TypeParam>(
+ /*axis=*/2, /*input_shape=*/{2, 2, 2, 2, 1},
+ /*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ /*exp_output_shape=*/{{2, 2, 2, 1}, {2, 2, 2, 1}},
+ /*exp_output_data=*/
+ {{1, 2, 5, 6, 9, 10, 13, 14}, {3, 4, 7, 8, 11, 12, 15, 16}});
+}
+
+TYPED_TEST(UnpackTest, VectorToScalar)
+{
+ Check<TypeParam>(/*axis=*/0, /*input_shape=*/{5},
+ /*input_data=*/{1, 2, 3, 4, 5},
+ /*exp_output_shape=*/{{}, {}, {}, {}, {}},
+ /*exp_output_data=*/{{1}, {2}, {3}, {4}, {5}});
+}
+
+} // namespace
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Utils.cpp b/compiler/luci-interpreter/src/kernels/Utils.cpp
new file mode 100644
index 000000000..b9e7738a9
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Utils.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernels/Utils.h"
+
+#include <cassert>
+#include <cmath>
+#include <limits>
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+void calculateActivationRange(Activation activation, float *activation_min, float *activation_max)
+{
+ switch (activation)
+ {
+ case Activation::NONE:
+ *activation_min = std::numeric_limits<float>::lowest();
+ *activation_max = std::numeric_limits<float>::max();
+ break;
+ case Activation::RELU:
+ *activation_min = 0;
+ *activation_max = std::numeric_limits<float>::max();
+ break;
+ case Activation::RELU_N1_TO_1:
+ *activation_min = -1;
+ *activation_max = 1;
+ break;
+ case Activation::RELU6:
+ *activation_min = 0;
+ *activation_max = 6;
+ break;
+ default:
+ throw std::runtime_error("Unsupported activation.");
+ }
+}
+
+static void calculateActivationRangeQuantizedImpl(Activation activation, int32_t qmin, int32_t qmax,
+ const Tensor *output, int32_t *activation_min,
+ int32_t *activation_max)
+{
+ const float scale = output->scale();
+ const int32_t zero_point = output->zero_point();
+
+ auto quantize = [scale, zero_point](float x) {
+ return zero_point + static_cast<int32_t>(std::round(x / scale));
+ };
+
+ switch (activation)
+ {
+ case Activation::NONE:
+ *activation_min = qmin;
+ *activation_max = qmax;
+ break;
+ case Activation::RELU:
+ *activation_min = std::max(qmin, quantize(0.0f));
+ *activation_max = qmax;
+ break;
+ case Activation::RELU_N1_TO_1:
+ *activation_min = std::max(qmin, quantize(-1.0f));
+ *activation_max = std::min(qmax, quantize(1.0f));
+ break;
+ case Activation::RELU6:
+ *activation_min = std::max(qmin, quantize(0.0f));
+ *activation_max = std::min(qmax, quantize(6.0f));
+ break;
+ default:
+ throw std::runtime_error("Unsupported activation.");
+ }
+}
+
+void calculateActivationRangeQuantized(Activation activation, const Tensor *output,
+ int32_t *activation_min, int32_t *activation_max)
+{
+ int32_t qmin{};
+ int32_t qmax{};
+ switch (output->element_type())
+ {
+ case DataType::U8:
+ qmin = std::numeric_limits<uint8_t>::min();
+ qmax = std::numeric_limits<uint8_t>::max();
+ break;
+ case DataType::S8:
+ qmin = std::numeric_limits<int8_t>::min();
+ qmax = std::numeric_limits<int8_t>::max();
+ break;
+ case DataType::S16:
+ qmin = std::numeric_limits<int16_t>::min();
+ qmax = std::numeric_limits<int16_t>::max();
+ break;
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+
+ calculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, activation_min,
+ activation_max);
+}
+
+void quantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
+{
+ if (double_multiplier == 0.0)
+ {
+ *quantized_multiplier = 0;
+ *shift = 0;
+ return;
+ }
+
+ const double q = std::frexp(double_multiplier, shift);
+ auto q_fixed = static_cast<int64_t>(std::round(q * (INT64_C(1) << 31)));
+
+ if (q_fixed == (INT64_C(1) << 31))
+ {
+ q_fixed /= 2;
+ ++*shift;
+ }
+ assert(q_fixed <= std::numeric_limits<int32_t>::max());
+ // A shift amount smaller than -31 would cause all bits to be shifted out
+ // and thus all results would be zero. We implement that instead with
+ // q_fixed==0, so as to avoid hitting issues with right-shift
+ // operations with shift amounts greater than 31. Note that this happens
+ // roughly when abs(double_multiplier) < 2^-31 and the present handling means
+ // that we're effectively flushing tiny double_multiplier's to zero.
+ // We could conceivably handle values in the range (roughly) [32, 63]
+ // as 'denormals' i.e. (shift==0, q_fixed < 2^30). In that point of view
+ // the present handling is just doing 'flush denormals to zero'. We could
+ // reconsider and actually generate nonzero denormals if a need arises.
+ if (*shift < -31)
+ {
+ *shift = 0;
+ q_fixed = 0;
+ }
+ *quantized_multiplier = static_cast<int32_t>(q_fixed);
+}
+
+void quantizeMultiplierSmallerThanOneExp(double double_multiplier, int32_t *quantized_multiplier,
+ int *left_shift)
+{
+ assert(double_multiplier < 1.0);
+ assert(double_multiplier > 0.0);
+ int shift;
+ quantizeMultiplier(double_multiplier, quantized_multiplier, &shift);
+ assert(shift <= 0);
+ *left_shift = shift;
+}
+
+Shape calculateShapeForBroadcast(const Shape &input1_shape, const Shape &input2_shape)
+{
+ const int num_input1_dims = input1_shape.num_dims();
+ const int num_input2_dims = input2_shape.num_dims();
+ const int num_out_dims = std::max(num_input1_dims, num_input2_dims);
+ Shape output_shape(num_out_dims);
+
+ for (int i = 0; i < num_out_dims; ++i)
+ {
+ const int32_t input1_dim = i < num_input1_dims ? input1_shape.dim(num_input1_dims - i - 1) : 1;
+ const int32_t input2_dim = i < num_input2_dims ? input2_shape.dim(num_input2_dims - i - 1) : 1;
+ assert(input1_dim == input2_dim || input1_dim == 1 || input2_dim == 1);
+ output_shape.dim(num_out_dims - i - 1) = std::max(input1_dim, input2_dim);
+ }
+
+ return output_shape;
+}
+
+} // namespace kernels
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/kernels/Utils.h b/compiler/luci-interpreter/src/kernels/Utils.h
new file mode 100644
index 000000000..3c2cc8450
--- /dev/null
+++ b/compiler/luci-interpreter/src/kernels/Utils.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_KERNELS_UTILS_H
+#define LUCI_INTERPRETER_KERNELS_UTILS_H
+
+#include "core/KernelParams.h"
+#include "luci_interpreter/core/Tensor.h"
+
+#include <tensorflow/lite/kernels/internal/types.h>
+
+#include <cassert>
+#include <cstdint>
+
+namespace luci_interpreter
+{
+namespace kernels
+{
+
+inline int32_t computePadding(int32_t stride, int32_t dilation_rate, int32_t in_size,
+ int32_t filter_size, int32_t out_size)
+{
+ const int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
+ const int32_t padding = ((out_size - 1) * stride + effective_filter_size - in_size) / 2;
+ return padding > 0 ? padding : 0;
+}
+
+inline int32_t computePaddingWithOffset(int32_t stride, int32_t dilation_rate, int32_t in_size,
+ int32_t filter_size, int32_t out_size, int32_t *offset)
+{
+ int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
+ int32_t total_padding = ((out_size - 1) * stride + effective_filter_size - in_size);
+ total_padding = total_padding > 0 ? total_padding : 0;
+ *offset = total_padding % 2;
+ return total_padding / 2;
+}
+
+inline int32_t computeOutputSize(Padding padding, int32_t image_size, int32_t filter_size,
+ int32_t stride, int32_t dilation_rate = 1)
+{
+ const int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
+ switch (padding)
+ {
+ case Padding::SAME:
+ return (image_size + stride - 1) / stride;
+ case Padding::VALID:
+ return (image_size + stride - effective_filter_size) / stride;
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+void calculateActivationRange(Activation activation, float *activation_min, float *activation_max);
+
+void calculateActivationRangeQuantized(Activation activation, const Tensor *output,
+ int32_t *activation_min, int32_t *activation_max);
+
+// Decompose a double multiplier into a Q0.31 int32 representation of its
+// significand, and shift representation of its exponent.
+//
+// Handles an arbitrary positive multiplier. The 'shift' output-value is
+// basically the 'floating-point exponent' of the multiplier:
+// Negative for a right-shift (when the multiplier is <1), positive for a
+// left-shift (when the multiplier is >1)
+void quantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
+
+// Decompose a double multiplier into a Q0.31 int32 representation of its
+// significand, and shift representation of NEGATIVE its exponent ---
+// this is intended as a RIGHT-shift.
+//
+// Restricted to the case where the multiplier < 1 (and non-negative).
+void quantizeMultiplierSmallerThanOneExp(double double_multiplier, int32_t *quantized_multiplier,
+ int *left_shift);
+
+Shape calculateShapeForBroadcast(const Shape &input1_shape, const Shape &input2_shape);
+
+inline tflite::RuntimeShape getTensorShape(const Tensor *tensor)
+{
+ if (tensor == nullptr)
+ return tflite::RuntimeShape();
+
+ const Shape &shape = tensor->shape();
+ tflite::RuntimeShape runtime_shape(shape.num_dims());
+ for (int i = 0; i < shape.num_dims(); ++i)
+ {
+ runtime_shape.SetDim(i, shape.dim(i));
+ }
+ return runtime_shape;
+}
+
+template <typename T> const T *getTensorData(const Tensor *tensor)
+{
+ return tensor != nullptr ? tensor->data<T>() : nullptr;
+}
+
+template <typename T> T *getTensorData(Tensor *tensor)
+{
+ return tensor != nullptr ? tensor->data<T>() : nullptr;
+}
+
+// A list of tensors in a format that can be used by kernels like split and
+// concatenation.
+template <typename T, bool is_const> class VectorOfTensors
+{
+public:
+ using ElementT = typename std::conditional<is_const, const T, T>::type;
+ using TensorT = typename std::conditional<is_const, const Tensor, Tensor>::type;
+
+ // Build with the tensors in 'tensor_list'.
+ explicit VectorOfTensors(const std::vector<TensorT *> &tensor_list)
+ {
+ const int num_tensors = tensor_list.size();
+
+ all_data_.reserve(num_tensors);
+ all_shape_.reserve(num_tensors);
+ all_shape_ptr_.reserve(num_tensors);
+
+ for (TensorT *tensor : tensor_list)
+ {
+ all_data_.push_back(getTensorData<T>(tensor));
+ all_shape_.push_back(getTensorShape(tensor));
+ }
+
+ // Taking the pointer from inside a std::vector is only OK if the vector is
+ // never modified, so we populate all_shape in the previous loop and then we
+ // are free to grab iterators here.
+ for (tflite::RuntimeShape &shape : all_shape_)
+ {
+ all_shape_ptr_.push_back(&shape);
+ }
+ }
+ // Return a pointer to the data pointers of all tensors in the list. For
+ // example:
+ // float* const* f = v.data();
+ // f[0][1] is the second element of the first tensor.
+ ElementT *const *data() const { return all_data_.data(); }
+
+ // Return a pointer the shape pointers of all tensors in the list. For
+ // example:
+ // const RuntimeShape* const* d = v.dims();
+ // dims[1] are the dimensions of the second tensor in the list.
+ const tflite::RuntimeShape *const *shapes() const { return all_shape_ptr_.data(); }
+
+private:
+ std::vector<ElementT *> all_data_;
+ std::vector<tflite::RuntimeShape> all_shape_;
+ std::vector<tflite::RuntimeShape *> all_shape_ptr_;
+};
+
+// A list of quantized tensors in a format that can be used by kernels like
+// split and concatenation.
+template <bool is_const> class VectorOfQuantizedTensors : public VectorOfTensors<uint8_t, is_const>
+{
+public:
+ using typename VectorOfTensors<uint8_t, is_const>::TensorT;
+
+ // Build with the tensors in 'tensor_list'.
+ explicit VectorOfQuantizedTensors(const std::vector<TensorT *> &tensor_list)
+ : VectorOfTensors<uint8_t, is_const>(tensor_list)
+ {
+ for (TensorT *tensor : tensor_list)
+ {
+ zero_point_.push_back(tensor->zero_point());
+ scale_.push_back(tensor->scale());
+ }
+ }
+
+ const float *scale() const { return scale_.data(); }
+ const int32_t *zero_point() const { return zero_point_.data(); }
+
+private:
+ std::vector<int32_t> zero_point_;
+ std::vector<float> scale_;
+};
+
+} // namespace kernels
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_KERNELS_UTILS_H
diff --git a/compiler/luci-interpreter/src/loader/CMakeLists.txt b/compiler/luci-interpreter/src/loader/CMakeLists.txt
new file mode 100644
index 000000000..fb36c4ab0
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/CMakeLists.txt
@@ -0,0 +1,15 @@
+set(SOURCES
+ GraphLoader.h
+ GraphLoader.cpp
+ KernelBuilder.h
+ KernelBuilder.cpp
+ ModuleLoader.h
+ ModuleLoader.cpp
+ RuntimeToIR.h)
+
+add_library(luci_interpreter_loader STATIC ${SOURCES})
+set_target_properties(luci_interpreter_loader PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(luci_interpreter_loader PUBLIC "${LUCI_INTERPRETER_SOURCE_DIR}")
+target_link_libraries(luci_interpreter_loader
+ PUBLIC luci_lang luci_interpreter_core
+ PRIVATE luci_interpreter_kernels nncc_common)
diff --git a/compiler/luci-interpreter/src/loader/GraphLoader.cpp b/compiler/luci-interpreter/src/loader/GraphLoader.cpp
new file mode 100644
index 000000000..779fa0647
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/GraphLoader.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "loader/GraphLoader.h"
+
+#include "loader/ModuleLoader.h"
+#include "loader/KernelBuilder.h"
+
+#include <loco/IR/Algorithm.h>
+
+namespace luci_interpreter
+{
+namespace
+{
+
+template <typename NodeT> Shape getNodeShape(const NodeT *node)
+{
+ Shape shape(node->rank());
+ for (uint32_t i = 0; i < node->rank(); ++i)
+ {
+ shape.dim(i) = node->dim(i).value();
+ }
+ return shape;
+}
+
+template <DataType DT> const void *getNodeDataImpl(const luci::CircleConst *node, size_t *data_size)
+{
+ const size_t element_size = getDataTypeSize(DT);
+ const int32_t num_elements = node->size<DT>();
+
+ *data_size = num_elements * element_size;
+ if (*data_size > 0)
+ {
+ // FIXME There is no good way to get the pointer to the data currently.
+ return &node->at<DT>(0);
+ }
+ return nullptr;
+}
+
+const void *getNodeData(const luci::CircleConst *node, size_t *data_size)
+{
+ switch (node->dtype())
+ {
+ case DataType::U8:
+ return getNodeDataImpl<DataType::U8>(node, data_size);
+ case DataType::FLOAT32:
+ return getNodeDataImpl<DataType::FLOAT32>(node, data_size);
+ case DataType::S32:
+ return getNodeDataImpl<DataType::S32>(node, data_size);
+ default:
+ throw std::runtime_error("Unsupported type.");
+ }
+}
+
+bool isExecutableNode(const luci::CircleNode *node)
+{
+ switch (node->opcode())
+ {
+ // These nodes denote inputs / outputs of a graph.
+ case luci::CircleOpcode::CONST:
+ case luci::CircleOpcode::CIRCLEINPUT:
+ case luci::CircleOpcode::CIRCLEOUTPUT:
+ // The following nodes denote outputs of multiple-output nodes.
+ case luci::CircleOpcode::CIRCLEIFOUT:
+ case luci::CircleOpcode::CIRCLESPLITOUT:
+ case luci::CircleOpcode::CIRCLEUNPACKOUT:
+ return false;
+ default:
+ return true;
+ }
+}
+
+bool isTensorProducingNode(const luci::CircleNode *node)
+{
+ switch (node->opcode())
+ {
+ // Output nodes do not produce tensors.
+ case luci::CircleOpcode::CIRCLEOUTPUT:
+ // The following nodes are multiple-output nodes. They do not produce tensors, the tensors
+ // are produced by the corresponding *Out nodes instead.
+ case luci::CircleOpcode::IF:
+ case luci::CircleOpcode::SPLIT:
+ case luci::CircleOpcode::UNPACK:
+ return false;
+ default:
+ return true;
+ }
+}
+
+} // namespace
+
+GraphLoader::GraphLoader(const ModuleLoader &module_loader, const loco::Graph *graph,
+ RuntimeGraph *runtime_graph, RuntimeToIR &runtime_to_ir,
+ std::unordered_map<const loco::Node *, Tensor *> &node_to_tensor)
+ : _module_loader(module_loader), _graph(graph), _runtime_graph(runtime_graph),
+ _runtime_to_ir(runtime_to_ir), _node_to_tensor(node_to_tensor)
+{
+}
+
+void GraphLoader::loadTensors()
+{
+ for (uint32_t i = 0; i < _graph->nodes()->size(); ++i)
+ {
+ const auto *node = loco::must_cast<const luci::CircleNode *>(_graph->nodes()->at(i));
+
+ if (!isTensorProducingNode(node))
+ continue;
+
+ // Only Input and Const nodes have shapes. Shapes of intermediate tensors will be inferred.
+ Shape shape{};
+ if (const auto *input_node = dynamic_cast<const luci::CircleInput *>(node))
+ {
+ shape = getNodeShape(input_node);
+ }
+ else if (const auto *const_node = dynamic_cast<const luci::CircleConst *>(node))
+ {
+ shape = getNodeShape(const_node);
+ }
+
+ AffineQuantization quantization;
+ if (node->quantparam() != nullptr)
+ {
+ const luci::CircleQuantParam *params = node->quantparam();
+ quantization.scale.assign(params->scale.cbegin(), params->scale.cend());
+ quantization.zero_point.assign(params->zerop.cbegin(), params->zerop.cend());
+ }
+
+ auto tensor = std::make_unique<Tensor>(node->dtype(), std::move(shape), std::move(quantization),
+ node->name());
+
+ if (const auto *const_node = dynamic_cast<const luci::CircleConst *>(node))
+ {
+ size_t data_size{};
+ const void *const_data = getNodeData(const_node, &data_size);
+ if (const_data != nullptr)
+ tensor->writeData(const_data, data_size);
+ }
+
+ _node_to_tensor.emplace(node, tensor.get());
+ _runtime_to_ir.tensor_to_node.emplace(tensor.get(), node);
+
+ _runtime_graph->addTensor(std::move(tensor));
+ }
+}
+
+void GraphLoader::initInputOutputTensors() const
+{
+ auto input_nodes = loco::input_nodes(_graph);
+ std::vector<Tensor *> input_tensors(input_nodes.size());
+ for (size_t i = 0; i < input_nodes.size(); ++i)
+ {
+ input_tensors[i] = _node_to_tensor.at(input_nodes[i]);
+ }
+ _runtime_graph->setInputTensors(input_tensors);
+
+ auto output_nodes = loco::output_nodes(const_cast<loco::Graph *>(_graph));
+ std::vector<Tensor *> output_tensors(output_nodes.size());
+ for (size_t i = 0; i < output_nodes.size(); ++i)
+ {
+ const auto *node = loco::must_cast<const luci::CircleOutput *>(output_nodes[i]);
+ output_tensors[i] = _node_to_tensor.at(node->from());
+ }
+ _runtime_graph->setOutputTensors(output_tensors);
+}
+
+void GraphLoader::loadOperators()
+{
+ KernelBuilder kernel_builder(_module_loader, *this);
+
+ // Create kernels for executable nodes. This has to be done in execution order.
+ for (const loco::Node *loco_node :
+ loco::postorder_traversal(loco::output_nodes(const_cast<loco::Graph *>(_graph))))
+ {
+ const auto *node = loco::must_cast<const luci::CircleNode *>(loco_node);
+
+ if (isExecutableNode(node))
+ {
+ std::unique_ptr<Kernel> kernel = node->accept(&kernel_builder);
+ _runtime_to_ir.kernel_to_node.emplace(kernel.get(), node);
+ _runtime_graph->addKernel(std::move(kernel));
+ }
+ }
+}
+
+void GraphLoader::load()
+{
+ loadTensors();
+ initInputOutputTensors();
+ loadOperators();
+}
+
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/loader/GraphLoader.h b/compiler/luci-interpreter/src/loader/GraphLoader.h
new file mode 100644
index 000000000..e0adc0f6c
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/GraphLoader.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_LOADER_GRAPHLOADER_H
+#define LUCI_INTERPRETER_LOADER_GRAPHLOADER_H
+
+#include "core/RuntimeGraph.h"
+#include "loader/RuntimeToIR.h"
+
+#include <loco/IR/Graph.h>
+
+#include <unordered_map>
+
+namespace luci_interpreter
+{
+
+class ModuleLoader;
+
+class GraphLoader
+{
+public:
+ GraphLoader(const ModuleLoader &module_loader, const loco::Graph *graph,
+ RuntimeGraph *runtime_graph, RuntimeToIR &runtime_to_ir,
+ std::unordered_map<const loco::Node *, Tensor *> &node_to_tensor);
+
+ void load();
+
+ Tensor *getTensorForNode(const loco::Node *node) const { return _node_to_tensor.at(node); }
+
+private:
+ void loadOperators();
+ void initInputOutputTensors() const;
+ void loadTensors();
+
+ const ModuleLoader &_module_loader;
+ const loco::Graph *_graph;
+ RuntimeGraph *_runtime_graph;
+ RuntimeToIR &_runtime_to_ir;
+
+ std::unordered_map<const loco::Node *, Tensor *> &_node_to_tensor;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_LOADER_GRAPHLOADER_H
diff --git a/compiler/luci-interpreter/src/loader/KernelBuilder.cpp b/compiler/luci-interpreter/src/loader/KernelBuilder.cpp
new file mode 100644
index 000000000..56da961dd
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/KernelBuilder.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "loader/KernelBuilder.h"
+
+#include "kernels/Add.h"
+#include "kernels/ArgMax.h"
+#include "kernels/AveragePool2D.h"
+#include "kernels/Concatenation.h"
+#include "kernels/Conv2D.h"
+#include "kernels/DepthwiseConv2D.h"
+#include "kernels/Elu.h"
+#include "kernels/FullyConnected.h"
+#include "kernels/If.h"
+#include "kernels/L2Normalize.h"
+#include "kernels/L2Pool2D.h"
+#include "kernels/LeakyRelu.h"
+#include "kernels/LocalResponseNormalization.h"
+#include "kernels/Logistic.h"
+#include "kernels/MaxPool2D.h"
+#include "kernels/Mean.h"
+#include "kernels/Mul.h"
+#include "kernels/Pad.h"
+#include "kernels/Reshape.h"
+#include "kernels/Softmax.h"
+#include "kernels/SpaceToDepth.h"
+#include "kernels/Split.h"
+#include "kernels/StridedSlice.h"
+#include "kernels/Squeeze.h"
+#include "kernels/Unpack.h"
+#include "kernels/Transpose.h"
+#include "kernels/TransposeConv.h"
+#include "loader/GraphLoader.h"
+#include "loader/ModuleLoader.h"
+
+#include <stdexcept>
+
+namespace luci_interpreter
+{
+
+template <typename CircleNodeOut>
+static std::vector<const loco::Node *> collectOutputNodes(const luci::CircleNode *node)
+{
+ std::vector<const CircleNodeOut *> output_nodes;
+ for (const loco::Node *loco_node : loco::succs(node))
+ {
+ output_nodes.push_back(loco::must_cast<const CircleNodeOut *>(loco_node));
+ }
+ std::sort(output_nodes.begin(), output_nodes.end(),
+ [](const CircleNodeOut *node1, const CircleNodeOut *node2) {
+ return node1->index() < node2->index();
+ });
+ return {output_nodes.cbegin(), output_nodes.cend()};
+}
+
+const Tensor *KernelBuilder::getInputTensor(const loco::Node *node) const
+{
+ const Tensor *tensor = _graph_loader.getTensorForNode(node);
+ assert(tensor != nullptr);
+ return tensor;
+}
+
+const Tensor *KernelBuilder::getOptionalInputTensor(const loco::Node *node) const
+{
+ // TODO Revise this when optional inputs are implemented in the IR.
+ return getInputTensor(node);
+}
+
+Tensor *KernelBuilder::getOutputTensor(const loco::Node *node) const
+{
+ Tensor *tensor = _graph_loader.getTensorForNode(node);
+ assert(tensor != nullptr);
+ return tensor;
+}
+
+std::vector<Tensor *>
+KernelBuilder::getOutputTensors(const std::vector<const loco::Node *> &nodes) const
+{
+ std::vector<Tensor *> tensors;
+ tensors.reserve(nodes.size());
+ for (const loco::Node *node : nodes)
+ tensors.push_back(getOutputTensor(node));
+ return tensors;
+}
+
+RuntimeGraph *KernelBuilder::getRuntimeGraph(const loco::Graph *graph) const
+{
+ RuntimeGraph *runtime_graph = _module_loader.getRuntimeGraph(graph);
+ assert(runtime_graph != nullptr);
+ return runtime_graph;
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleAdd *node)
+{
+ assert(node->arity() == 2);
+
+ const Tensor *input1 = getInputTensor(node->x());
+ const Tensor *input2 = getInputTensor(node->y());
+ Tensor *output = getOutputTensor(node);
+
+ AddParams params{};
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::Add>(input1, input2, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleArgMax *node)
+{
+ assert(node->arity() == 2);
+ const Tensor *input1 = getInputTensor(node->input());
+ const Tensor *input2 = getInputTensor(node->dimension());
+ Tensor *output = getOutputTensor(node);
+
+ ArgMaxParams params{};
+ params.output_type = node->output_type();
+
+ return std::make_unique<kernels::ArgMax>(input1, input2, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleAveragePool2D *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->value());
+ Tensor *output = getOutputTensor(node);
+
+ Pool2DParams params{};
+ params.padding = node->padding();
+ params.filter_height = node->filter()->h();
+ params.filter_width = node->filter()->w();
+ params.stride_height = node->stride()->h();
+ params.stride_width = node->stride()->w();
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::AveragePool2D>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleConcatenation *node)
+{
+ std::vector<const Tensor *> inputs(node->numValues());
+ for (uint32_t i = 0; i < node->numValues(); ++i)
+ {
+ inputs[i] = getInputTensor(node->values(i));
+ }
+ Tensor *output = getOutputTensor(node);
+
+ ConcatenationParams params{};
+ params.axis = node->axis();
+
+ return std::make_unique<kernels::Concatenation>(std::move(inputs), output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleConst *)
+{
+ throw std::runtime_error("Const node cannot be executed.");
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleConv2D *node)
+{
+ assert(node->arity() == 3);
+
+ const Tensor *input = getInputTensor(node->input());
+ const Tensor *filter = getInputTensor(node->filter());
+ const Tensor *bias = getInputTensor(node->bias());
+ Tensor *output = getOutputTensor(node);
+
+ Conv2DParams params{};
+ params.padding = node->padding();
+ params.stride_height = node->stride()->h();
+ params.stride_width = node->stride()->w();
+ params.dilation_height_factor = node->dilation()->h();
+ params.dilation_width_factor = node->dilation()->w();
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::Conv2D>(input, filter, bias, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleDepthwiseConv2D *node)
+{
+ assert(node->arity() == 3);
+
+ const Tensor *input = getInputTensor(node->input());
+ const Tensor *filter = getInputTensor(node->filter());
+ const Tensor *bias = getInputTensor(node->bias());
+ Tensor *output = getOutputTensor(node);
+
+ DepthwiseConv2DParams params{};
+ params.padding = node->padding();
+ params.depth_multiplier = node->depthMultiplier();
+ params.stride_height = node->stride()->h();
+ params.stride_width = node->stride()->w();
+ params.dilation_height_factor = node->dilation()->h();
+ params.dilation_width_factor = node->dilation()->w();
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::DepthwiseConv2D>(input, filter, bias, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleElu *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->features());
+ Tensor *output = getOutputTensor(node);
+
+ return std::make_unique<kernels::Elu>(input, output);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleFullyConnected *node)
+{
+ assert(node->arity() == 3);
+
+ const Tensor *input = getInputTensor(node->input());
+ const Tensor *filter = getInputTensor(node->weights());
+ const Tensor *bias = getOptionalInputTensor(node->bias());
+ Tensor *output = getOutputTensor(node);
+
+ FullyConnectedParams params{};
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::FullyConnected>(input, filter, bias, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleIf *node)
+{
+ auto output_nodes = collectOutputNodes<luci::CircleIfOut>(node);
+ assert(node->arity() == 1 + node->input_count());
+ assert(output_nodes.size() == static_cast<size_t>(node->output_count()));
+
+ const Tensor *cond = getInputTensor(node->cond());
+ std::vector<const Tensor *> inputs(node->input_count());
+ for (uint32_t i = 0; i < node->input_count(); ++i)
+ {
+ inputs[i] = getInputTensor(node->input(i));
+ }
+ std::vector<Tensor *> outputs = getOutputTensors(output_nodes);
+
+ RuntimeGraph *then_graph = getRuntimeGraph(node->then_graph());
+ RuntimeGraph *else_graph = getRuntimeGraph(node->else_graph());
+
+ return std::make_unique<kernels::If>(cond, std::move(inputs), std::move(outputs), then_graph,
+ else_graph);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleL2Normalize *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->x());
+ Tensor *output = getOutputTensor(node);
+
+ L2NormParams params{};
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::L2Normalize>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleL2Pool2D *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->value());
+ Tensor *output = getOutputTensor(node);
+
+ Pool2DParams params{};
+ params.padding = node->padding();
+ params.filter_height = node->filter()->h();
+ params.filter_width = node->filter()->w();
+ params.stride_height = node->stride()->h();
+ params.stride_width = node->stride()->w();
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::L2Pool2D>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleLeakyRelu *node)
+{
+ assert(node->arity() == 1);
+ const Tensor *input = getInputTensor(node->features());
+ Tensor *output = getOutputTensor(node);
+
+ LeakyReluParams params{};
+ params.alpha = node->alpha();
+
+ return std::make_unique<kernels::LeakyRelu>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleLocalResponseNormalization *node)
+{
+ assert(node->arity() == 1);
+ const Tensor *input = getInputTensor(node->input());
+ Tensor *output = getOutputTensor(node);
+
+ LocalResponseNormalizationParams params{};
+ params.radius = node->radius();
+ params.bias = node->bias();
+ params.alpha = node->alpha();
+ params.beta = node->beta();
+
+ return std::make_unique<kernels::LocalResponseNormalization>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleLogistic *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->x());
+ Tensor *output = getOutputTensor(node);
+
+ return std::make_unique<kernels::Logistic>(input, output);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleInput *)
+{
+ throw std::runtime_error("Input node cannot be executed.");
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleMaxPool2D *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->value());
+ Tensor *output = getOutputTensor(node);
+
+ Pool2DParams params{};
+ params.padding = node->padding();
+ params.filter_height = node->filter()->h();
+ params.filter_width = node->filter()->w();
+ params.stride_height = node->stride()->h();
+ params.stride_width = node->stride()->w();
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::MaxPool2D>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleMean *node)
+{
+ assert(node->arity() == 2);
+
+ const Tensor *input = getInputTensor(node->input());
+ const Tensor *axes = getInputTensor(node->reduction_indices());
+ Tensor *output = getOutputTensor(node);
+
+ ReducerParams params{};
+ params.keep_dims = node->keep_dims();
+
+ return std::make_unique<kernels::Mean>(input, axes, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleMul *node)
+{
+ assert(node->arity() == 2);
+
+ const Tensor *input1 = getInputTensor(node->x());
+ const Tensor *input2 = getInputTensor(node->y());
+ Tensor *output = getOutputTensor(node);
+
+ MulParams params{};
+ params.activation = node->fusedActivationFunction();
+
+ return std::make_unique<kernels::Mul>(input1, input2, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleOutput *)
+{
+ throw std::runtime_error("Output node cannot be executed.");
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CirclePad *node)
+{
+ assert(node->arity() == 2);
+
+ const Tensor *input = getInputTensor(node->input());
+ const Tensor *paddings = getInputTensor(node->paddings());
+ Tensor *output = getOutputTensor(node);
+
+ return std::make_unique<kernels::Pad>(input, paddings, output);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleReshape *node)
+{
+ assert(node->arity() == 2);
+
+ const Tensor *input = getInputTensor(node->tensor());
+ const Tensor *shape = getInputTensor(node->shape());
+ Tensor *output = getOutputTensor(node);
+
+ // NOTE 'newShape' attribute is ignored.
+ return std::make_unique<kernels::Reshape>(input, shape, output);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleSoftmax *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->logits());
+ Tensor *output = getOutputTensor(node);
+
+ SoftmaxParams params{};
+ params.beta = node->beta();
+
+ return std::make_unique<kernels::Softmax>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleSpaceToDepth *node)
+{
+ assert(node->arity() == 1);
+ const Tensor *input = getInputTensor(node->input());
+
+ Tensor *output = getOutputTensor(node);
+
+ SpaceToDepthParams params{};
+ params.block_size = node->block_size();
+
+ return std::make_unique<kernels::SpaceToDepth>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleSplit *node)
+{
+ auto output_nodes = collectOutputNodes<luci::CircleSplitOut>(node);
+ assert(node->arity() == 2);
+ assert(output_nodes.size() == static_cast<size_t>(node->num_split()));
+
+ const Tensor *axis = getInputTensor(node->split_dim());
+ const Tensor *input = getInputTensor(node->input());
+ std::vector<Tensor *> outputs = getOutputTensors(output_nodes);
+
+ // NOTE 'num_splits' attribute is ignored.
+ return std::make_unique<kernels::Split>(axis, input, std::move(outputs));
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleStridedSlice *node)
+{
+ assert(node->arity() == 4);
+
+ const Tensor *input = getInputTensor(node->input());
+ const Tensor *begin = getInputTensor(node->begin());
+ const Tensor *end = getInputTensor(node->end());
+ const Tensor *strides = getInputTensor(node->strides());
+
+ Tensor *output = getOutputTensor(node);
+
+ StridedSliceParams params{};
+ params.begin_mask = node->begin_mask();
+ params.ellipsis_mask = node->ellipsis_mask();
+ params.end_mask = node->end_mask();
+ params.new_axis_mask = node->new_axis_mask();
+ params.shrink_axis_mask = node->shrink_axis_mask();
+
+ return std::make_unique<kernels::StridedSlice>(input, begin, end, strides, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleSqueeze *node)
+{
+ assert(node->arity() == 1);
+
+ const Tensor *input = getInputTensor(node->input());
+ Tensor *output = getOutputTensor(node);
+
+ SqueezeParams params{};
+ assert(node->squeeze_dims().size() <= 4);
+ for (size_t i = 0; i < node->squeeze_dims().size(); i++)
+ {
+ params.squeeze_dims.push_back(node->squeeze_dims().at(i));
+ }
+
+ return std::make_unique<kernels::Squeeze>(input, output, params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleTransposeConv *node)
+{
+ assert(node->arity() == 3);
+
+ const Tensor *input_sizes = getInputTensor(node->inputSizes());
+ const Tensor *filter = getInputTensor(node->filter());
+ const Tensor *out_backprop = getInputTensor(node->outBackprop());
+
+ Tensor *output = getOutputTensor(node);
+
+ TransposeConvParams params{};
+ params.padding = node->padding();
+ params.stride_height = node->stride()->h();
+ params.stride_width = node->stride()->w();
+
+ return std::make_unique<kernels::TransposeConv>(input_sizes, filter, out_backprop, output,
+ params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleUnpack *node)
+{
+ auto output_nodes = collectOutputNodes<luci::CircleUnpackOut>(node);
+ assert(node->arity() == 1);
+ assert(output_nodes.size() == static_cast<size_t>(node->num()));
+
+ const Tensor *input = getInputTensor(node->value());
+ std::vector<Tensor *> outputs = getOutputTensors(output_nodes);
+
+ UnpackParams params{};
+ params.axis = node->axis();
+
+ // NOTE 'num' attribute is ignored.
+ return std::make_unique<kernels::Unpack>(input, std::move(outputs), params);
+}
+
+std::unique_ptr<Kernel> KernelBuilder::visit(const luci::CircleTranspose *node)
+{
+ assert(node->arity() == 2);
+
+ const Tensor *input = getInputTensor(node->a());
+ const Tensor *perm = getInputTensor(node->perm());
+ Tensor *output = getOutputTensor(node);
+
+ return std::make_unique<kernels::Transpose>(input, perm, output);
+}
+
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/loader/KernelBuilder.h b/compiler/luci-interpreter/src/loader/KernelBuilder.h
new file mode 100644
index 000000000..7e30d395b
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/KernelBuilder.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_LOADER_KERNELBUILDER_H
+#define LUCI_INTERPRETER_LOADER_KERNELBUILDER_H
+
+#include "core/Kernel.h"
+#include "core/RuntimeGraph.h"
+
+#include <luci/IR/CircleNodeVisitor.h>
+
+#include <memory>
+#include <vector>
+
+namespace luci_interpreter
+{
+
+class GraphLoader;
+class ModuleLoader;
+
+class KernelBuilder : public luci::CircleNodeVisitor<std::unique_ptr<Kernel>>
+{
+public:
+ KernelBuilder(const ModuleLoader &module_loader, const GraphLoader &graph_loader)
+ : _module_loader(module_loader), _graph_loader(graph_loader)
+ {
+ }
+
+ std::unique_ptr<Kernel> visit(const luci::CircleAdd *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleArgMax *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleAveragePool2D *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleConcatenation *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleConv2D *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleConst *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleDepthwiseConv2D *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleElu *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleFullyConnected *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleIf *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleL2Normalize *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleL2Pool2D *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleLeakyRelu *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleLocalResponseNormalization *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleLogistic *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleInput *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleMaxPool2D *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleMean *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleMul *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleOutput *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CirclePad *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleReshape *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleSoftmax *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleSpaceToDepth *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleSplit *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleStridedSlice *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleSqueeze *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleTranspose *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleTransposeConv *node) override;
+ std::unique_ptr<Kernel> visit(const luci::CircleUnpack *node) override;
+
+private:
+ const Tensor *getInputTensor(const loco::Node *node) const;
+
+ const Tensor *getOptionalInputTensor(const loco::Node *node) const;
+
+ Tensor *getOutputTensor(const loco::Node *node) const;
+
+ std::vector<Tensor *> getOutputTensors(const std::vector<const loco::Node *> &nodes) const;
+
+ RuntimeGraph *getRuntimeGraph(const loco::Graph *graph) const;
+
+private:
+ const ModuleLoader &_module_loader;
+ const GraphLoader &_graph_loader;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_LOADER_KERNELBUILDER_H
diff --git a/compiler/luci-interpreter/src/loader/ModuleLoader.cpp b/compiler/luci-interpreter/src/loader/ModuleLoader.cpp
new file mode 100644
index 000000000..7780a61b6
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/ModuleLoader.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModuleLoader.h"
+
+#include "GraphLoader.h"
+
+namespace luci_interpreter
+{
+
+ModuleLoader::ModuleLoader(const luci::Module *module, RuntimeModule *runtime_module,
+ RuntimeToIR &runtime_to_ir,
+ std::unordered_map<const loco::Node *, Tensor *> &node_to_tensor)
+ : _module(module), _runtime_module(runtime_module), _runtime_to_ir(runtime_to_ir),
+ _node_to_tensor(node_to_tensor)
+{
+}
+
+void ModuleLoader::load()
+{
+ // Runtime graphs have to be created in advance, because they will be needed during the loading
+ // process for control flow nodes.
+ for (size_t i = 0; i < _module->size(); ++i)
+ {
+ _graph_to_runtime_graph.emplace(_module->graph(i), _runtime_module->addGraph());
+ }
+ for (size_t i = 0; i < _module->size(); ++i)
+ {
+ const loco::Graph *graph = _module->graph(i);
+ RuntimeGraph *runtime_graph = _graph_to_runtime_graph.at(graph);
+ GraphLoader loader(*this, graph, runtime_graph, _runtime_to_ir, _node_to_tensor);
+ loader.load();
+ }
+}
+
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/loader/ModuleLoader.h b/compiler/luci-interpreter/src/loader/ModuleLoader.h
new file mode 100644
index 000000000..954dbfb61
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/ModuleLoader.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_LOADER_MODULELOADER_H
+#define LUCI_INTERPRETER_LOADER_MODULELOADER_H
+
+#include "core/RuntimeModule.h"
+#include "loader/RuntimeToIR.h"
+
+#include <luci/IR/Module.h>
+
+#include <unordered_map>
+
+namespace luci_interpreter
+{
+
+class ModuleLoader
+{
+public:
+ ModuleLoader(const luci::Module *module, RuntimeModule *runtime_module,
+ RuntimeToIR &runtime_to_ir,
+ std::unordered_map<const loco::Node *, Tensor *> &node_to_tensor);
+
+ void load();
+
+ RuntimeGraph *getRuntimeGraph(const loco::Graph *graph) const
+ {
+ return _graph_to_runtime_graph.at(graph);
+ }
+
+private:
+ const luci::Module *_module;
+ RuntimeModule *_runtime_module;
+ RuntimeToIR &_runtime_to_ir;
+ std::unordered_map<const loco::Node *, Tensor *> &_node_to_tensor;
+ std::unordered_map<const loco::Graph *, RuntimeGraph *> _graph_to_runtime_graph;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_LOADER_MODULELOADER_H
diff --git a/compiler/luci-interpreter/src/loader/RuntimeToIR.h b/compiler/luci-interpreter/src/loader/RuntimeToIR.h
new file mode 100644
index 000000000..9ea8b1fa2
--- /dev/null
+++ b/compiler/luci-interpreter/src/loader/RuntimeToIR.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LUCI_INTERPRETER_LOADER_RUNTIMETOIR_H
+#define LUCI_INTERPRETER_LOADER_RUNTIMETOIR_H
+
+#include "luci_interpreter/core/Tensor.h"
+
+#include <luci/IR/CircleNode.h>
+
+#include <unordered_map>
+
+namespace luci_interpreter
+{
+
+// Maps runtime entities back to IR entities. It is used to implement observing functionality.
+struct RuntimeToIR
+{
+ std::unordered_map<const Tensor *, const luci::CircleNode *> tensor_to_node;
+ std::unordered_map<const Kernel *, const luci::CircleNode *> kernel_to_node;
+};
+
+} // namespace luci_interpreter
+
+#endif // LUCI_INTERPRETER_LOADER_RUNTIMETOIR_H
diff --git a/compiler/luci-value-test/CMakeLists.txt b/compiler/luci-value-test/CMakeLists.txt
new file mode 100644
index 000000000..3a5c42b11
--- /dev/null
+++ b/compiler/luci-value-test/CMakeLists.txt
@@ -0,0 +1,25 @@
+unset(LUCI_VALUE_TESTS)
+
+macro(addeval NAME)
+ list(APPEND LUCI_VALUE_TESTS ${NAME})
+endmacro(addeval)
+
+# Read "test.lst"
+include("test.lst")
+# Read "test.local.lst" if exists
+include("test.local.lst" OPTIONAL)
+
+# Generate dependencies
+add_custom_target(luci_eval_testfiles ALL DEPENDS ${TESTFILES})
+
+add_subdirectory(tester)
+
+get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR)
+
+add_test(NAME luci_value_test
+ COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/evalverify.sh"
+ "${CMAKE_CURRENT_BINARY_DIR}"
+ "${ARTIFACTS_BIN_PATH}"
+ "${NNCC_OVERLAY_DIR}/venv_1_13_2"
+ ${LUCI_VALUE_TESTS}
+)
diff --git a/compiler/luci-value-test/README.md b/compiler/luci-value-test/README.md
new file mode 100644
index 000000000..90e92834b
--- /dev/null
+++ b/compiler/luci-value-test/README.md
@@ -0,0 +1,15 @@
+# luci-value-test
+
+`luci-value-test` validates luci IR graph model file (.circle)
+
+The test proceeds as follows
+
+Step 1: Generate tflite files and circle files from TFLite recipes (listed in test.lst).
+"TFLite recipe" -> tflchef -> "tflite file" -> tflite2circle -> "circle file"
+
+Step 2: Run TFLite interpreter and luci-interpreter for the generated tflite and circle, respectively.
+(with the same input tensors filled with random values)
+circle file -> luci-interpreter -------> Execution result 1
+tflite file -> TFLite interpreter -----> Execution result 2
+
+Step 3: Compare the execution result 1 and 2. The result must be the same.
diff --git a/compiler/luci-value-test/evalverify.sh b/compiler/luci-value-test/evalverify.sh
new file mode 100755
index 000000000..dfd55a691
--- /dev/null
+++ b/compiler/luci-value-test/evalverify.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# This script verifies the basic behavior of luci interpreter
+#
+# HOW TO USE
+#
+# ./evalverify.sh <path/to/bin_dir> <path/to/work_dir> <path/to/venv_dir> <TEST 1> <TEST 2> ...
+# bin_dir : build directory of luci-value-test / work_dir : artifacts directory / venv_dir : python virtualenv
+
+VERIFY_SOURCE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+VERIFY_SCRIPT_PATH="${VERIFY_SOURCE_PATH}/luci_eval_verifier.py"
+BINDIR="$1"; shift
+WORKDIR="$1"; shift
+VIRTUALENV="$1"; shift
+INTERPRETER_DRIVER_PATH="${BINDIR}/tester/luci_eval_tester"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+for TESTCASE in "$@"; do
+ TESTED+=("${TESTCASE}")
+
+ TESTCASE_FILE="${WORKDIR}/${TESTCASE}"
+ TEST_RESULT_FILE="${BINDIR}/${TESTCASE}"
+
+ PASSED_TAG="${TEST_RESULT_FILE}.passed"
+ rm -f "${PASSED_TAG}"
+
+ cat > "${TEST_RESULT_FILE}.log" <(
+ exec 2>&1
+ set -ex
+
+ source "${VIRTUALENV}/bin/activate"
+ "${VIRTUALENV}/bin/python" "${VERIFY_SCRIPT_PATH}" \
+ --driver "${INTERPRETER_DRIVER_PATH}" \
+ --model "${TESTCASE_FILE}"
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("${TESTCASE}")
+ else
+ FAILED+=("${TESTCASE}")
+ fi
+done
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/luci-value-test/luci_eval_verifier.py b/compiler/luci-value-test/luci_eval_verifier.py
new file mode 100755
index 000000000..6999110b6
--- /dev/null
+++ b/compiler/luci-value-test/luci_eval_verifier.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+import numpy as np
+import tensorflow as tf
+import subprocess
+import argparse
+import traceback
+
+#
+# This script compares the execution result of luci-interpreter with that of TFLite interpreter
+#
+# Basic usage:
+# luci_eval_verifier.py --driver build/compiler/luci-value-test/tester/luci_eval_tester
+# --model inception_v3
+parser = argparse.ArgumentParser()
+parser.add_argument('--driver', type=str, required=True)
+parser.add_argument('--model', type=str, required=True)
+args = parser.parse_args()
+
+driver = args.driver
+tflite_model = args.model + ".tflite"
+circle_model = args.model + ".circle"
+
+# Build TFLite interpreter.
+interpreter = tf.lite.Interpreter(tflite_model)
+interpreter.allocate_tensors()
+
+# Generate random input data.
+num_inputs = len(interpreter.get_input_details())
+for i in range(num_inputs):
+ input_details = interpreter.get_input_details()[i]
+ if input_details["dtype"] == np.float32:
+ input_data = np.array(
+ np.random.random_sample(input_details["shape"]), input_details["dtype"])
+ elif input_details["dtype"] == np.uint8:
+ input_data = np.array(
+ np.random.randint(0, 256, size=input_details["shape"]),
+ input_details["dtype"])
+ else:
+ raise SystemExit("Unsupported input dtype")
+
+ interpreter.set_tensor(input_details["index"], input_data)
+ input_data.tofile(circle_model + ".input" + str(i))
+
+# Do inference
+interpreter.invoke()
+
+# Get reference output data.
+assert len(interpreter.get_output_details()) == 1 # TODO: Support multiple outputs
+output_details = interpreter.get_output_details()[0]
+ref_output_data = interpreter.get_tensor(output_details["index"])
+
+# Execute luci interpreter.
+subprocess.run(
+ [
+ driver, circle_model,
+ str(num_inputs), circle_model + ".input", circle_model + ".output"
+ ],
+ check=True)
+output_data = np.fromfile(circle_model + ".output", output_details["dtype"])
+shape_file = open(circle_model + ".output.shape", 'r')
+output_shape = [int(i) for i in shape_file.read().split(',')]
+shape_file.close()
+luci_output_data = np.reshape(output_data, output_shape)
+
+# Compare the results.
+try:
+ if output_details["dtype"] == np.uint8:
+ if np.allclose(luci_output_data, ref_output_data, rtol=0, atol=0) == False:
+ raise SystemExit("Execution result of " + tflite_model +
+ " does not match with " + circle_model)
+ elif output_details["dtype"] == np.float32:
+ if np.allclose(
+ luci_output_data, ref_output_data, rtol=1.e-5, atol=1.e-5) == False:
+ raise SystemExit("Execution result of " + tflite_model +
+ " does not match with " + circle_model)
+ else:
+ raise SystemExit("Unsupported data type: ", output_details["dtype"])
+except:
+ print(traceback.format_exc())
+ quit(255)
+
+quit(0)
diff --git a/compiler/luci-value-test/requires.cmake b/compiler/luci-value-test/requires.cmake
new file mode 100644
index 000000000..f8af5f27e
--- /dev/null
+++ b/compiler/luci-value-test/requires.cmake
@@ -0,0 +1,6 @@
+require("common-artifacts")
+require("luci")
+require("luci-interpreter")
+require("safemain")
+require("oops")
+require("loco")
diff --git a/compiler/luci-value-test/test.lst b/compiler/luci-value-test/test.lst
new file mode 100644
index 000000000..6a332f92c
--- /dev/null
+++ b/compiler/luci-value-test/test.lst
@@ -0,0 +1,81 @@
+#addeval(Abs_000)
+addeval(Add_000)
+addeval(Add_U8_000)
+#addeval(ArgMax_000)
+#addeval(ArgMax_001)
+#addeval(ArgMax_002)
+#addeval(ArgMax_003)
+#addeval(ArgMax_U8_000)
+#addeval(ArgMax_U8_001)
+#addeval(ArgMax_U8_002)
+#addeval(ArgMax_U8_003)
+addeval(AveragePool2D_000)
+#addeval(BatchMatMulV2_000)
+#addeval(BatchMatMulV2_001)
+#addeval(BatchToSpaceND_000)
+#addeval(Cast_000)
+addeval(Concatenation_000)
+addeval(Concatenation_U8_000)
+addeval(Conv2D_000)
+addeval(Conv2D_001)
+addeval(Conv2D_002)
+addeval(Conv2D_U8_000)
+addeval(Conv2D_U8_001)
+#addeval(Cos_000)
+addeval(DepthwiseConv2D_000)
+addeval(DepthwiseConv2D_U8_000)
+#addeval(Div_000)
+#addeval(Equal_000)
+#addeval(Exp_000)
+addeval(FullyConnected_000)
+addeval(FullyConnected_001)
+#addeval(FullyConnected_002)
+#addeval(FullyConnected_U8_000)
+#addeval(Gather_000)
+#addeval(If_000)
+#addeval(If_001)
+#addeval(LogicalNot_000)
+#addeval(LogicalOr_000)
+#addeval(Logistic_000)
+addeval(MaxPool2D_000)
+addeval(MaxPool2D_U8_000)
+addeval(Mean_000)
+addeval(Mean_001)
+addeval(Mean_U8_000)
+addeval(Mul_000)
+#addeval(Mul_U8_000)
+#addeval(Pack_000)
+#addeval(Pack_U8_000)
+addeval(Pad_000)
+addeval(Pad_U8_000)
+#addeval(ReduceProd_000)
+#addeval(ReduceProd_001)
+#addeval(ReduceProd_002)
+#addeval(ReduceProd_003)
+#addeval(ReLU_000)
+addeval(Reshape_000)
+addeval(Reshape_001)
+addeval(Reshape_002)
+#addeval(Reshape_003)
+addeval(Reshape_U8_000)
+#addeval(Rsqrt_000)
+#addeval(Sin_000)
+addeval(Softmax_000)
+#addeval(Softmax_U8_000)
+#addeval(SpaceToBatchND_000)
+#addeval(SpaceToBatchND_001)
+#addeval(SpaceToBatchND_002)
+#addeval(SpaceToBatchND_003)
+#addeval(StridedSlice_000)
+#addeval(StridedSlice_001)
+#addeval(Sub_000)
+#addeval(Sub_U8_000)
+#addeval(Tanh_000)
+#addeval(Tile_000)
+#addeval(Tile_U8_000)
+#addeval(Transpose_000)
+#addeval(Unpack_000)
+#addeval(Unpack_001)
+#addeval(Unpack_002)
+#addeval(While_000)
+#addeval(While_001)
diff --git a/compiler/luci-value-test/tester/CMakeLists.txt b/compiler/luci-value-test/tester/CMakeLists.txt
new file mode 100644
index 000000000..f3b6dfcfe
--- /dev/null
+++ b/compiler/luci-value-test/tester/CMakeLists.txt
@@ -0,0 +1,15 @@
+
+set(SRCS_EVAL_TESTER
+ src/EvalTester.cpp
+ src/CircleExpContract.h
+ src/CircleExpContract.cpp
+ )
+
+add_executable(luci_eval_tester ${SRCS_EVAL_TESTER})
+target_link_libraries(luci_eval_tester PRIVATE oops)
+target_link_libraries(luci_eval_tester PRIVATE loco)
+target_link_libraries(luci_eval_tester PRIVATE luci_import)
+target_link_libraries(luci_eval_tester PRIVATE luci_export)
+target_link_libraries(luci_eval_tester PRIVATE luci_lang)
+target_link_libraries(luci_eval_tester PRIVATE luci_interpreter)
+target_link_libraries(luci_eval_tester PRIVATE safemain)
diff --git a/compiler/luci-value-test/tester/src/CircleExpContract.cpp b/compiler/luci-value-test/tester/src/CircleExpContract.cpp
new file mode 100644
index 000000000..b56b7eedc
--- /dev/null
+++ b/compiler/luci-value-test/tester/src/CircleExpContract.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CircleExpContract.h"
+
+#include <oops/InternalExn.h>
+
+#include <fstream>
+#include <iostream>
+
+bool CircleExpContract::store(const char *ptr, const size_t size) const
+{
+ if (!ptr)
+ INTERNAL_EXN("Graph was not serialized by FlatBuffer for some reason");
+
+ std::ofstream fs(_filepath.c_str(), std::ofstream::binary);
+ fs.write(ptr, size);
+
+ return fs.good();
+}
diff --git a/compiler/luci-value-test/tester/src/CircleExpContract.h b/compiler/luci-value-test/tester/src/CircleExpContract.h
new file mode 100644
index 000000000..4d08fb89b
--- /dev/null
+++ b/compiler/luci-value-test/tester/src/CircleExpContract.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_VALUE_TEST_CIRCLEXPCONTRACT_H__
+#define __LUCI_VALUE_TEST_CIRCLEXPCONTRACT_H__
+
+#include <loco.h>
+#include <luci/CircleExporter.h>
+#include <luci/IR/Module.h>
+
+#include <memory>
+#include <string>
+
+struct CircleExpContract : public luci::CircleExporter::Contract
+{
+public:
+ CircleExpContract(luci::Module *module, const std::string &filename)
+ : _module(module), _filepath(filename)
+ {
+ // NOTHING TO DO
+ }
+ virtual ~CircleExpContract() = default;
+
+public:
+ loco::Graph *graph(void) const final { return nullptr; }
+ luci::Module *module(void) const final { return _module; };
+
+public:
+ bool store(const char *ptr, const size_t size) const final;
+
+private:
+ luci::Module *_module;
+ const std::string _filepath;
+};
+
+#endif // __LUCI_VALUE_TEST_CIRCLEXPCONTRACT_H__
diff --git a/compiler/luci-value-test/tester/src/EvalTester.cpp b/compiler/luci-value-test/tester/src/EvalTester.cpp
new file mode 100644
index 000000000..58f62f54c
--- /dev/null
+++ b/compiler/luci-value-test/tester/src/EvalTester.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CircleExpContract.h"
+
+#include <luci/Importer.h>
+#include <luci_interpreter/Interpreter.h>
+
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <vector>
+#include <map>
+#include <string>
+#include <random>
+
+namespace
+{
+
+void readDataFromFile(const std::string &filename, char *data, size_t data_size)
+{
+ std::ifstream fs(filename, std::ifstream::binary);
+ if (fs.fail())
+ throw std::runtime_error("Cannot open file \"" + filename + "\".\n");
+ if (fs.read(data, data_size).fail())
+ throw std::runtime_error("Failed to read data from file \"" + filename + "\".\n");
+}
+
+void writeDataToFile(const std::string &filename, const char *data, size_t data_size)
+{
+ std::ofstream fs(filename, std::ofstream::binary);
+ if (fs.fail())
+ throw std::runtime_error("Cannot open file \"" + filename + "\".\n");
+ if (fs.write(data, data_size).fail())
+ {
+ throw std::runtime_error("Failed to write data to file \"" + filename + "\".\n");
+ }
+}
+
+std::unique_ptr<luci::Module> importModel(const std::string &filename)
+{
+ std::ifstream fs(filename, std::ifstream::binary);
+ if (fs.fail())
+ {
+ throw std::runtime_error("Cannot open model file \"" + filename + "\".\n");
+ }
+ std::vector<char> model_data((std::istreambuf_iterator<char>(fs)),
+ std::istreambuf_iterator<char>());
+ return luci::Importer().importModule(circle::GetModel(model_data.data()));
+}
+
+template <typename NodeT> size_t getTensorSize(const NodeT *node)
+{
+ uint32_t tensor_size = loco::size(node->dtype());
+ for (uint32_t i = 0; i < node->rank(); ++i)
+ tensor_size *= node->dim(i).value();
+ return tensor_size;
+}
+
+} // namespace
+
+/*
+ * @brief EvalTester main
+ *
+ * Driver for testing luci-interpreter
+ *
+ */
+int entry(int argc, char **argv)
+{
+ if (argc != 5)
+ {
+ std::cerr
+ << "Usage: " << argv[0]
+ << " <path/to/circle/model> <num_inputs> <path/to/input/prefix> <path/to/output/file>\n";
+ return EXIT_FAILURE;
+ }
+
+ const char *filename = argv[1];
+ const int32_t num_inputs = atoi(argv[2]);
+ const char *input_prefix = argv[3];
+ const char *output_file = argv[4];
+ const std::string intermediate_filename = std::string(filename) + ".inter.circle";
+
+ // Load model from the file
+ std::unique_ptr<luci::Module> initial_module = importModel(filename);
+ if (initial_module == nullptr)
+ {
+ std::cerr << "ERROR: Failed to load '" << filename << "'" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // Export to a Circle file
+ luci::CircleExporter exporter;
+ CircleExpContract contract(initial_module.get(), intermediate_filename);
+ if (!exporter.invoke(&contract))
+ {
+ std::cerr << "ERROR: Failed to export '" << intermediate_filename << "'" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // Import model again
+ std::unique_ptr<luci::Module> module = importModel(intermediate_filename);
+ if (module == nullptr)
+ {
+ std::cerr << "ERROR: Failed to load '" << intermediate_filename << "'" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // Create interpreter.
+ luci_interpreter::Interpreter interpreter(module.get());
+
+ // Set input.
+ // Data for n'th input is read from ${input_prefix}n
+ // (ex: Add.circle.input0, Add.circle.input1 ..)
+ const auto input_nodes = loco::input_nodes(module->graph());
+ assert(num_inputs == input_nodes.size());
+ for (int32_t i = 0; i < num_inputs; i++)
+ {
+ const auto *input_node = dynamic_cast<const luci::CircleInput *>(input_nodes[i]);
+ std::vector<char> input_data(getTensorSize(input_node));
+ readDataFromFile(std::string(input_prefix) + std::to_string(i), input_data.data(),
+ input_data.size());
+ interpreter.writeInputTensor(input_node, input_data.data(), input_data.size());
+ }
+
+ // Do inference.
+ interpreter.interpret();
+
+ // Get output.
+ const auto output_nodes = loco::output_nodes(module->graph());
+ // TODO: Support multiple outputs
+ assert(output_nodes.size() == 1);
+ const auto *output_node = dynamic_cast<const luci::CircleOutput *>(output_nodes[0]);
+ std::vector<char> output_data(getTensorSize(output_node));
+ interpreter.readOutputTensor(output_node, output_data.data(), output_data.size());
+
+ // Output data is written in ${output_file}
+ // (ex: Add.circle.output)
+ // Output shape is written in ${output_file}.shape
+ // (ex: Add.circle.output.shape)
+ // TODO: Use HDF5 file format
+ writeDataToFile(output_file, output_data.data(), output_data.size());
+ auto shape_str = std::to_string(output_node->dim(0).value());
+ for (int i = 1; i < output_node->rank(); i++)
+ {
+ shape_str += ",";
+ shape_str += std::to_string(output_node->dim(i).value());
+ }
+ writeDataToFile(std::string(output_file) + ".shape", shape_str.c_str(), shape_str.size());
+ return EXIT_SUCCESS;
+}
diff --git a/compiler/luci/CMakeLists.txt b/compiler/luci/CMakeLists.txt
index 387c22487..214a1bbf2 100644
--- a/compiler/luci/CMakeLists.txt
+++ b/compiler/luci/CMakeLists.txt
@@ -1,3 +1,4 @@
+add_subdirectory(env)
add_subdirectory(log)
add_subdirectory(lang)
add_subdirectory(service)
diff --git a/compiler/luci/env/CMakeLists.txt b/compiler/luci/env/CMakeLists.txt
new file mode 100644
index 000000000..3d8387a47
--- /dev/null
+++ b/compiler/luci/env/CMakeLists.txt
@@ -0,0 +1,18 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
+
+add_library(luci_env SHARED ${SOURCES})
+target_include_directories(luci_env PUBLIC include)
+target_link_libraries(luci_env PRIVATE nncc_common)
+install(TARGETS luci_env DESTINATION lib)
+
+if(NOT ENABLE_TEST)
+ return()
+endif(NOT ENABLE_TEST)
+
+nnas_find_package(GTest REQUIRED)
+
+GTest_AddTest(luci_env_test ${TESTS})
+target_include_directories(luci_env_test PRIVATE src)
+target_link_libraries(luci_env_test luci_env)
diff --git a/compiler/luci/env/README.md b/compiler/luci/env/README.md
new file mode 100644
index 000000000..cda007867
--- /dev/null
+++ b/compiler/luci/env/README.md
@@ -0,0 +1,3 @@
+# luci-env
+
+_luci-env_ provides user environment settings that control _luci_
diff --git a/compiler/luci/env/include/luci/UserSettings.h b/compiler/luci/env/include/luci/UserSettings.h
new file mode 100644
index 000000000..bcfd16071
--- /dev/null
+++ b/compiler/luci/env/include/luci/UserSettings.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_USER_SETTINGS__
+#define __LUCI_USER_SETTINGS__
+
+// NOTE Revise the logic if we find a better way not using global status
+
+namespace luci
+{
+
+/**
+ * @brief UserSettings provides user settings by key-value
+ */
+struct UserSettings
+{
+ enum Key
+ {
+ Undefined,
+ MuteWarnings,
+ DisableValidation,
+ };
+
+ static UserSettings *settings();
+
+ virtual void set(const Key key, bool value) = 0;
+ virtual bool get(const Key key) const = 0;
+};
+
+} // namespace luci
+
+#endif // __LUCI_USER_SETTINGS__
diff --git a/compiler/luci/env/src/UserSettings.cpp b/compiler/luci/env/src/UserSettings.cpp
new file mode 100644
index 000000000..27dec762d
--- /dev/null
+++ b/compiler/luci/env/src/UserSettings.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/UserSettings.h"
+
+#include <stdexcept>
+
+namespace luci
+{
+
+class UserSettingsImpl : public UserSettings
+{
+public:
+ void set(const Key key, bool value) override;
+ bool get(const Key key) const override;
+
+private:
+ bool _MuteWarnings{false};
+ bool _DisableValidation{false};
+};
+
+void UserSettingsImpl::set(const Key key, bool value)
+{
+ switch (key)
+ {
+ case Key::MuteWarnings:
+ _MuteWarnings = value;
+ break;
+ case Key::DisableValidation:
+ _DisableValidation = value;
+ break;
+ default:
+ throw std::runtime_error("Invalid key in boolean set");
+ break;
+ }
+}
+
+bool UserSettingsImpl::get(const Key key) const
+{
+ switch (key)
+ {
+ case Key::MuteWarnings:
+ return _MuteWarnings;
+ case Key::DisableValidation:
+ return _DisableValidation;
+ default:
+ throw std::runtime_error("Invalid key in boolean get");
+ break;
+ }
+ return false;
+}
+
+} // namespace luci
+
+namespace luci
+{
+
+UserSettings *UserSettings::settings()
+{
+ static UserSettingsImpl _this;
+ return &_this;
+}
+
+} // namespace luci
diff --git a/compiler/luci/env/src/UserSettings.test.cpp b/compiler/luci/env/src/UserSettings.test.cpp
new file mode 100644
index 000000000..8d9d1875b
--- /dev/null
+++ b/compiler/luci/env/src/UserSettings.test.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/UserSettings.h"
+
+#include <gtest/gtest.h>
+
+TEST(UserSettings, instance)
+{
+ auto settings = luci::UserSettings::settings();
+ ASSERT_NE(nullptr, settings);
+
+ auto s2 = luci::UserSettings::settings();
+ ASSERT_EQ(s2, settings);
+}
+
+TEST(UserSettings, MuteWarnings)
+{
+ auto settings = luci::UserSettings::settings();
+ ASSERT_NE(nullptr, settings);
+
+ settings->set(luci::UserSettings::Key::MuteWarnings, false);
+ ASSERT_FALSE(settings->get(luci::UserSettings::Key::MuteWarnings));
+
+ settings->set(luci::UserSettings::Key::MuteWarnings, true);
+ ASSERT_TRUE(settings->get(luci::UserSettings::Key::MuteWarnings));
+}
+
+TEST(UserSettings, DisableValidation)
+{
+ auto settings = luci::UserSettings::settings();
+ ASSERT_NE(nullptr, settings);
+
+ settings->set(luci::UserSettings::Key::DisableValidation, false);
+ ASSERT_FALSE(settings->get(luci::UserSettings::Key::DisableValidation));
+
+ settings->set(luci::UserSettings::Key::DisableValidation, true);
+ ASSERT_TRUE(settings->get(luci::UserSettings::Key::DisableValidation));
+}
+
+TEST(UserSettings, undefined_set_NEG)
+{
+ auto settings = luci::UserSettings::settings();
+ ASSERT_NE(nullptr, settings);
+
+ ASSERT_THROW(settings->set(luci::UserSettings::Key::Undefined, true), std::exception);
+}
+
+TEST(UserSettings, undefined_get_NEG)
+{
+ auto settings = luci::UserSettings::settings();
+ ASSERT_NE(nullptr, settings);
+
+ ASSERT_THROW(settings->get(luci::UserSettings::Key::Undefined), std::exception);
+}
diff --git a/compiler/luci/export/CMakeLists.txt b/compiler/luci/export/CMakeLists.txt
index e32eca366..fe4382ecd 100644
--- a/compiler/luci/export/CMakeLists.txt
+++ b/compiler/luci/export/CMakeLists.txt
@@ -10,6 +10,7 @@ target_link_libraries(luci_export PRIVATE luci_lang)
target_link_libraries(luci_export PRIVATE luci_service)
target_link_libraries(luci_export PRIVATE luci_pass)
target_link_libraries(luci_export PRIVATE mio_circle)
+target_link_libraries(luci_export PRIVATE luci_env)
target_link_libraries(luci_export PRIVATE luci_log)
target_link_libraries(luci_export PRIVATE luci_logex)
target_link_libraries(luci_export PRIVATE nncc_common)
diff --git a/compiler/luci/export/src/CircleExporterImpl.cpp b/compiler/luci/export/src/CircleExporterImpl.cpp
index 81109ee62..860cebf6e 100644
--- a/compiler/luci/export/src/CircleExporterImpl.cpp
+++ b/compiler/luci/export/src/CircleExporterImpl.cpp
@@ -16,6 +16,7 @@
#include "CircleExporterImpl.h"
#include "Optimize.h"
+#include "TypeBridge.h"
#include "CircleTensorExporter.h"
#include "CircleOperationExporter.h"
#include "CircleExporterUtils.h"
@@ -36,11 +37,11 @@ luci::CircleInput *input_node(loco::Graph *g, const loco::GraphInputIndex &index
{
for (uint32_t n = 0; n < g->nodes()->size(); ++n)
{
- if (auto pull = dynamic_cast<luci::CircleInput *>(g->nodes()->at(n)))
+ if (auto input = dynamic_cast<luci::CircleInput *>(g->nodes()->at(n)))
{
- if (pull->indexed() && pull->index() == index)
+ if (input->indexed() && input->index() == index)
{
- return pull;
+ return input;
}
}
}
@@ -51,11 +52,11 @@ luci::CircleOutput *output_node(loco::Graph *g, const loco::GraphOutputIndex &in
{
for (uint32_t n = 0; n < g->nodes()->size(); ++n)
{
- if (auto push = dynamic_cast<luci::CircleOutput *>(g->nodes()->at(n)))
+ if (auto output = dynamic_cast<luci::CircleOutput *>(g->nodes()->at(n)))
{
- if (push->indexed() && push->index() == index)
+ if (output->indexed() && output->index() == index)
{
- return push;
+ return output;
}
}
}
@@ -80,6 +81,13 @@ void registerGraphOutputTensors(loco::Graph *graph, luci::SubGraphContext &ctx)
assert(push != nullptr);
auto node = push->from();
assert(node != nullptr);
+
+ // Do not export CircleOutput when it's input is CircleOutputExclude
+ if (dynamic_cast<luci::CircleOutputExclude *>(push->from()) != nullptr)
+ {
+ continue;
+ }
+
ctx._outputs.push_back(luci::get_tensor_index(node));
}
}
@@ -93,8 +101,7 @@ using namespace circle;
using namespace flatbuffers;
Offset<Vector<Offset<OperatorCode>>>
-encodeOperatorCodes(FlatBufferBuilder &builder, std::unordered_map<luci::OpCode, uint32_t> &opcodes,
- std::unordered_map<luci::OpCode, std::string> &custom_opcodes)
+encodeOperatorCodes(FlatBufferBuilder &builder, std::unordered_map<luci::OpCode, uint32_t> &opcodes)
{
std::vector<Offset<OperatorCode>> operator_codes_vec(opcodes.size());
for (auto it : opcodes)
@@ -102,19 +109,15 @@ encodeOperatorCodes(FlatBufferBuilder &builder, std::unordered_map<luci::OpCode,
uint32_t idx = it.second;
if (it.first.opcode != BuiltinOperator_CUSTOM)
{
- operator_codes_vec[idx] = CreateOperatorCode(builder, it.first.opcode);
+ operator_codes_vec[idx] = CreateOperatorCode(builder, it.first.opcode, 0, it.first.version);
}
- else // custom op
+ else
{
- auto opCode = it.first;
- auto custom_code = custom_opcodes.find(opCode);
- if (custom_code == custom_opcodes.end())
- INTERNAL_EXN("Cannot find code for customop even though opcode is BuiltinOperator_CUSTOM");
-
operator_codes_vec[idx] =
- CreateOperatorCode(builder, it.first.opcode, builder.CreateString(custom_code->second));
+ CreateOperatorCode(builder, it.first.opcode, builder.CreateString(it.first.custom_code));
}
}
+
return builder.CreateVector(operator_codes_vec);
}
@@ -136,8 +139,9 @@ CircleExporterImpl::exportSubgraph(SerializedGraphData &gd)
auto inputs = _builder.CreateVector(gd._inputs);
auto outputs = _builder.CreateVector(gd._outputs);
auto operators = _builder.CreateVector(gd._operators);
+ auto name = _builder.CreateString(gd._name);
auto df = gd._data_format;
- auto subgraph = CreateSubGraph(_builder, tensors, inputs, outputs, operators, df);
+ auto subgraph = CreateSubGraph(_builder, tensors, inputs, outputs, operators, name, df);
return subgraph;
}
@@ -146,6 +150,9 @@ void CircleExporterImpl::exportGraph(loco::Graph *graph)
// do graph optimization
optimize(graph);
+ // copy shape/dtype inference data to CircleNode
+ copy_shape_dtype(graph);
+
_builder.Clear();
SerializedModelData md;
@@ -154,6 +161,9 @@ void CircleExporterImpl::exportGraph(loco::Graph *graph)
// This version is taken from comment in fbs
constexpr uint32_t version = 0;
+ // set Subgraph name
+ gd._name = graph->name();
+
// TODO set this value properly
gd._data_format = circle::DataFormat::DataFormat_CHANNELS_LAST;
@@ -170,8 +180,7 @@ void CircleExporterImpl::exportGraph(loco::Graph *graph)
exportNodes(graph, _builder, md, gd);
// encode operator codes
- auto operator_codes =
- encodeOperatorCodes(_builder, md._operator_codes, md._custom_operator_codes);
+ auto operator_codes = encodeOperatorCodes(_builder, md._operator_codes);
// Subgraphs
Offset<SubGraph> subgraph = exportSubgraph(gd);
@@ -203,6 +212,9 @@ void CircleExporterImpl::exportModule(Module *module)
_builder.Clear();
+ // prepare model data
+ prepareModelData(_builder, md);
+
std::vector<flatbuffers::Offset<circle::SubGraph>> subgraph_vec;
for (size_t g = 0; g < module->size(); ++g)
@@ -211,8 +223,14 @@ void CircleExporterImpl::exportModule(Module *module)
optimize(graph);
+ // copy shape/dtype inference data to CircleNode
+ copy_shape_dtype(graph);
+
SerializedGraphData gd;
+ // set Subgraph name
+ gd._name = graph->name();
+
// TODO set this value properly
gd._data_format = circle::DataFormat::DataFormat_CHANNELS_LAST;
@@ -233,8 +251,7 @@ void CircleExporterImpl::exportModule(Module *module)
auto subgraphs = _builder.CreateVector(std::vector<Offset<SubGraph>>{subgraph_vec});
// encode operator codes
- auto operator_codes =
- encodeOperatorCodes(_builder, md._operator_codes, md._custom_operator_codes);
+ auto operator_codes = encodeOperatorCodes(_builder, md._operator_codes);
// Description
std::string description_str = "nnpackage";
diff --git a/compiler/luci/export/src/CircleExporterUtils.cpp b/compiler/luci/export/src/CircleExporterUtils.cpp
index 1272facb2..f097e71c5 100644
--- a/compiler/luci/export/src/CircleExporterUtils.cpp
+++ b/compiler/luci/export/src/CircleExporterUtils.cpp
@@ -70,28 +70,49 @@ circle::TensorType to_circle_tensortype(loco::DataType type)
}
}
+circle::MirrorPadMode to_circle_mirrorpadmode(luci::MirrorPadMode mode)
+{
+ switch (mode)
+ {
+ case luci::MirrorPadMode::REFLECT:
+ return circle::MirrorPadMode::MirrorPadMode_REFLECT;
+ case luci::MirrorPadMode::SYMMETRIC:
+ return circle::MirrorPadMode::MirrorPadMode_SYMMETRIC;
+ default:
+ INTERNAL_EXN_V("trying to convert unsupported luci::MirrorPadMode", oops::to_uint32(mode));
+ }
+}
+
} // namespace luci
namespace luci
{
-uint32_t SerializedModelData::registerBuiltinOpcode(circle::BuiltinOperator builtin_code)
+uint32_t SerializedModelData::registerBuiltinOpcode(circle::BuiltinOperator builtin_code,
+ const int32_t op_version)
{
- auto it = _operator_codes.find(OpCode{builtin_code});
+ assert(op_version > 0);
+
+ auto it = _operator_codes.find(OpCode{builtin_code, "", op_version});
if (it != _operator_codes.end())
{
return it->second;
}
auto idx = static_cast<uint32_t>(_operator_codes.size());
- _operator_codes.emplace(OpCode{builtin_code}, idx);
+ _operator_codes.emplace(OpCode{builtin_code, "", op_version}, idx);
return idx;
}
-uint32_t SerializedModelData::registerCustomOpcode(const std::string &custom_op)
+uint32_t SerializedModelData::registerCustomOpcode(const std::string &custom_code)
{
- circle::BuiltinOperator custom_code = circle::BuiltinOperator_CUSTOM;
- auto idx = registerBuiltinOpcode(custom_code);
- _custom_operator_codes.emplace(OpCode{custom_code}, custom_op);
+ const circle::BuiltinOperator builtin_code = circle::BuiltinOperator_CUSTOM;
+ auto it = _operator_codes.find(OpCode{builtin_code, custom_code});
+ if (it != _operator_codes.end())
+ {
+ return it->second;
+ }
+ auto idx = static_cast<uint32_t>(_operator_codes.size());
+ _operator_codes.emplace(OpCode{builtin_code, custom_code}, idx);
return idx;
}
diff --git a/compiler/luci/export/src/CircleExporterUtils.h b/compiler/luci/export/src/CircleExporterUtils.h
index 6b970fd3c..f9ce6d2bf 100644
--- a/compiler/luci/export/src/CircleExporterUtils.h
+++ b/compiler/luci/export/src/CircleExporterUtils.h
@@ -31,6 +31,7 @@ namespace luci
circle::ActivationFunctionType to_circle_actfunc(luci::FusedActFunc func);
circle::TensorType to_circle_tensortype(loco::DataType type);
+circle::MirrorPadMode to_circle_mirrorpadmode(luci::MirrorPadMode mode);
} // namespace luci
diff --git a/compiler/luci/export/src/CircleOperationExporter.cpp b/compiler/luci/export/src/CircleOperationExporter.cpp
index ad9c7fd4b..3c01b676f 100644
--- a/compiler/luci/export/src/CircleOperationExporter.cpp
+++ b/compiler/luci/export/src/CircleOperationExporter.cpp
@@ -22,6 +22,8 @@
#include <luci/IR/CircleNodes.h>
#include <luci/IR/CircleNodeVisitor.h>
#include <luci/Service/CircleShapeInference.h>
+#include <luci/UserSettings.h>
+#include <luci/Log.h>
#include <loco/IR/CanonicalNodeVisitor.h>
#include <oops/InternalExn.h>
@@ -49,42 +51,125 @@ public:
public:
void visit(luci::CircleAbs *) final;
void visit(luci::CircleAdd *) final;
+ void visit(luci::CircleAddN *) final;
void visit(luci::CircleArgMax *) final;
+ void visit(luci::CircleArgMin *) final;
void visit(luci::CircleAveragePool2D *) final;
+ void visit(luci::CircleBatchMatMul *) final;
void visit(luci::CircleBatchToSpaceND *) final;
+ void visit(luci::CircleCast *) final;
+ void visit(luci::CircleCeil *) final;
void visit(luci::CircleConcatenation *) final;
void visit(luci::CircleConst *) final{/* skip, everything is done in exportOpDefinedTensors */};
void visit(luci::CircleConv2D *) final;
void visit(luci::CircleCos *) final;
+ void visit(luci::CircleCustom *) final;
+ void visit(luci::CircleDepthToSpace *) final;
void visit(luci::CircleDepthwiseConv2D *) final;
void visit(luci::CircleDiv *) final;
- void visit(luci::CircleExp *) final;
+ void visit(luci::CircleElu *) final;
void visit(luci::CircleEqual *) final;
+ void visit(luci::CircleExp *) final;
+ void visit(luci::CircleExpandDims *) final;
+ void visit(luci::CircleFill *) final;
+ void visit(luci::CircleFloor *) final;
+ void visit(luci::CircleFloorDiv *) final;
+ void visit(luci::CircleFloorMod *) final;
void visit(luci::CircleFullyConnected *) final;
+ void visit(luci::CircleGather *) final;
+ void visit(luci::CircleGatherNd *) final;
+ void visit(luci::CircleGreater *) final;
+ void visit(luci::CircleGreaterEqual *) final;
+ void visit(luci::CircleIf *) final;
+ void visit(luci::CircleL2Normalize *) final;
+ void visit(luci::CircleL2Pool2D *) final;
+ void visit(luci::CircleLeakyRelu *) final;
+ void visit(luci::CircleLess *) final;
+ void visit(luci::CircleLessEqual *) final;
+ void visit(luci::CircleLocalResponseNormalization *) final;
+ void visit(luci::CircleLog *) final;
+ void visit(luci::CircleLogicalAnd *) final;
void visit(luci::CircleLogicalNot *) final;
void visit(luci::CircleLogicalOr *) final;
+ void visit(luci::CircleLogistic *) final;
+ void visit(luci::CircleLogSoftmax *) final;
+ void visit(luci::CircleMatrixDiag *) final;
+ void visit(luci::CircleMatrixSetDiag *) final;
void visit(luci::CircleMaximum *) final;
void visit(luci::CircleMaxPool2D *) final;
void visit(luci::CircleMean *) final;
+ void visit(luci::CircleMinimum *) final;
+ void visit(luci::CircleMirrorPad *) final;
void visit(luci::CircleMul *) final;
+ void visit(luci::CircleNeg *) final;
+ void visit(luci::CircleNotEqual *) final;
+ void visit(luci::CircleOneHot *) final;
void visit(luci::CirclePack *) final;
void visit(luci::CirclePad *) final;
+ void visit(luci::CirclePow *) final;
+ void visit(luci::CirclePRelu *) final;
+ void visit(luci::CircleRange *) final;
+ void visit(luci::CircleRank *) final;
+ void visit(luci::CircleReduceAny *) final;
+ void visit(luci::CircleReduceMax *) final;
+ void visit(luci::CircleReduceMin *) final;
+ void visit(luci::CircleReduceProd *) final;
void visit(luci::CircleRelu *) final;
void visit(luci::CircleRelu6 *) final;
+ void visit(luci::CircleReluN1To1 *) final;
void visit(luci::CircleReshape *) final;
+ void visit(luci::CircleResizeBilinear *) final;
+ void visit(luci::CircleResizeNearestNeighbor *) final;
+ void visit(luci::CircleReverseSequence *) final;
+ void visit(luci::CircleReverseV2 *) final;
+ void visit(luci::CircleRound *) final;
void visit(luci::CircleRsqrt *) final;
+ void visit(luci::CircleScatterNd *) final;
+ void visit(luci::CircleSegmentSum *) final;
+ void visit(luci::CircleSelect *) final;
+ void visit(luci::CircleSelectV2 *) final;
+ void visit(luci::CircleShape *) final;
+ void visit(luci::CircleSin *) final;
+ void visit(luci::CircleSlice *) final;
void visit(luci::CircleSoftmax *) final;
+ void visit(luci::CircleSpaceToBatchND *) final;
+ void visit(luci::CircleSpaceToDepth *) final;
+ void visit(luci::CircleSparseToDense *) final;
+ void visit(luci::CircleSplit *) final;
+ void visit(luci::CircleSplitV *) final;
void visit(luci::CircleSqrt *) final;
+ void visit(luci::CircleSquare *) final;
void visit(luci::CircleSquaredDifference *) final;
+ void visit(luci::CircleSqueeze *) final;
+ void visit(luci::CircleStridedSlice *) final;
void visit(luci::CircleSub *) final;
- // TODO CircleTanh
+ void visit(luci::CircleSum *) final;
+ void visit(luci::CircleTanh *) final;
+ void visit(luci::CircleTile *) final;
+ void visit(luci::CircleTopKV2 *) final;
void visit(luci::CircleTranspose *) final;
void visit(luci::CircleTransposeConv *) final;
+ void visit(luci::CircleUnpack *) final;
+ void visit(luci::CircleWhere *) final;
+ void visit(luci::CircleWhile *) final;
+ void visit(luci::CircleZerosLike *) final;
// Circle only
+ void visit(luci::CircleBCQFullyConnected *) final;
+ void visit(luci::CircleBCQGather *) final;
void visit(luci::CircleInstanceNorm *) final;
// Virtual
void visit(luci::CircleInput *) final {}
void visit(luci::CircleOutput *) final {}
+ void visit(luci::CircleOutputDummy *) final {}
+ void visit(luci::CircleOutputExclude *) final {}
+ // Virtual for multiple-outputs
+ void visit(luci::CircleCustomOut *) final {}
+ void visit(luci::CircleIfOut *) final {}
+ void visit(luci::CircleSplitOut *) final {}
+ void visit(luci::CircleSplitVOut *) final {}
+ void visit(luci::CircleTopKV2Out *) final {}
+ void visit(luci::CircleUnpackOut *) final {}
+ void visit(luci::CircleWhileOut *) final {}
private:
/**
@@ -95,6 +180,17 @@ private:
template <class CirclePool2D>
void export_pool_2d(CirclePool2D *node, circle::BuiltinOperator builtin_op);
+ /**
+ * @brief export simple nodes
+ */
+ void export_simple(loco::Node *node, circle::BuiltinOperator bop, circle::BuiltinOptions bot,
+ flatbuffers::Offset<void> options_offset);
+
+ /**
+ * @brief export simple nodes having void options
+ */
+ void export_simple(loco::Node *node, circle::BuiltinOperator bop);
+
private:
FlatBufferBuilder &builder;
SerializedModelData &md;
@@ -105,11 +201,12 @@ template <class CirclePool2D>
void OperationExporter::export_pool_2d(CirclePool2D *node, circle::BuiltinOperator builtin_op)
{
LUCI_ASSERT(builtin_op == circle::BuiltinOperator_MAX_POOL_2D ||
+ builtin_op == circle::BuiltinOperator_L2_POOL_2D ||
builtin_op == circle::BuiltinOperator_AVERAGE_POOL_2D,
- "Should be MaxPool or AvgPool");
+ "Should be L2Pool, MaxPool or AvgPool");
LUCI_ASSERT(node->padding() != luci::Padding::UNDEFINED, "Padding is not set");
- uint32_t op_idx = md.registerBuiltinOpcode(builtin_op);
+ uint32_t op_idx = md.registerBuiltinOpcode(builtin_op, node->op_version());
std::vector<int32_t> inputs_vec{get_tensor_index(node->value())};
std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
auto inputs = builder.CreateVector(inputs_vec);
@@ -125,54 +222,122 @@ void OperationExporter::export_pool_2d(CirclePool2D *node, circle::BuiltinOperat
gd._operators.push_back(op_offset);
}
-void OperationExporter::visit(luci::CircleAbs *node)
+void OperationExporter::export_simple(loco::Node *node, circle::BuiltinOperator bop,
+ circle::BuiltinOptions bot,
+ flatbuffers::Offset<void> options_offset)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_ABS);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x())};
+ uint32_t op_idx =
+ md.registerBuiltinOpcode(bop, loco::must_cast<luci::CircleNode *>(node)->op_version());
+ std::vector<int32_t> inputs_vec;
+ std::vector<int32_t> outputs_vec{get_tensor_index(node)};
+ for (uint32_t i = 0; i < node->arity(); ++i)
+ inputs_vec.push_back(get_tensor_index(node->arg(i)));
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs, bot, options_offset);
+ gd._operators.push_back(op_offset);
+}
+
+void OperationExporter::export_simple(loco::Node *node, circle::BuiltinOperator bop)
+{
+ uint32_t op_idx =
+ md.registerBuiltinOpcode(bop, loco::must_cast<luci::CircleNode *>(node)->op_version());
+ std::vector<int32_t> inputs_vec;
std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
+ for (uint32_t i = 0; i < node->arity(); ++i)
+ inputs_vec.push_back(get_tensor_index(node->arg(i)));
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateAbsOptions(builder);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_AbsOptions, options.Union());
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs);
gd._operators.push_back(op_offset);
}
+void OperationExporter::visit(luci::CircleAbs *node)
+{
+ export_simple(node, circle::BuiltinOperator_ABS, circle::BuiltinOptions_AbsOptions,
+ CreateAbsOptions(builder).Union());
+}
+
void OperationExporter::visit(luci::CircleAdd *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_ADD);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x()), get_tensor_index(node->y())};
+ export_simple(
+ node, circle::BuiltinOperator_ADD, circle::BuiltinOptions_AddOptions,
+ CreateAddOptions(builder, to_circle_actfunc(node->fusedActivationFunction())).Union());
+}
+
+void OperationExporter::visit(luci::CircleAddN *node)
+{
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_ADD_N, node->op_version());
+ std::vector<int32_t> inputs_vec;
std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
+
+ for (uint32_t i = 0; i < node->arity(); ++i)
+ inputs_vec.push_back(get_tensor_index(node->inputs(i)));
+
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateAddOptions(builder, to_circle_actfunc(node->fusedActivationFunction()));
+ auto options = CreateAddNOptions(builder);
auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_AddOptions, options.Union());
+ circle::BuiltinOptions_AddNOptions, options.Union());
gd._operators.push_back(op_offset);
}
void OperationExporter::visit(luci::CircleArgMax *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_ARG_MAX);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()),
- get_tensor_index(node->dimension())};
+ export_simple(node, circle::BuiltinOperator_ARG_MAX, circle::BuiltinOptions_ArgMaxOptions,
+ CreateArgMaxOptions(builder, to_circle_tensortype(node->output_type())).Union());
+}
+
+void OperationExporter::visit(luci::CircleArgMin *node)
+{
+ export_simple(node, circle::BuiltinOperator_ARG_MIN, circle::BuiltinOptions_ArgMinOptions,
+ CreateArgMinOptions(builder, to_circle_tensortype(node->output_type())).Union());
+}
+
+void OperationExporter::visit(luci::CircleAveragePool2D *node)
+{
+ export_pool_2d<luci::CircleAveragePool2D>(node, circle::BuiltinOperator_AVERAGE_POOL_2D);
+}
+
+void OperationExporter::visit(luci::CircleBatchMatMul *node)
+{
+ export_simple(node, circle::BuiltinOperator_BATCH_MATMUL,
+ circle::BuiltinOptions_BatchMatMulOptions,
+ CreateBatchMatMulOptions(builder, node->adj_x(), node->adj_y()).Union());
+}
+
+void OperationExporter::visit(luci::CircleCast *node)
+{
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_CAST, node->op_version());
+ std::vector<int32_t> inputs_vec{get_tensor_index(node->x())};
std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateArgMaxOptions(builder, to_circle_tensortype(node->output_type()));
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_ArgMaxOptions, options.Union());
+
+ flatbuffers::Offset<Operator> op_offset;
+ if (node->out_data_type() != loco::DataType::Unknown)
+ {
+ auto options = CreateCastOptions(builder, to_circle_tensortype(node->in_data_type()),
+ to_circle_tensortype(node->out_data_type()));
+ op_offset = CreateOperator(builder, op_idx, inputs, outputs, circle::BuiltinOptions_CastOptions,
+ options.Union());
+ }
+ else
+ {
+ op_offset = CreateOperator(builder, op_idx, inputs, outputs);
+ }
gd._operators.push_back(op_offset);
}
-void OperationExporter::visit(luci::CircleAveragePool2D *node)
+void OperationExporter::visit(luci::CircleCeil *node)
{
- export_pool_2d<luci::CircleAveragePool2D>(node, circle::BuiltinOperator_AVERAGE_POOL_2D);
+ export_simple(node, circle::BuiltinOperator_CEIL);
}
void OperationExporter::visit(luci::CircleConcatenation *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_CONCATENATION);
+ uint32_t op_idx =
+ md.registerBuiltinOpcode(circle::BuiltinOperator_CONCATENATION, node->op_version());
std::vector<int32_t> inputs_vec;
std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
@@ -190,169 +355,304 @@ void OperationExporter::visit(luci::CircleConcatenation *node)
void OperationExporter::visit(luci::CircleBatchToSpaceND *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_BATCH_TO_SPACE_ND);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()),
- get_tensor_index(node->block_shape()),
- get_tensor_index(node->crops())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
-
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateBatchToSpaceNDOptions(builder);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_BatchToSpaceNDOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_BATCH_TO_SPACE_ND,
+ circle::BuiltinOptions_BatchToSpaceNDOptions,
+ CreateBatchToSpaceNDOptions(builder).Union());
}
void OperationExporter::visit(luci::CircleConv2D *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_CONV_2D);
-
- // Make input, output and options for operator
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()), get_tensor_index(node->filter()),
- get_tensor_index(node->bias())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- circle::Padding padding = getOpPadding(node->padding());
- auto options = CreateConv2DOptions(builder, padding, node->stride()->w(), node->stride()->h(),
- to_circle_actfunc(node->fusedActivationFunction()));
-
- // Make CONV_2D operator
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_Conv2DOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_CONV_2D, circle::BuiltinOptions_Conv2DOptions,
+ CreateConv2DOptions(builder, getOpPadding(node->padding()), node->stride()->w(),
+ node->stride()->h(),
+ to_circle_actfunc(node->fusedActivationFunction()),
+ node->dilation()->w(), node->dilation()->h())
+ .Union());
}
void OperationExporter::visit(luci::CircleCos *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_COS);
+ export_simple(node, circle::BuiltinOperator_COS, circle::BuiltinOptions_CosOptions,
+ CreateCosOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleCustom *node)
+{
+ auto custom_outputs = loco::succs(node);
+
+ uint32_t op_idx = md.registerCustomOpcode(node->custom_code());
+ std::vector<int32_t> inputs_vec;
+ std::vector<int32_t> outputs_vec;
+
+ for (uint32_t index = 0; index < node->numInputs(); index++)
+ {
+ inputs_vec.push_back(get_tensor_index(node->inputs(index)));
+ }
+ for (uint32_t index = 0; index < custom_outputs.size(); index++)
+ {
+ // store in order of index
+ bool found = false;
+ for (auto out : custom_outputs)
+ {
+ auto custom_out = loco::must_cast<luci::CircleCustomOut *>(out);
+ if (custom_out->index() == static_cast<int32_t>(index))
+ {
+ outputs_vec.push_back(get_tensor_index(custom_out));
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ INTERNAL_EXN("Invalid Custom output");
+ }
+ }
- // Make input, output and options for operator
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateCosOptions(builder);
-
- // Make COS operator
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_CosOptions, options.Union());
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> circle_custom_options;
+ std::vector<uint8_t> custom_options_vec{node->custom_options().begin(),
+ node->custom_options().end()};
+ circle_custom_options = builder.CreateVector(custom_options_vec);
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs, circle::BuiltinOptions_NONE,
+ flatbuffers::Offset<void>(), circle_custom_options);
gd._operators.push_back(op_offset);
}
+void OperationExporter::visit(luci::CircleDepthToSpace *node)
+{
+ export_simple(node, circle::BuiltinOperator_DEPTH_TO_SPACE,
+ circle::BuiltinOptions_DepthToSpaceOptions,
+ CreateDepthToSpaceOptions(builder, node->block_size()).Union());
+}
+
void OperationExporter::visit(luci::CircleDepthwiseConv2D *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_DEPTHWISE_CONV_2D);
+ export_simple(node, circle::BuiltinOperator_DEPTHWISE_CONV_2D,
+ circle::BuiltinOptions_DepthwiseConv2DOptions,
+ CreateDepthwiseConv2DOptions(builder, getOpPadding(node->padding()),
+ node->stride()->w(), node->stride()->h(),
+ node->depthMultiplier(),
+ to_circle_actfunc(node->fusedActivationFunction()),
+ node->dilation()->w(), node->dilation()->h())
+ .Union());
+}
- // Make input, output and options for operator
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()), get_tensor_index(node->filter()),
- get_tensor_index(node->bias())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- circle::Padding padding = getOpPadding(node->padding());
- auto options = CreateDepthwiseConv2DOptions(builder, padding, node->stride()->w(),
- node->stride()->h(), node->depthMultiplier(),
- to_circle_actfunc(node->fusedActivationFunction()));
+void OperationExporter::visit(luci::CircleDiv *node)
+{
+ export_simple(
+ node, circle::BuiltinOperator_DIV, circle::BuiltinOptions_DivOptions,
+ CreateDivOptions(builder, to_circle_actfunc(node->fusedActivationFunction())).Union());
+}
- // Make DEPTHWISE_CONV_2D operator
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_DepthwiseConv2DOptions, options.Union());
- gd._operators.push_back(op_offset);
+void OperationExporter::visit(luci::CircleElu *node)
+{
+ export_simple(node, circle::BuiltinOperator_ELU);
}
-void OperationExporter::visit(luci::CircleDiv *node)
+void OperationExporter::visit(luci::CircleEqual *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_DIV);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x()), get_tensor_index(node->y())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateDivOptions(builder, to_circle_actfunc(node->fusedActivationFunction()));
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_DivOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_EQUAL, circle::BuiltinOptions_EqualOptions,
+ CreateEqualOptions(builder).Union());
}
void OperationExporter::visit(luci::CircleExp *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_EXP);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateAbsOptions(builder);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_ExpOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_EXP, circle::BuiltinOptions_ExpOptions,
+ CreateExpOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleExpandDims *node)
+{
+ export_simple(node, circle::BuiltinOperator_EXPAND_DIMS, circle::BuiltinOptions_ExpandDimsOptions,
+ CreateExpandDimsOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleFill *node)
+{
+ export_simple(node, circle::BuiltinOperator_FILL, circle::BuiltinOptions_FillOptions,
+ CreateFillOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleFloor *node)
+{
+ export_simple(node, circle::BuiltinOperator_FLOOR);
+}
+
+void OperationExporter::visit(luci::CircleFloorDiv *node)
+{
+ export_simple(node, circle::BuiltinOperator_FLOOR_DIV, circle::BuiltinOptions_FloorDivOptions,
+ CreateFloorDivOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleFloorMod *node)
+{
+ export_simple(node, circle::BuiltinOperator_FLOOR_MOD, circle::BuiltinOptions_FloorModOptions,
+ CreateFloorModOptions(builder).Union());
}
void OperationExporter::visit(luci::CircleFullyConnected *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_FULLY_CONNECTED);
+ export_simple(
+ node, circle::BuiltinOperator_FULLY_CONNECTED, circle::BuiltinOptions_FullyConnectedOptions,
+ CreateFullyConnectedOptions(builder, to_circle_actfunc(node->fusedActivationFunction()))
+ .Union());
+}
- // Make input, output and options for operator
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()),
- get_tensor_index(node->weights()),
- get_tensor_index(node->bias())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options =
- CreateFullyConnectedOptions(builder, to_circle_actfunc(node->fusedActivationFunction()));
+void OperationExporter::visit(luci::CircleGather *node)
+{
+ export_simple(node, circle::BuiltinOperator_GATHER, circle::BuiltinOptions_GatherOptions,
+ CreateGatherOptions(builder, node->axis()).Union());
+}
- // Make FULLY_CONNECTED operator
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_FullyConnectedOptions, options.Union());
- gd._operators.push_back(op_offset);
+void OperationExporter::visit(luci::CircleGatherNd *node)
+{
+ export_simple(node, circle::BuiltinOperator_GATHER_ND, circle::BuiltinOptions_GatherNdOptions,
+ CreateGatherNdOptions(builder).Union());
}
-void OperationExporter::visit(luci::CircleLogicalNot *node)
+void OperationExporter::visit(luci::CircleGreater *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_LOGICAL_NOT);
+ export_simple(node, circle::BuiltinOperator_GREATER, circle::BuiltinOptions_GreaterOptions,
+ CreateGreaterOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleGreaterEqual *node)
+{
+ export_simple(node, circle::BuiltinOperator_GREATER_EQUAL,
+ circle::BuiltinOptions_GreaterEqualOptions,
+ CreateGreaterEqualOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleIf *node)
+{
+ auto if_outs = loco::succs(node);
+ assert(if_outs.size() == node->output_count());
+
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_IF, node->op_version());
+ std::vector<int32_t> inputs_vec;
+ std::vector<int32_t> outputs_vec;
+
+ inputs_vec.push_back(get_tensor_index(node->cond()));
+ for (uint32_t idx = 0; idx < node->input_count(); ++idx)
+ inputs_vec.push_back(get_tensor_index(node->input(idx)));
+
+ for (uint32_t idx = 0; idx < node->output_count(); ++idx)
+ {
+ // store in order of index
+ bool found = false;
+ for (auto out : if_outs)
+ {
+ auto if_out = loco::must_cast<luci::CircleIfOut *>(out);
+ if (if_out->index() == static_cast<int32_t>(idx))
+ {
+ outputs_vec.push_back(get_tensor_index(if_out));
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ INTERNAL_EXN("Invalid CircleIf output");
+ }
+ }
- // Make input, output and options for operator
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateLogicalNotOptions(builder);
-
- // Make LOGICAL_NOT operator
+ auto options = CreateIfOptions(builder, node->then_branch(), node->else_branch());
auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_LogicalNotOptions, options.Union());
+ circle::BuiltinOptions_IfOptions, options.Union());
gd._operators.push_back(op_offset);
}
+void OperationExporter::visit(luci::CircleL2Normalize *node)
+{
+ export_simple(
+ node, circle::BuiltinOperator_L2_NORMALIZATION, circle::BuiltinOptions_L2NormOptions,
+ CreateL2NormOptions(builder, to_circle_actfunc(node->fusedActivationFunction())).Union());
+}
+
+void OperationExporter::visit(luci::CircleL2Pool2D *node)
+{
+ export_pool_2d<luci::CircleL2Pool2D>(node, circle::BuiltinOperator_L2_POOL_2D);
+}
+
+void OperationExporter::visit(luci::CircleLeakyRelu *node)
+{
+ export_simple(node, circle::BuiltinOperator_LEAKY_RELU, circle::BuiltinOptions_LeakyReluOptions,
+ CreateLeakyReluOptions(builder, node->alpha()).Union());
+}
+
+void OperationExporter::visit(luci::CircleLess *node)
+{
+ export_simple(node, circle::BuiltinOperator_LESS, circle::BuiltinOptions_LessOptions,
+ CreateLessOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleLessEqual *node)
+{
+ export_simple(node, circle::BuiltinOperator_LESS_EQUAL, circle::BuiltinOptions_LessEqualOptions,
+ CreateLessEqualOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleLocalResponseNormalization *node)
+{
+ export_simple(node, circle::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
+ circle::BuiltinOptions_LocalResponseNormalizationOptions,
+ CreateLocalResponseNormalizationOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleLog *node)
+{
+ export_simple(node, circle::BuiltinOperator_LOG);
+}
+
+void OperationExporter::visit(luci::CircleLogicalAnd *node)
+{
+ export_simple(node, circle::BuiltinOperator_LOGICAL_AND, circle::BuiltinOptions_LogicalAndOptions,
+ CreateLogicalAndOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleLogicalNot *node)
+{
+ export_simple(node, circle::BuiltinOperator_LOGICAL_NOT, circle::BuiltinOptions_LogicalNotOptions,
+ CreateLogicalNotOptions(builder).Union());
+}
+
void OperationExporter::visit(luci::CircleLogicalOr *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_LOGICAL_OR);
+ export_simple(node, circle::BuiltinOperator_LOGICAL_OR, circle::BuiltinOptions_LogicalOrOptions,
+ CreateLogicalOrOptions(builder).Union());
+}
- // Make input, output and options for operator
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x()), get_tensor_index(node->y())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateLogicalOrOptions(builder);
+void OperationExporter::visit(luci::CircleLogistic *node)
+{
+ export_simple(node, circle::BuiltinOperator_LOGISTIC);
+}
- // Make LOGICAL_OR operator
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_LogicalOrOptions, options.Union());
- gd._operators.push_back(op_offset);
+void OperationExporter::visit(luci::CircleLogSoftmax *node)
+{
+ export_simple(node, circle::BuiltinOperator_LOG_SOFTMAX, circle::BuiltinOptions_LogSoftmaxOptions,
+ CreateLogSoftmaxOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleMatrixDiag *node)
+{
+ export_simple(node, circle::BuiltinOperator_MATRIX_DIAG, circle::BuiltinOptions_MatrixDiagOptions,
+ CreateMatrixDiagOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleMatrixSetDiag *node)
+{
+ export_simple(node, circle::BuiltinOperator_MATRIX_SET_DIAG,
+ circle::BuiltinOptions_MatrixSetDiagOptions,
+ CreateMatrixSetDiagOptions(builder).Union());
}
void OperationExporter::visit(luci::CircleMaximum *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_MAXIMUM);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x()), get_tensor_index(node->y())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateMaximumMinimumOptions(builder);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_MaximumMinimumOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_MAXIMUM, circle::BuiltinOptions_MaximumMinimumOptions,
+ CreateMaximumMinimumOptions(builder).Union());
}
void OperationExporter::visit(luci::CircleMaxPool2D *node)
@@ -362,259 +662,568 @@ void OperationExporter::visit(luci::CircleMaxPool2D *node)
void OperationExporter::visit(luci::CircleMean *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_MEAN);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()),
- get_tensor_index(node->reduction_indices())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateReducerOptions(builder, node->keep_dims());
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_ReducerOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_MEAN, circle::BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder, node->keep_dims()).Union());
+}
+
+void OperationExporter::visit(luci::CircleMinimum *node)
+{
+ export_simple(node, circle::BuiltinOperator_MINIMUM, circle::BuiltinOptions_MaximumMinimumOptions,
+ CreateMaximumMinimumOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleMirrorPad *node)
+{
+ export_simple(node, circle::BuiltinOperator_MIRROR_PAD, circle::BuiltinOptions_MirrorPadOptions,
+ CreateMirrorPadOptions(builder, to_circle_mirrorpadmode(node->mode())).Union());
}
void OperationExporter::visit(luci::CircleMul *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_MUL);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x()), get_tensor_index(node->y())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateMulOptions(builder, to_circle_actfunc(node->fusedActivationFunction()));
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_MulOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(
+ node, circle::BuiltinOperator_MUL, circle::BuiltinOptions_MulOptions,
+ CreateMulOptions(builder, to_circle_actfunc(node->fusedActivationFunction())).Union());
}
-void OperationExporter::visit(luci::CirclePack *node)
+void OperationExporter::visit(luci::CircleNeg *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_PACK);
- std::vector<int32_t> inputs_vec;
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
+ export_simple(node, circle::BuiltinOperator_NEG, circle::BuiltinOptions_NegOptions,
+ CreateNegOptions(builder).Union());
+}
- for (uint32_t i = 0; i < node->values_count(); ++i)
- inputs_vec.push_back(get_tensor_index(node->values(i)));
+void OperationExporter::visit(luci::CircleNotEqual *node)
+{
+ export_simple(node, circle::BuiltinOperator_NOT_EQUAL, circle::BuiltinOptions_NotEqualOptions,
+ CreateNotEqualOptions(builder).Union());
+}
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreatePackOptions(builder, node->values_count(), node->axis());
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_PackOptions, options.Union());
- gd._operators.push_back(op_offset);
+void OperationExporter::visit(luci::CircleOneHot *node)
+{
+ export_simple(node, circle::BuiltinOperator_ONE_HOT, circle::BuiltinOptions_OneHotOptions,
+ CreateOneHotOptions(builder, node->axis()).Union());
+}
+
+void OperationExporter::visit(luci::CirclePack *node)
+{
+ export_simple(node, circle::BuiltinOperator_PACK, circle::BuiltinOptions_PackOptions,
+ CreatePackOptions(builder, node->values_count(), node->axis()).Union());
}
void OperationExporter::visit(luci::CirclePad *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_PAD);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()),
- get_tensor_index(node->paddings())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreatePadOptions(builder);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_PadOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_PAD, circle::BuiltinOptions_PadOptions,
+ CreatePadOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CirclePow *node)
+{
+ export_simple(node, circle::BuiltinOperator_POW, circle::BuiltinOptions_PowOptions,
+ CreatePowOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CirclePRelu *node)
+{
+ export_simple(node, circle::BuiltinOperator_PRELU);
+}
+
+void OperationExporter::visit(luci::CircleRange *node)
+{
+ export_simple(node, circle::BuiltinOperator_RANGE, circle::BuiltinOptions_RangeOptions,
+ CreateRangeOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleRank *node)
+{
+ export_simple(node, circle::BuiltinOperator_RANK, circle::BuiltinOptions_RankOptions,
+ CreateRankOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleReduceAny *node)
+{
+ export_simple(node, circle::BuiltinOperator_REDUCE_ANY, circle::BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder, node->keep_dims()).Union());
+}
+
+void OperationExporter::visit(luci::CircleReduceMax *node)
+{
+ export_simple(node, circle::BuiltinOperator_REDUCE_MAX, circle::BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder, node->keep_dims()).Union());
+}
+
+void OperationExporter::visit(luci::CircleReduceMin *node)
+{
+ export_simple(node, circle::BuiltinOperator_REDUCE_MIN, circle::BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder, node->keep_dims()).Union());
+}
+
+void OperationExporter::visit(luci::CircleReduceProd *node)
+{
+ export_simple(node, circle::BuiltinOperator_REDUCE_PROD, circle::BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder, node->keep_dims()).Union());
}
void OperationExporter::visit(luci::CircleRelu *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_RELU);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->features())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs);
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_RELU);
}
void OperationExporter::visit(luci::CircleRelu6 *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_RELU6);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->features())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs);
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_RELU6);
}
-void OperationExporter::visit(luci::CircleReshape *node)
+void OperationExporter::visit(luci::CircleReluN1To1 *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_RESHAPE);
-
- // Create inputs and outputs.
- std::vector<int32_t> inputs_vec{get_tensor_index(node->tensor()),
- get_tensor_index(node->shape())};
- std::vector<int32_t> outputs_vec{get_tensor_index(node)};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
+ export_simple(node, circle::BuiltinOperator_RELU_N1_TO_1);
+}
- // Create options.
+void OperationExporter::visit(luci::CircleReshape *node)
+{
auto new_shape = builder.CreateVector<int32_t>(
node->newShape()->rank(), [node](size_t i) { return node->newShape()->dim(i); });
- auto options = CreateReshapeOptions(builder, new_shape);
- // Create the operator.
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_ReshapeOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_RESHAPE, circle::BuiltinOptions_ReshapeOptions,
+ CreateReshapeOptions(builder, new_shape).Union());
}
-void OperationExporter::visit(luci::CircleRsqrt *node)
+void OperationExporter::visit(luci::CircleResizeBilinear *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_RSQRT);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x())};
+ export_simple(
+ node, circle::BuiltinOperator_RESIZE_BILINEAR, circle::BuiltinOptions_ResizeBilinearOptions,
+ CreateResizeBilinearOptions(builder, node->align_corners(), node->half_pixel_centers())
+ .Union());
+}
+
+void OperationExporter::visit(luci::CircleResizeNearestNeighbor *node)
+{
+ export_simple(node, circle::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
+ circle::BuiltinOptions_ResizeNearestNeighborOptions,
+ CreateResizeNearestNeighborOptions(builder, node->align_corners()).Union());
+}
+
+void OperationExporter::visit(luci::CircleReverseSequence *node)
+{
+ export_simple(
+ node, circle::BuiltinOperator_REVERSE_SEQUENCE, circle::BuiltinOptions_ReverseSequenceOptions,
+ CreateReverseSequenceOptions(builder, node->seq_axis(), node->batch_axis()).Union());
+}
+
+void OperationExporter::visit(luci::CircleReverseV2 *node)
+{
+ uint32_t op_idx =
+ md.registerBuiltinOpcode(circle::BuiltinOperator_REVERSE_V2, node->op_version());
+ std::vector<int32_t> inputs_vec{get_tensor_index(node->tensor()), get_tensor_index(node->axis())};
std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs);
+ auto options = CreateReverseV2Options(builder);
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ circle::BuiltinOptions_ReverseSequenceOptions, options.Union());
gd._operators.push_back(op_offset);
}
+void OperationExporter::visit(luci::CircleRound *node)
+{
+ export_simple(node, circle::BuiltinOperator_ROUND);
+}
+
+void OperationExporter::visit(luci::CircleRsqrt *node)
+{
+ export_simple(node, circle::BuiltinOperator_RSQRT);
+}
+
+void OperationExporter::visit(luci::CircleScatterNd *node)
+{
+ export_simple(node, circle::BuiltinOperator_SCATTER_ND, circle::BuiltinOptions_ScatterNdOptions,
+ CreateScatterNdOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleSegmentSum *node)
+{
+ export_simple(node, circle::BuiltinOperator_SEGMENT_SUM, circle::BuiltinOptions_SegmentSumOptions,
+ CreateSegmentSumOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleSelect *node)
+{
+ export_simple(node, circle::BuiltinOperator_SELECT, circle::BuiltinOptions_SelectOptions,
+ CreateSelectOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleSelectV2 *node)
+{
+ export_simple(node, circle::BuiltinOperator_SELECT_V2, circle::BuiltinOptions_SelectV2Options,
+ CreateSelectV2Options(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleShape *node)
+{
+ export_simple(node, circle::BuiltinOperator_SHAPE, circle::BuiltinOptions_ShapeOptions,
+ CreateShapeOptions(builder, to_circle_tensortype(node->out_type())).Union());
+}
+
+void OperationExporter::visit(luci::CircleSin *node)
+{
+ export_simple(node, circle::BuiltinOperator_SIN);
+}
+
+void OperationExporter::visit(luci::CircleSlice *node)
+{
+ export_simple(node, circle::BuiltinOperator_SLICE, circle::BuiltinOptions_SliceOptions,
+ CreateSliceOptions(builder).Union());
+}
+
void OperationExporter::visit(luci::CircleSoftmax *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_SOFTMAX);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->logits())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
+ export_simple(node, circle::BuiltinOperator_SOFTMAX, circle::BuiltinOptions_SoftmaxOptions,
+ CreateSoftmaxOptions(builder, node->beta()).Union());
+}
+
+void OperationExporter::visit(luci::CircleSpaceToBatchND *node)
+{
+ export_simple(node, circle::BuiltinOperator_SPACE_TO_BATCH_ND,
+ circle::BuiltinOptions_SpaceToBatchNDOptions,
+ CreateSpaceToBatchNDOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleSpaceToDepth *node)
+{
+ export_simple(node, circle::BuiltinOperator_SPACE_TO_DEPTH,
+ circle::BuiltinOptions_SpaceToDepthOptions,
+ CreateSpaceToDepthOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleSparseToDense *node)
+{
+ export_simple(node, circle::BuiltinOperator_SPARSE_TO_DENSE,
+ circle::BuiltinOptions_SparseToDenseOptions,
+ CreateSparseToDenseOptions(builder, node->validate_indices()).Union());
+}
+
+void OperationExporter::visit(luci::CircleSplit *node)
+{
+ auto split_outs = loco::succs(node);
+ assert(int32_t(split_outs.size()) == node->num_split());
+
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_SPLIT, node->op_version());
+ // NOTE BuiltinOperator_SPLIT input is placed at second position
+ std::vector<int32_t> inputs_vec{get_tensor_index(node->split_dim()),
+ get_tensor_index(node->input())};
+ std::vector<int32_t> outputs_vec;
+
+ for (int32_t index = 0; index < node->num_split(); index++)
+ {
+ // store in order of index
+ bool found = false;
+ for (auto out : split_outs)
+ {
+ auto split_out = loco::must_cast<luci::CircleSplitOut *>(out);
+ if (split_out->index() == index)
+ {
+ outputs_vec.push_back(get_tensor_index(split_out));
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ INTERNAL_EXN("Invalid Split output");
+ }
+ }
+
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateSoftmaxOptions(builder, node->beta());
+ auto options = CreateSplitOptions(builder, node->num_split());
auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_SoftmaxOptions, options.Union());
+ circle::BuiltinOptions_SplitOptions, options.Union());
gd._operators.push_back(op_offset);
}
-void OperationExporter::visit(luci::CircleSqrt *node)
+void OperationExporter::visit(luci::CircleSplitV *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_SQRT);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
+ auto split_outs = loco::succs(node);
+ assert(int32_t(split_outs.size()) == node->num_split());
+
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_SPLIT_V, node->op_version());
+ std::vector<int32_t> inputs_vec{get_tensor_index(node->input()),
+ get_tensor_index(node->size_splits()),
+ get_tensor_index(node->split_dim())};
+ std::vector<int32_t> outputs_vec;
+
+ for (int32_t index = 0; index < node->num_split(); index++)
+ {
+ // store in order of index
+ bool found = false;
+ for (auto out : split_outs)
+ {
+ auto split_out = loco::must_cast<luci::CircleSplitVOut *>(out);
+ if (split_out->index() == index)
+ {
+ outputs_vec.push_back(get_tensor_index(split_out));
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ INTERNAL_EXN("Invalid SplitV output");
+ }
+ }
+
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs);
+ auto options = CreateSplitVOptions(builder, node->num_split());
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ circle::BuiltinOptions_SplitVOptions, options.Union());
gd._operators.push_back(op_offset);
}
+void OperationExporter::visit(luci::CircleSqrt *node)
+{
+ export_simple(node, circle::BuiltinOperator_SQRT);
+}
+
+void OperationExporter::visit(luci::CircleSquare *node)
+{
+ export_simple(node, circle::BuiltinOperator_SQUARE, circle::BuiltinOptions_SquareOptions,
+ CreateSquareOptions(builder).Union());
+}
+
void OperationExporter::visit(luci::CircleSquaredDifference *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_SQUARED_DIFFERENCE);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x()), get_tensor_index(node->y())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateSquaredDifferenceOptions(builder);
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_SquaredDifferenceOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(node, circle::BuiltinOperator_SQUARED_DIFFERENCE,
+ circle::BuiltinOptions_SquaredDifferenceOptions,
+ CreateSquaredDifferenceOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleSqueeze *node)
+{
+ auto squeeze_dims = builder.CreateVector<int32_t>(node->squeeze_dims());
+ export_simple(node, circle::BuiltinOperator_SQUEEZE, circle::BuiltinOptions_SqueezeOptions,
+ CreateSqueezeOptions(builder, squeeze_dims).Union());
+}
+
+void OperationExporter::visit(luci::CircleStridedSlice *node)
+{
+ export_simple(node, circle::BuiltinOperator_STRIDED_SLICE,
+ circle::BuiltinOptions_StridedSliceOptions,
+ CreateStridedSliceOptions(builder, node->begin_mask(), node->end_mask(),
+ node->ellipsis_mask(), node->new_axis_mask(),
+ node->shrink_axis_mask())
+ .Union());
}
void OperationExporter::visit(luci::CircleSub *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_SUB);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->x()), get_tensor_index(node->y())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
- auto inputs = builder.CreateVector(inputs_vec);
- auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateSubOptions(builder, to_circle_actfunc(node->fusedActivationFunction()));
- auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_SubOptions, options.Union());
- gd._operators.push_back(op_offset);
+ export_simple(
+ node, circle::BuiltinOperator_SUB, circle::BuiltinOptions_SubOptions,
+ CreateSubOptions(builder, to_circle_actfunc(node->fusedActivationFunction())).Union());
}
-// TODO CircleTanh
+void OperationExporter::visit(luci::CircleSum *node)
+{
+ export_simple(node, circle::BuiltinOperator_SUM, circle::BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder, node->keep_dims()).Union());
+}
-void OperationExporter::visit(luci::CircleTranspose *node)
+void OperationExporter::visit(luci::CircleTanh *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_TRANSPOSE);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->arg(0)), get_tensor_index(node->arg(1))};
- std::vector<int32_t> outputs_vec{get_tensor_index(node)};
+ export_simple(node, circle::BuiltinOperator_TANH);
+}
+
+void OperationExporter::visit(luci::CircleTile *node)
+{
+ export_simple(node, circle::BuiltinOperator_TILE, circle::BuiltinOptions_TileOptions,
+ CreateTileOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleTopKV2 *node)
+{
+ auto topkv2_outs = loco::succs(node);
+ int outs_count = int32_t(topkv2_outs.size());
+ assert(outs_count == 2);
+
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_TOPK_V2, node->op_version());
+ std::vector<int32_t> inputs_vec{get_tensor_index(node->input()), get_tensor_index(node->k())};
+ std::vector<int32_t> outputs_vec;
+
+ for (int32_t index = 0; index < outs_count; index++)
+ {
+ // store in order of index
+ bool found = false;
+ for (auto out : topkv2_outs)
+ {
+ auto topkv2_out = loco::must_cast<luci::CircleTopKV2Out *>(out);
+ if (topkv2_out->index() == index)
+ {
+ outputs_vec.push_back(get_tensor_index(topkv2_out));
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ INTERNAL_EXN("Invalid TopKV2 output");
+ }
+ }
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateTransposeOptions(builder);
-
- auto op_offset =
- CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions::BuiltinOptions_TransposeOptions, options.Union());
+ auto options = CreateTopKV2Options(builder);
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ circle::BuiltinOptions_TopKV2Options, options.Union());
gd._operators.push_back(op_offset);
}
+void OperationExporter::visit(luci::CircleTranspose *node)
+{
+ export_simple(node, circle::BuiltinOperator_TRANSPOSE, circle::BuiltinOptions_TransposeOptions,
+ CreateTransposeOptions(builder).Union());
+}
+
void OperationExporter::visit(luci::CircleTransposeConv *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_TRANSPOSE_CONV);
+ export_simple(node, circle::BuiltinOperator_TRANSPOSE_CONV,
+ circle::BuiltinOptions_TransposeConvOptions,
+ CreateTransposeConvOptions(builder, getOpPadding(node->padding()),
+ node->stride()->w(), node->stride()->h())
+ .Union());
+}
+
+void OperationExporter::visit(luci::CircleUnpack *node)
+{
+ LOGGER(l);
+ auto settings = luci::UserSettings::settings();
+
+ auto unpack_outs = loco::succs(node);
+ // NOTE real models may not use all of the outputs
+ if (static_cast<int32_t>(unpack_outs.size()) != node->num())
+ {
+ if (settings->get(luci::UserSettings::Key::DisableValidation))
+ {
+ WARN(l) << "Warning: export Unpack(" << node->name() << ") 'num' not same as outputs";
+ }
+ else
+ assert(false);
+ }
+
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_UNPACK, node->op_version());
+ std::vector<int32_t> inputs_vec{get_tensor_index(node->value())};
+ std::vector<int32_t> outputs_vec;
+
+ for (int32_t index = 0; index < node->num(); index++)
+ {
+ // store in order of index
+ bool found = false;
+ for (auto out : unpack_outs)
+ {
+ auto unpack_out = loco::must_cast<luci::CircleUnpackOut *>(out);
+ if (unpack_out->index() == index)
+ {
+ outputs_vec.push_back(get_tensor_index(unpack_out));
+ found = true;
+ break;
+ }
+ }
+ // NOTE real models may not use all of the outputs
+ if (!found)
+ {
+ if (settings->get(luci::UserSettings::Key::DisableValidation))
+ {
+ WARN(l) << "Warning: export Unpack(" << node->name() << ") output " << index << " not used";
+ }
+ else
+ assert(false);
+ }
+ }
- // Make input, output and options for operator
- std::vector<int32_t> inputs_vec{get_tensor_index(node->inputSizes()),
- get_tensor_index(node->filter()),
- get_tensor_index(node->outBackprop())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- circle::Padding padding = getOpPadding(node->padding());
- auto options =
- CreateTransposeConvOptions(builder, padding, node->stride()->w(), node->stride()->h());
-
- // Make TRANSPOSE_CONV operator
+ auto options = CreateUnpackOptions(builder, node->num(), node->axis());
auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_TransposeConvOptions, options.Union());
+ circle::BuiltinOptions_UnpackOptions, options.Union());
gd._operators.push_back(op_offset);
}
-void OperationExporter::visit(luci::CircleInstanceNorm *node)
+void OperationExporter::visit(luci::CircleWhere *node)
{
- uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_INSTANCE_NORM);
- std::vector<int32_t> inputs_vec{get_tensor_index(node->input()), get_tensor_index(node->gamma()),
- get_tensor_index(node->beta())};
- std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
+ export_simple(node, circle::BuiltinOperator_WHERE, circle::BuiltinOptions_WhereOptions,
+ CreateWhereOptions(builder).Union());
+}
+
+void OperationExporter::visit(luci::CircleWhile *node)
+{
+ auto while_outs = loco::succs(node);
+ assert(while_outs.size() == node->output_count());
+
+ uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_WHILE, node->op_version());
+ std::vector<int32_t> inputs_vec;
+ std::vector<int32_t> outputs_vec;
+
+ for (uint32_t idx = 0; idx < node->input_count(); ++idx)
+ inputs_vec.push_back(get_tensor_index(node->input(idx)));
+
+ for (uint32_t idx = 0; idx < node->output_count(); ++idx)
+ {
+ // store in order of index
+ bool found = false;
+ for (auto out : while_outs)
+ {
+ auto while_out = loco::must_cast<luci::CircleWhileOut *>(out);
+ if (while_out->index() == static_cast<int32_t>(idx))
+ {
+ outputs_vec.push_back(get_tensor_index(while_out));
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ INTERNAL_EXN("Invalid CircleWhile output");
+ }
+ }
+
auto inputs = builder.CreateVector(inputs_vec);
auto outputs = builder.CreateVector(outputs_vec);
- auto options = CreateInstanceNormOptions(builder, node->epsilon(),
- to_circle_actfunc(node->fusedActivationFunction()));
+ auto options = CreateWhileOptions(builder, node->cond_branch(), node->body_branch());
auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
- circle::BuiltinOptions_InstanceNormOptions, options.Union());
+ circle::BuiltinOptions_WhileOptions, options.Union());
gd._operators.push_back(op_offset);
}
-void OperationExporter::visit(luci::CircleEqual *node)
+void OperationExporter::visit(luci::CircleZerosLike *node)
{
- uint32_t opcode_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_EQUAL);
- std::vector<int32_t> inputs{get_tensor_index(node->x()), get_tensor_index(node->y())};
- std::vector<int32_t> outputs{get_tensor_index(node)};
-
- auto fb_inputs = builder.CreateVector(inputs);
- auto fb_outputs = builder.CreateVector(outputs);
+ export_simple(node, circle::BuiltinOperator_ZEROS_LIKE, circle::BuiltinOptions_ZerosLikeOptions,
+ CreateZerosLikeOptions(builder).Union());
+}
- auto options = CreateEqualOptions(builder);
+void OperationExporter::visit(luci::CircleBCQFullyConnected *node)
+{
+ export_simple(node, circle::BuiltinOperator_BCQ_FULLY_CONNECTED,
+ circle::BuiltinOptions_BCQFullyConnectedOptions,
+ CreateBCQFullyConnectedOptions(builder, node->weights_hidden_size(),
+ to_circle_actfunc(node->fusedActivationFunction()))
+ .Union());
+}
- auto op_offset = CreateOperator(builder, opcode_idx, fb_inputs, fb_outputs,
- circle::BuiltinOptions_EqualOptions, options.Union());
+void OperationExporter::visit(luci::CircleBCQGather *node)
+{
+ export_simple(node, circle::BuiltinOperator_BCQ_GATHER, circle::BuiltinOptions_BCQGatherOptions,
+ CreateBCQGatherOptions(builder, node->input_hidden_size(), node->axis()).Union());
+}
- gd._operators.push_back(op_offset);
+void OperationExporter::visit(luci::CircleInstanceNorm *node)
+{
+ export_simple(node, circle::BuiltinOperator_INSTANCE_NORM,
+ circle::BuiltinOptions_InstanceNormOptions,
+ CreateInstanceNormOptions(builder, node->epsilon(),
+ to_circle_actfunc(node->fusedActivationFunction()))
+ .Union());
}
void exportNode(loco::Node *node, flatbuffers::FlatBufferBuilder &builder, SerializedModelData &md,
SerializedGraphData &gd)
{
- // TODO Use explicit tagging to prevent possible mistake
- auto isNoOp = [](loco::Node *node) {
- // If there is only one input and the TensorIndex for the input is same
- // as the TensorIndex of the output then this node is just a dummy node
- if (node->arity() == 1)
- {
- assert(node->arg(0) != nullptr);
- return get_tensor_index(node) == get_tensor_index(node->arg(0));
- }
- return false;
- };
-
- if (isNoOp(node))
- {
- // Skip if a given node is marked as NoOp (op with no effect) before
- return;
- }
-
if (auto circle_node = dynamic_cast<luci::CircleNode *>(node))
{
OperationExporter exporter{builder, md, gd};
diff --git a/compiler/luci/export/src/CircleTensorExporter.cpp b/compiler/luci/export/src/CircleTensorExporter.cpp
index ef9b9d7d9..5cad3920b 100644
--- a/compiler/luci/export/src/CircleTensorExporter.cpp
+++ b/compiler/luci/export/src/CircleTensorExporter.cpp
@@ -15,6 +15,7 @@
*/
#include "CircleTensorExporter.h"
+#include "TypeBridge.h"
#include <luci/IR/CircleNodes.h>
#include <luci/IR/CircleNodeVisitor.h>
@@ -52,6 +53,9 @@ public:
const ShapeDescription &shape(void) const { return _shape; }
void shape(const ShapeDescription &shape) { _shape = shape; }
+ luci::ShapeStatus shape_status(void) const { return _shape_status; }
+ void shape_status(luci::ShapeStatus ss) { _shape_status = ss; }
+
public:
luci::CircleConst *content(void) const { return _content; }
void content(luci::CircleConst *c) { _content = c; }
@@ -62,8 +66,9 @@ public:
private:
std::string _name;
- circle::TensorType _dtype;
- ShapeDescription _shape;
+ circle::TensorType _dtype{circle::TensorType_FLOAT32};
+ ShapeDescription _shape{};
+ luci::ShapeStatus _shape_status{luci::ShapeStatus::UNDEFINED};
luci::CircleConst *_content = nullptr;
luci::CircleQuantParam *_quantparam = nullptr;
@@ -76,30 +81,16 @@ struct NoOpDetector final : public luci::CircleNodeMutableVisitor<bool>
// Input is Virtual but does produce a Tensor
// Output is Virtual that does not produce any Tensor
bool visit(luci::CircleOutput *) final { return true; }
+ bool visit(luci::CircleOutputExclude *) final { return true; }
// Return false by default
bool visit(luci::CircleNode *) final { return false; }
};
-void allocateCircleTensor(CircleNode *node, CircleTensorContext &ctx)
+void allocateCircleTensorInfo(CircleNode *node, CircleTensorContext &ctx)
{
LOGGER(l);
- auto isNoOp = [](loco::Node *node) {
- if (auto circle_node = dynamic_cast<luci::CircleNode *>(node))
- {
- NoOpDetector d;
- return circle_node->accept(&d);
- }
- return false;
- };
-
- if (isNoOp(node))
- {
- set_tensor_index(node, get_tensor_index(node->arg(0)));
- return;
- }
-
auto tensor_index = static_cast<CircleTensorIndex>(ctx.size());
// TODO Use Graph-level metadata for Input & Output
// auto tensor_name = "t_" + std::to_string(tensor_index);
@@ -111,8 +102,10 @@ void allocateCircleTensor(CircleNode *node, CircleTensorContext &ctx)
CircleTensoInfo tensor_info;
tensor_info.name(tensor_name);
- tensor_info.dtype(TypeInference::get(node));
- tensor_info.shape(ShapeInference::get(node));
+ tensor_info.dtype(to_circle_tensortype(luci::node_dtype(node)));
+ if (node->shape_status() == ShapeStatus::VALID)
+ tensor_info.shape(to_shape_description(luci::node_shape(node)));
+ tensor_info.shape_status(node->shape_status());
tensor_info.content(dynamic_cast<luci::CircleConst *>(node));
tensor_info.quantparam(node->quantparam());
@@ -122,6 +115,108 @@ void allocateCircleTensor(CircleNode *node, CircleTensorContext &ctx)
ctx.emplace_back(tensor_info);
}
+class MultiOutputDetector final : public luci::CircleNodeMutableVisitor<bool>
+{
+public:
+ MultiOutputDetector(CircleTensorContext &ctx) : _ctx(ctx) {}
+
+private:
+ void store_outputs(luci::CircleNode *node, uint32_t count)
+ {
+ auto outs = loco::succs(node);
+ assert(outs.size() == count);
+ (void)count; // for unused variable error in release build
+ for (auto out : outs)
+ {
+ auto circle_out = loco::must_cast<luci::CircleNode *>(out);
+ allocateCircleTensorInfo(circle_out, _ctx);
+ }
+ set_tensor_index(node, -1);
+ }
+
+public:
+ bool visit(luci::CircleIfOut *) final { return true; }
+ bool visit(luci::CircleSplitOut *) final { return true; }
+ bool visit(luci::CircleSplitVOut *) final { return true; }
+ bool visit(luci::CircleTopKV2Out *) final { return true; }
+ bool visit(luci::CircleUnpackOut *) final { return true; }
+ bool visit(luci::CircleWhileOut *) final { return true; }
+
+ bool visit(luci::CircleIf *node) final
+ {
+ store_outputs(node, node->output_count());
+ return true;
+ }
+
+ bool visit(luci::CircleSplit *node) final
+ {
+ store_outputs(node, uint32_t(node->num_split()));
+ return true;
+ }
+
+ bool visit(luci::CircleSplitV *node) final
+ {
+ store_outputs(node, uint32_t(node->num_split()));
+ return true;
+ }
+
+ bool visit(luci::CircleTopKV2 *node) final
+ {
+ store_outputs(node, 2);
+ return true;
+ }
+
+ bool visit(luci::CircleUnpack *node) final
+ {
+ store_outputs(node, node->num());
+ return true;
+ }
+
+ bool visit(luci::CircleWhile *node) final
+ {
+ store_outputs(node, node->output_count());
+ return true;
+ }
+
+ // Return false by default
+ bool visit(luci::CircleNode *) final { return false; }
+
+private:
+ CircleTensorContext &_ctx;
+};
+
+void allocateCircleTensor(CircleNode *node, CircleTensorContext &ctx)
+{
+ if (node == nullptr)
+    throw std::runtime_error("allocateCircleTensor Failed : node is nullptr");
+
+ auto isNoOp = [](loco::Node *node) {
+ if (auto circle_node = dynamic_cast<luci::CircleNode *>(node))
+ {
+ NoOpDetector d;
+ return circle_node->accept(&d);
+ }
+ return false;
+ };
+
+ if (isNoOp(node))
+ {
+ set_tensor_index(node, -1);
+ return;
+ }
+
+ // TODO revise this when loco supports multiple outputs
+ // NOTE this will store all virtual output tensors and skip for the real node
+ if (auto circle_node = dynamic_cast<luci::CircleNode *>(node))
+ {
+ MultiOutputDetector d(ctx);
+ if (circle_node->accept(&d))
+ return;
+ }
+
+ allocateCircleTensorInfo(node, ctx);
+}
+
} // namespace
namespace
@@ -166,18 +261,22 @@ flatbuffers::Offset<circle::Buffer> encodeOpBufferByDType(FlatBufferBuilder &bui
template <>
flatbuffers::Offset<circle::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, luci::CircleConst *c)
{
- // TODO use switch
- if (c->dtype() == loco::DataType::FLOAT32)
+ switch (c->dtype())
{
- return encodeOpBufferByDType<loco::DataType::FLOAT32>(builder, c);
- }
- else if (c->dtype() == loco::DataType::S32)
- {
- return encodeOpBufferByDType<loco::DataType::S32>(builder, c);
- }
- else if (c->dtype() == loco::DataType::U8)
- {
- return encodeOpBufferByDType<loco::DataType::U8>(builder, c);
+ case loco::DataType::FLOAT32:
+ return encodeOpBufferByDType<loco::DataType::FLOAT32>(builder, c);
+ case loco::DataType::S16:
+ return encodeOpBufferByDType<loco::DataType::S16>(builder, c);
+ case loco::DataType::S32:
+ return encodeOpBufferByDType<loco::DataType::S32>(builder, c);
+ case loco::DataType::S64:
+ return encodeOpBufferByDType<loco::DataType::S64>(builder, c);
+ case loco::DataType::U8:
+ return encodeOpBufferByDType<loco::DataType::U8>(builder, c);
+ case loco::DataType::BOOL:
+ return encodeOpBufferByDType<loco::DataType::BOOL>(builder, c);
+ default:
+ break;
}
INTERNAL_EXN_V("Unsupported datatype", oops::to_uint32(c->dtype()));
@@ -210,7 +309,9 @@ void exportOpDefinedTensor(const CircleTensoInfo &info, FlatBufferBuilder &build
SerializedModelData &md, SerializedGraphData &gd)
{
// Create and register output tensor shape
- auto shape_offset = encodeShape(builder, info.shape());
+ flatbuffers::Offset<Vector<int32_t>> shape_offset;
+ if (info.shape_status() == ShapeStatus::VALID)
+ shape_offset = encodeShape(builder, info.shape());
// encode and register output tensor buffer
auto buffer =
@@ -249,9 +350,21 @@ void exportOpDefinedTensors(loco::Graph *g, FlatBufferBuilder &builder, Serializ
{
CircleTensorContext tensor_ctx;
+  // NOTE There may exist a dangling CircleInput that is not visited with postorder_traversal()
+  // All dangling CircleOutput should be visited by postorder_traversal()
+ auto nodes = g->nodes();
+ for (uint32_t n = 0; n < nodes->size(); ++n)
+ {
+ auto node = dynamic_cast<luci::CircleInput *>(nodes->at(n));
+ if (node != nullptr)
+ allocateCircleTensor(node, tensor_ctx);
+ }
+
for (auto node : loco::postorder_traversal(loco::output_nodes(g)))
{
- CircleNode *circle_node = dynamic_cast<luci::CircleNode *>(node);
+ CircleNode *circle_node = loco::must_cast<luci::CircleNode *>(node);
+ if (dynamic_cast<const luci::CircleInput *>(circle_node) != nullptr)
+ continue;
allocateCircleTensor(circle_node, tensor_ctx);
}
diff --git a/compiler/luci/export/src/ProgressReporter.cpp b/compiler/luci/export/src/ProgressReporter.cpp
index ac9c3d9a8..216bd3f2a 100644
--- a/compiler/luci/export/src/ProgressReporter.cpp
+++ b/compiler/luci/export/src/ProgressReporter.cpp
@@ -49,36 +49,36 @@ namespace luci
void ProgressReporter::notify(const logo::PhaseEventInfo<logo::PhaseEvent::PhaseBegin> *)
{
- LOGGER(prime);
+ LOGGER(l);
- INFO(prime) << "==============================================================";
- INFO(prime) << "luci::PhaseRunner<" << to_str(strategy()) << ">";
- INFO(prime) << "Initial graph";
- INFO(prime) << fmt(graph());
+ VERBOSE(l, 4) << "==============================================================";
+ VERBOSE(l, 4) << "luci::PhaseRunner<" << to_str(strategy()) << ">";
+ VERBOSE(l, 4) << "Initial graph";
+ VERBOSE(l, 4) << fmt(graph());
}
void ProgressReporter::notify(const logo::PhaseEventInfo<logo::PhaseEvent::PhaseEnd> *)
{
- LOGGER(prime);
+ LOGGER(l);
- INFO(prime) << "luci::PhaseRunner<" << to_str(strategy()) << "> - done";
+ VERBOSE(l, 4) << "luci::PhaseRunner<" << to_str(strategy()) << "> - done";
}
void ProgressReporter::notify(const logo::PhaseEventInfo<logo::PhaseEvent::PassBegin> *info)
{
- LOGGER(prime);
+ LOGGER(l);
- INFO(prime) << "--------------------------------------------------------------";
- INFO(prime) << "Before " << logo::pass_name(info->pass());
+ VERBOSE(l, 4) << "--------------------------------------------------------------";
+ VERBOSE(l, 4) << "Before " << logo::pass_name(info->pass());
}
void ProgressReporter::notify(const logo::PhaseEventInfo<logo::PhaseEvent::PassEnd> *info)
{
- LOGGER(prime);
+ LOGGER(l);
- INFO(prime) << "After " << logo::pass_name(info->pass())
- << " (changed: " << to_char(info->changed()) << ")";
- INFO(prime) << fmt(graph());
+ VERBOSE(l, 4) << "After " << logo::pass_name(info->pass())
+ << " (changed: " << to_char(info->changed()) << ")";
+ VERBOSE(l, 4) << fmt(graph());
}
} // namespace luci
diff --git a/compiler/luci/export/src/SerializedData.h b/compiler/luci/export/src/SerializedData.h
index 84249653c..251daa0ea 100644
--- a/compiler/luci/export/src/SerializedData.h
+++ b/compiler/luci/export/src/SerializedData.h
@@ -29,8 +29,20 @@ namespace luci
struct OpCode
{
circle::BuiltinOperator opcode;
-
- bool operator==(const OpCode &rhs) const { return opcode == rhs.opcode; }
+ std::string custom_code{""};
+ int32_t version = 1;
+
+ bool operator==(const OpCode &rhs) const
+ {
+ if (opcode == circle::BuiltinOperator_CUSTOM)
+ {
+ return custom_code == rhs.custom_code;
+ }
+ else
+ {
+ return opcode == rhs.opcode;
+ }
+ }
};
} // namespace luci
@@ -53,11 +65,13 @@ namespace luci
*/
struct SubGraphContext
{
+ /// @brief SubGraph name
+ std::string _name;
/// @brief SubGraph input tensor id
std::vector<int32_t> _inputs;
/// @brief SubGraph output tensor id
std::vector<int32_t> _outputs;
- /// @DataFormat for SubGraph
+ /// @brief DataFormat for SubGraph
circle::DataFormat _data_format{circle::DataFormat::DataFormat_CHANNELS_LAST};
};
@@ -68,7 +82,6 @@ struct SerializedModelData final
SerializedModelData(const SerializedModelData &) = delete;
std::unordered_map<OpCode, uint32_t> _operator_codes;
- std::unordered_map<OpCode, std::string> _custom_operator_codes;
std::vector<flatbuffers::Offset<circle::Buffer>> _buffers;
/**
@@ -76,7 +89,7 @@ struct SerializedModelData final
* @param builtin_code
* @return idx of opcode in table of opcodes (see schema)
*/
- uint32_t registerBuiltinOpcode(circle::BuiltinOperator builtin_code);
+ uint32_t registerBuiltinOpcode(circle::BuiltinOperator builtin_code, const int32_t op_version);
uint32_t registerCustomOpcode(const std::string &custom_op);
};
diff --git a/compiler/luci/export/src/TypeBridge.cpp b/compiler/luci/export/src/TypeBridge.cpp
new file mode 100644
index 000000000..9ccd52376
--- /dev/null
+++ b/compiler/luci/export/src/TypeBridge.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TypeBridge.h"
+
+#include "CircleExporterUtils.h"
+
+#include <luci/IR/CircleNodes.h>
+#include <luci/IR/CircleNodeVisitor.h>
+#include <luci/Service/CircleTypeInference.h>
+#include <luci/Service/CircleShapeInference.h>
+
+#include <loco/Service/TypeInference.h>
+#include <loco/Service/ShapeInference.h>
+
+namespace
+{
+
+/**
+ * @brief CopySelector will return condition of copy shape/type inference to node
+ */
+struct CopySelector final : public luci::CircleNodeVisitor<bool>
+{
+ // return false(don't copy) for nodes that provides shape/type from nature
+ bool visit(const luci::CircleInput *) final { return false; }
+ bool visit(const luci::CircleConst *) final { return false; }
+
+ // default is copy attributes
+ bool visit(const luci::CircleNode *) { return true; }
+};
+
+} // namespace
+
+namespace luci
+{
+
+loco::TensorShape node_shape(CircleNode *node)
+{
+ loco::TensorShape shape;
+
+ shape.rank(node->rank());
+ for (uint32_t r = 0; r < node->rank(); ++r)
+ {
+ shape.dim(r) = loco::Dimension(node->dim(r).value());
+ }
+ return shape;
+}
+
+loco::DataType node_dtype(CircleNode *node) { return node->dtype(); }
+
+void copy_shape_dtype(loco::Graph *graph)
+{
+ /**
+   * @note We will iterate all the nodes in the graph to include dangling nodes
+ */
+ auto nodes = graph->nodes();
+ for (uint32_t n = 0; n < nodes->size(); ++n)
+ {
+ auto node = loco::must_cast<luci::CircleNode *>(nodes->at(n));
+
+ CopySelector cs;
+ if (node->accept(&cs))
+ {
+      // NOTE not all nodes have inferred shape/dtype: multiple outs may not be
+ // visited when outputs are not used
+ // TODO fix shape inference traversal
+ // NOTE when loco supports multiple outputs in nature this issue should be
+ // resolved also
+
+ if (loco::dtype_known(node))
+ {
+ node->dtype(loco::dtype_get(node));
+ }
+
+ if (loco::shape_known(node))
+ {
+ auto shape = loco::shape_get(node).as<loco::TensorShape>();
+ node->rank(shape.rank());
+ for (uint32_t r = 0; r < shape.rank(); ++r)
+ {
+ node->dim(r) = loco::Dimension(shape.dim(r).value());
+ }
+
+        // ShapeStatus should be updated only when the status was UNDEFINED
+ if (node->shape_status() == ShapeStatus::UNDEFINED)
+ node->shape_status(ShapeStatus::VALID);
+ }
+ }
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/export/src/TypeBridge.h b/compiler/luci/export/src/TypeBridge.h
new file mode 100644
index 000000000..a63fbce54
--- /dev/null
+++ b/compiler/luci/export/src/TypeBridge.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TYPE_BRIDGE_H__
+#define __TYPE_BRIDGE_H__
+
+#include <luci/IR/CircleNode.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+/**
+ * @brief node_shape() will return loco::TensorShape of CircleNode
+ */
+loco::TensorShape node_shape(CircleNode *node);
+
+/**
+ * @brief node_dtype() will return loco::DataType of CircleNode
+ */
+loco::DataType node_dtype(CircleNode *node);
+
+/**
+ * @brief copy_shape_dtype() will copy shape and dtype inference data to CircleNode
+ */
+void copy_shape_dtype(loco::Graph *graph);
+
+} // namespace luci
+
+#endif // __TYPE_BRIDGE_H__
diff --git a/compiler/luci/import/CMakeLists.txt b/compiler/luci/import/CMakeLists.txt
index bc9a9152a..2ae00b837 100644
--- a/compiler/luci/import/CMakeLists.txt
+++ b/compiler/luci/import/CMakeLists.txt
@@ -7,6 +7,7 @@ target_include_directories(luci_import PRIVATE src)
target_include_directories(luci_import PUBLIC include)
target_link_libraries(luci_import PUBLIC luci_lang)
target_link_libraries(luci_import PUBLIC mio_circle)
+target_link_libraries(luci_import PRIVATE luci_env)
target_link_libraries(luci_import PRIVATE luci_log)
target_link_libraries(luci_import PRIVATE luci_logex)
target_link_libraries(luci_import PRIVATE nncc_common)
diff --git a/compiler/luci/import/include/luci/Import/CircleReader.h b/compiler/luci/import/include/luci/Import/CircleReader.h
index fcbe09ceb..3d85b9e35 100644
--- a/compiler/luci/import/include/luci/Import/CircleReader.h
+++ b/compiler/luci/import/include/luci/Import/CircleReader.h
@@ -21,6 +21,7 @@
#include <luci/IR/AttrFusedActFunc.h>
#include <luci/IR/AttrPadding.h>
+#include <luci/IR/CircleNode.h>
#include <luci/IR/CircleQuantParam.h>
#include <loco.h>
@@ -42,9 +43,13 @@ const circle::QuantizationParametersT *tensor_quantization(const circle::TensorT
loco::DataType luci_datatype(circle::TensorType type);
FusedActFunc luci_actfunc(const circle::ActivationFunctionType type);
Padding luci_padding(const circle::Padding padding);
+MirrorPadMode luci_mirrorpad_mode(const circle::MirrorPadMode mode);
std::unique_ptr<CircleQuantParam>
luci_quantparam(const circle::QuantizationParametersT *quantization);
+/// @brief Copy common tensor attributes such as name, type, etc. to node.
+void copy_tensor_attributes(const circle::TensorT &tensor, CircleNode *node);
+
/**
* @brief Loads Circle file and provides helpers to access attributes
*/
@@ -56,6 +61,9 @@ private:
using CircleOperators_t = std::vector<std::unique_ptr<circle::OperatorT>>;
using CircleOperatorCodes_t = std::vector<std::unique_ptr<circle::OperatorCodeT>>;
+ using CircleSubGraphsPtr_t = flatbuffers::Vector<flatbuffers::Offset<circle::SubGraph>>;
+ using CircleTensorsPtr_t = flatbuffers::Vector<flatbuffers::Offset<circle::Tensor>>;
+
public:
CircleReader() = default;
@@ -68,6 +76,8 @@ public:
const std::vector<int32_t> &outputs() const { return _current_subgraph->outputs; }
const std::string &name() const { return _current_subgraph->name; }
+ const CircleTensorsPtr_t *tensors_ptr() const { return _tensors_ptr; }
+
uint32_t num_subgraph() const { return _model->subgraphs.size(); }
circle::BuiltinOperator builtin_code(const circle::OperatorT &op) const;
@@ -80,6 +90,9 @@ public:
private:
std::unique_ptr<const circle::ModelT> _model;
const circle::SubGraphT *_current_subgraph{nullptr};
+
+ const circle::Model *_model_ptr{nullptr};
+ const CircleTensorsPtr_t *_tensors_ptr{nullptr};
};
} // namespace luci
diff --git a/compiler/luci/import/include/luci/Import/GraphBuilder.h b/compiler/luci/import/include/luci/Import/GraphBuilder.h
index 61f673fb6..548264dac 100644
--- a/compiler/luci/import/include/luci/Import/GraphBuilder.h
+++ b/compiler/luci/import/include/luci/Import/GraphBuilder.h
@@ -18,6 +18,7 @@
#define __LUCI_IMPORT_GRAPH_BUILDER_H__
#include "GraphBuilderContext.h"
+#include "GraphBuilderBase.h"
#include <mio/circle/schema_generated.h>
@@ -25,25 +26,14 @@ namespace luci
{
/**
- * @brief Interface of convert circle:: NodeDef to loco::Node (e.g., Conv2DGraphBuilder)
+ * @brief Base of general single output graph builder(e.g., Conv2DGraphBuilder)
*/
-class GraphBuilder
+class GraphBuilder : public GraphBuilderBase
{
public:
- struct ValidateArgs
- {
- ValidateArgs(const circle::OperatorT &o, const CircleReader &r) : op(o), reader(r) {}
-
- const circle::OperatorT &op;
- const CircleReader &reader;
- };
-
-public:
virtual ~GraphBuilder() = default;
- virtual bool validate(const ValidateArgs &) const = 0;
-
- void build(const circle::OperatorT &op, GraphBuilderContext *context) const;
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
private:
virtual CircleNode *build_node(const circle::OperatorT &op,
diff --git a/compiler/luci/import/include/luci/Import/GraphBuilderBase.h b/compiler/luci/import/include/luci/Import/GraphBuilderBase.h
new file mode 100644
index 000000000..a0cd008e0
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/GraphBuilderBase.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_GRAPH_BUILDER_BASE_H__
+#define __LUCI_IMPORT_GRAPH_BUILDER_BASE_H__
+
+#include "GraphBuilderContext.h"
+
+#include <mio/circle/schema_generated.h>
+
+namespace luci
+{
+
+/**
+ * @brief Interface of convert circle::OperatorT to CircleNode
+ */
+struct GraphBuilderBase
+{
+ struct ValidateArgs
+ {
+ ValidateArgs(const circle::OperatorT &o, const CircleReader &r) : op(o), reader(r) {}
+
+ const circle::OperatorT &op;
+ const CircleReader &reader;
+ };
+
+ virtual bool validate(const ValidateArgs &) const = 0;
+ virtual void build(const circle::OperatorT &op, GraphBuilderContext *context) const = 0;
+
+ virtual ~GraphBuilderBase() = default;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_GRAPH_BUILDER_BASE_H__
diff --git a/compiler/luci/import/include/luci/Import/GraphBuilderContext.h b/compiler/luci/import/include/luci/Import/GraphBuilderContext.h
index 8d464181d..72e237abc 100644
--- a/compiler/luci/import/include/luci/Import/GraphBuilderContext.h
+++ b/compiler/luci/import/include/luci/Import/GraphBuilderContext.h
@@ -24,6 +24,7 @@
#include <loco.h>
#include <map>
+#include <set>
namespace luci
{
@@ -48,13 +49,29 @@ private:
};
/**
+ * @brief Set of TensorIndex values that are outputs of operators,
+ * including graph input nodes
+ */
+class IndexTensorOutputs
+{
+public:
+ void enroll(TensorIndex idx);
+
+ bool find(TensorIndex idx);
+
+private:
+ std::set<TensorIndex> _set;
+};
+
+/**
* @brief Class to store context to build loco graph IR from TensorFlow
*/
class GraphBuilderContext
{
public:
- GraphBuilderContext(loco::Graph *g, CircleReader *reader, IndexNodeFinder *nodefinder)
- : _g(g), _reader(reader), _indexnodefinder(nodefinder)
+ GraphBuilderContext(loco::Graph *g, CircleReader *reader, IndexNodeFinder *nodefinder,
+ IndexTensorOutputs *tensoroutputs)
+ : _g(g), _reader(reader), _indexnodefinder(nodefinder), _indextensoroutputs(tensoroutputs)
{
// DO NOTHING
}
@@ -67,11 +84,13 @@ public:
CircleReader *reader() { return _reader; }
IndexNodeFinder *nodefinder() { return _indexnodefinder; }
+ IndexTensorOutputs *tensoroutputs() { return _indextensoroutputs; }
private:
loco::Graph *_g;
CircleReader *_reader;
IndexNodeFinder *_indexnodefinder;
+ IndexTensorOutputs *_indextensoroutputs;
};
} // namespace luci
diff --git a/compiler/luci/import/include/luci/Import/GraphBuilderRegistry.h b/compiler/luci/import/include/luci/Import/GraphBuilderRegistry.h
index 99054e7b6..b8dc22fdd 100644
--- a/compiler/luci/import/include/luci/Import/GraphBuilderRegistry.h
+++ b/compiler/luci/import/include/luci/Import/GraphBuilderRegistry.h
@@ -17,7 +17,7 @@
#ifndef __LUCI_IMPORT_GRAPH_BUILDER_REGISTRY_H__
#define __LUCI_IMPORT_GRAPH_BUILDER_REGISTRY_H__
-#include "GraphBuilder.h"
+#include "GraphBuilderBase.h"
#include <map>
@@ -31,11 +31,11 @@ struct GraphBuilderSource
/**
* @brief Returns registered GraphBuilder pointer for operator (nullptr if not present)
*/
- virtual const GraphBuilder *lookup(const circle::BuiltinOperator &op) const = 0;
+ virtual const GraphBuilderBase *lookup(const circle::BuiltinOperator &op) const = 0;
};
/**
- * @brief Class to return graph builder for TF nodes
+ * @brief Class to return graph builder for Circle nodes
*/
class GraphBuilderRegistry final : public GraphBuilderSource
{
@@ -53,7 +53,7 @@ public:
* @brief Returns registered GraphBuilder pointer for operator or
* nullptr if not registered
*/
- const GraphBuilder *lookup(const circle::BuiltinOperator &op) const final
+ const GraphBuilderBase *lookup(const circle::BuiltinOperator &op) const final
{
if (_builder_map.find(op) == _builder_map.end())
return (_parent == nullptr) ? nullptr : _parent->lookup(op);
@@ -68,7 +68,7 @@ public:
}
public:
- void add(const circle::BuiltinOperator op, std::unique_ptr<GraphBuilder> &&builder)
+ void add(const circle::BuiltinOperator op, std::unique_ptr<GraphBuilderBase> &&builder)
{
_builder_map[op] = std::move(builder);
}
@@ -77,7 +77,7 @@ private:
const GraphBuilderSource *_parent = nullptr;
private:
- std::map<const circle::BuiltinOperator, std::unique_ptr<GraphBuilder>> _builder_map;
+ std::map<const circle::BuiltinOperator, std::unique_ptr<GraphBuilderBase>> _builder_map;
};
} // namespace luci
diff --git a/compiler/luci/import/include/luci/Import/Nodes.h b/compiler/luci/import/include/luci/Import/Nodes.h
index 381d02b97..2719a5aec 100644
--- a/compiler/luci/import/include/luci/Import/Nodes.h
+++ b/compiler/luci/import/include/luci/Import/Nodes.h
@@ -19,30 +19,110 @@
#include "Nodes/CircleAbs.h"
#include "Nodes/CircleAdd.h"
+#include "Nodes/CircleAddN.h"
#include "Nodes/CircleArgMax.h"
+#include "Nodes/CircleArgMin.h"
#include "Nodes/CircleAveragePool2D.h"
+#include "Nodes/CircleBatchMatMul.h"
#include "Nodes/CircleBatchToSpaceND.h"
+#include "Nodes/CircleBCQFullyConnected.h"
+#include "Nodes/CircleBCQGather.h"
+#include "Nodes/CircleCast.h"
+#include "Nodes/CircleCeil.h"
#include "Nodes/CircleConcatenation.h"
#include "Nodes/CircleConst.h"
#include "Nodes/CircleConv2D.h"
#include "Nodes/CircleCos.h"
+#include "Nodes/CircleCustom.h"
+#include "Nodes/CircleDepthToSpace.h"
#include "Nodes/CircleDepthwiseConv2D.h"
#include "Nodes/CircleDiv.h"
+#include "Nodes/CircleElu.h"
#include "Nodes/CircleEqual.h"
#include "Nodes/CircleExp.h"
+#include "Nodes/CircleExpandDims.h"
+#include "Nodes/CircleFill.h"
+#include "Nodes/CircleFloor.h"
+#include "Nodes/CircleFloorDiv.h"
+#include "Nodes/CircleFloorMod.h"
#include "Nodes/CircleFullyConnected.h"
+#include "Nodes/CircleGather.h"
+#include "Nodes/CircleGatherNd.h"
+#include "Nodes/CircleGreater.h"
+#include "Nodes/CircleGreaterEqual.h"
+#include "Nodes/CircleIf.h"
+#include "Nodes/CircleInstanceNorm.h"
+#include "Nodes/CircleL2Normalize.h"
+#include "Nodes/CircleL2Pool2D.h"
+#include "Nodes/CircleLeakyRelu.h"
+#include "Nodes/CircleLess.h"
+#include "Nodes/CircleLessEqual.h"
+#include "Nodes/CircleLocalResponseNormalization.h"
+#include "Nodes/CircleLog.h"
+#include "Nodes/CircleLogicalAnd.h"
#include "Nodes/CircleLogicalNot.h"
#include "Nodes/CircleLogicalOr.h"
+#include "Nodes/CircleLogistic.h"
+#include "Nodes/CircleLogSoftmax.h"
+#include "Nodes/CircleMatrixSetDiag.h"
+#include "Nodes/CircleMaximum.h"
#include "Nodes/CircleMaxPool2D.h"
+#include "Nodes/CircleMatrixDiag.h"
#include "Nodes/CircleMean.h"
+#include "Nodes/CircleMinimum.h"
+#include "Nodes/CircleMirrorPad.h"
#include "Nodes/CircleMul.h"
+#include "Nodes/CircleNeg.h"
+#include "Nodes/CircleNotEqual.h"
+#include "Nodes/CircleOneHot.h"
#include "Nodes/CirclePack.h"
#include "Nodes/CirclePad.h"
+#include "Nodes/CirclePow.h"
+#include "Nodes/CirclePRelu.h"
+#include "Nodes/CircleRange.h"
+#include "Nodes/CircleRank.h"
+#include "Nodes/CircleReduceAny.h"
+#include "Nodes/CircleReduceMax.h"
+#include "Nodes/CircleReduceMin.h"
+#include "Nodes/CircleReduceProd.h"
#include "Nodes/CircleRelu.h"
+#include "Nodes/CircleRelu6.h"
+#include "Nodes/CircleReluN1To1.h"
#include "Nodes/CircleReshape.h"
+#include "Nodes/CircleResizeBilinear.h"
+#include "Nodes/CircleResizeNearestNeighbor.h"
+#include "Nodes/CircleReverseSequence.h"
+#include "Nodes/CircleReverseV2.h"
+#include "Nodes/CircleRound.h"
#include "Nodes/CircleRsqrt.h"
+#include "Nodes/CircleScatterNd.h"
+#include "Nodes/CircleSegmentSum.h"
+#include "Nodes/CircleSelect.h"
+#include "Nodes/CircleSelectV2.h"
+#include "Nodes/CircleShape.h"
+#include "Nodes/CircleSin.h"
+#include "Nodes/CircleSlice.h"
#include "Nodes/CircleSoftmax.h"
+#include "Nodes/CircleSpaceToBatchND.h"
+#include "Nodes/CircleSpaceToDepth.h"
+#include "Nodes/CircleSparseToDense.h"
+#include "Nodes/CircleSplit.h"
+#include "Nodes/CircleSplitV.h"
+#include "Nodes/CircleSqrt.h"
+#include "Nodes/CircleSquare.h"
+#include "Nodes/CircleSquaredDifference.h"
+#include "Nodes/CircleSqueeze.h"
+#include "Nodes/CircleStridedSlice.h"
#include "Nodes/CircleSub.h"
+#include "Nodes/CircleSum.h"
+#include "Nodes/CircleTanh.h"
+#include "Nodes/CircleTile.h"
+#include "Nodes/CircleTopKV2.h"
#include "Nodes/CircleTranspose.h"
+#include "Nodes/CircleTransposeConv.h"
+#include "Nodes/CircleUnpack.h"
+#include "Nodes/CircleWhere.h"
+#include "Nodes/CircleWhile.h"
+#include "Nodes/CircleZerosLike.h"
#endif // __LUCI_IMPORT_NODES_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleAddN.h b/compiler/luci/import/include/luci/Import/Nodes/CircleAddN.h
new file mode 100644
index 000000000..3ec6b2a45
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleAddN.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_ADD_N_H__
+#define __LUCI_IMPORT_OP_CIRCLE_ADD_N_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleAddNGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_ADD_N_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleArgMin.h b/compiler/luci/import/include/luci/Import/Nodes/CircleArgMin.h
new file mode 100644
index 000000000..746f52837
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleArgMin.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_ARGMIN_H__
+#define __LUCI_IMPORT_OP_CIRCLE_ARGMIN_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleArgMinGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_ARGMIN_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleBCQFullyConnected.h b/compiler/luci/import/include/luci/Import/Nodes/CircleBCQFullyConnected.h
new file mode 100644
index 000000000..be58acd8d
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleBCQFullyConnected.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_BCQFULLYCONNECTED_H__
+#define __LUCI_IMPORT_OP_CIRCLE_BCQFULLYCONNECTED_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleBCQFullyConnectedGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_BCQFULLYCONNECTED_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleBCQGather.h b/compiler/luci/import/include/luci/Import/Nodes/CircleBCQGather.h
new file mode 100644
index 000000000..ff1c1f7e9
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleBCQGather.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_BCQGATHER_H__
+#define __LUCI_IMPORT_OP_CIRCLE_BCQGATHER_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleBCQGatherGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_BCQGATHER_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleBatchMatMul.h b/compiler/luci/import/include/luci/Import/Nodes/CircleBatchMatMul.h
new file mode 100644
index 000000000..b46a8715c
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleBatchMatMul.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_BATCHMATMUL_H__
+#define __LUCI_IMPORT_OP_CIRCLE_BATCHMATMUL_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleBatchMatMulGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_BATCHMATMUL_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleCast.h b/compiler/luci/import/include/luci/Import/Nodes/CircleCast.h
new file mode 100644
index 000000000..1cd850bc7
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleCast.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_CAST_H__
+#define __LUCI_IMPORT_OP_CIRCLE_CAST_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleCastGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_CAST_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleCeil.h b/compiler/luci/import/include/luci/Import/Nodes/CircleCeil.h
new file mode 100644
index 000000000..f1bdf2397
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleCeil.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_CEIL_H__
+#define __LUCI_IMPORT_OP_CIRCLE_CEIL_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleCeilGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_CEIL_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleCustom.h b/compiler/luci/import/include/luci/Import/Nodes/CircleCustom.h
new file mode 100644
index 000000000..65745be4b
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleCustom.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_CUSTOM_H__
+#define __LUCI_IMPORT_OP_CIRCLE_CUSTOM_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleCustomGraphBuilder : public GraphBuilderBase
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_CUSTOM_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleDepthToSpace.h b/compiler/luci/import/include/luci/Import/Nodes/CircleDepthToSpace.h
new file mode 100644
index 000000000..a479cbd20
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleDepthToSpace.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_DEPTHTOSPACE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_DEPTHTOSPACE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleDepthToSpaceGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_DEPTHTOSPACE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleElu.h b/compiler/luci/import/include/luci/Import/Nodes/CircleElu.h
new file mode 100644
index 000000000..2ec5642ce
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleElu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_ELU_H__
+#define __LUCI_IMPORT_OP_CIRCLE_ELU_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleEluGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_ELU_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleExpandDims.h b/compiler/luci/import/include/luci/Import/Nodes/CircleExpandDims.h
new file mode 100644
index 000000000..acbfe7aea
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleExpandDims.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_EXPAND_DIMS_H__
+#define __LUCI_IMPORT_OP_CIRCLE_EXPAND_DIMS_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleExpandDimsGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_EXPAND_DIMS_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleFill.h b/compiler/luci/import/include/luci/Import/Nodes/CircleFill.h
new file mode 100644
index 000000000..3539dcd56
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleFill.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_FILL_H__
+#define __LUCI_IMPORT_OP_CIRCLE_FILL_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleFillGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_FILL_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleFloor.h b/compiler/luci/import/include/luci/Import/Nodes/CircleFloor.h
new file mode 100644
index 000000000..057800865
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleFloor.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_FLOOR_H__
+#define __LUCI_IMPORT_OP_CIRCLE_FLOOR_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleFloorGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_FLOOR_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleFloorDiv.h b/compiler/luci/import/include/luci/Import/Nodes/CircleFloorDiv.h
new file mode 100644
index 000000000..ddc2ab2ff
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleFloorDiv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_FLOOR_DIV_H__
+#define __LUCI_IMPORT_OP_CIRCLE_FLOOR_DIV_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleFloorDivGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_FLOOR_DIV_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleFloorMod.h b/compiler/luci/import/include/luci/Import/Nodes/CircleFloorMod.h
new file mode 100644
index 000000000..1d6aa87c2
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleFloorMod.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_FLOOR_MOD_H__
+#define __LUCI_IMPORT_OP_CIRCLE_FLOOR_MOD_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleFloorModGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_FLOOR_MOD_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleGather.h b/compiler/luci/import/include/luci/Import/Nodes/CircleGather.h
new file mode 100644
index 000000000..0680c9451
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleGather.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_GATHER_H__
+#define __LUCI_IMPORT_OP_CIRCLE_GATHER_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleGatherGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_GATHER_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleGatherNd.h b/compiler/luci/import/include/luci/Import/Nodes/CircleGatherNd.h
new file mode 100644
index 000000000..be96b7dbe
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleGatherNd.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_GATHER_ND_H__
+#define __LUCI_IMPORT_OP_CIRCLE_GATHER_ND_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleGatherNdGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_GATHER_ND_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleGreater.h b/compiler/luci/import/include/luci/Import/Nodes/CircleGreater.h
new file mode 100644
index 000000000..87f0a8d83
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleGreater.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_GREATER_H__
+#define __LUCI_IMPORT_OP_CIRCLE_GREATER_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleGreaterGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_GREATER_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleGreaterEqual.h b/compiler/luci/import/include/luci/Import/Nodes/CircleGreaterEqual.h
new file mode 100644
index 000000000..4d24314e9
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleGreaterEqual.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_GREATEREQUAL_H__
+#define __LUCI_IMPORT_OP_CIRCLE_GREATEREQUAL_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleGreaterEqualGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_GREATEREQUAL_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleIf.h b/compiler/luci/import/include/luci/Import/Nodes/CircleIf.h
new file mode 100644
index 000000000..8faf09cae
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleIf.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_IF_H__
+#define __LUCI_IMPORT_OP_CIRCLE_IF_H__
+
+#include "luci/Import/GraphBuilderBase.h"
+
+namespace luci
+{
+
+class CircleIfGraphBuilder : public GraphBuilderBase
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_IF_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleInstanceNorm.h b/compiler/luci/import/include/luci/Import/Nodes/CircleInstanceNorm.h
new file mode 100644
index 000000000..5fd8f148a
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleInstanceNorm.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_INSTANCE_NORM_H__
+#define __LUCI_IMPORT_OP_CIRCLE_INSTANCE_NORM_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleInstanceNormGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_INSTANCE_NORM_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleL2Normalize.h b/compiler/luci/import/include/luci/Import/Nodes/CircleL2Normalize.h
new file mode 100644
index 000000000..116605f09
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleL2Normalize.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_L2_NORMALIZE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_L2_NORMALIZE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleL2NormalizeGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_L2_NORMALIZE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleL2Pool2D.h b/compiler/luci/import/include/luci/Import/Nodes/CircleL2Pool2D.h
new file mode 100644
index 000000000..2211c4751
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleL2Pool2D.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_L2_POOL2D_H__
+#define __LUCI_IMPORT_OP_CIRCLE_L2_POOL2D_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleL2Pool2DGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_L2_POOL2D_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLeakyRelu.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLeakyRelu.h
new file mode 100644
index 000000000..b7fa41f25
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLeakyRelu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LEAKY_RELU_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LEAKY_RELU_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLeakyReluGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LEAKY_RELU_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLess.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLess.h
new file mode 100644
index 000000000..b93155bc4
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLess.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LESS_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LESS_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLessGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LESS_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLessEqual.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLessEqual.h
new file mode 100644
index 000000000..e54a4cb8c
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLessEqual.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LESS_EQUAL_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LESS_EQUAL_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLessEqualGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LESS_EQUAL_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLocalResponseNormalization.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLocalResponseNormalization.h
new file mode 100644
index 000000000..95e6ea880
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLocalResponseNormalization.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LOCAL_RESPONSE_NORMALIZATION_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LOCAL_RESPONSE_NORMALIZATION_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLocalResponseNormalizationGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LOCAL_RESPONSE_NORMALIZATION_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLog.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLog.h
new file mode 100644
index 000000000..5b3321014
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLog.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LOG_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LOG_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLogGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LOG_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLogSoftmax.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLogSoftmax.h
new file mode 100644
index 000000000..ef29833f5
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLogSoftmax.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LOG_SOFTMAX_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LOG_SOFTMAX_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLogSoftmaxGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LOG_SOFTMAX_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLogicalAnd.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLogicalAnd.h
new file mode 100644
index 000000000..9336f4ac8
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLogicalAnd.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LOGICALAND_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LOGICALAND_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLogicalAndGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LOGICALAND_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleLogistic.h b/compiler/luci/import/include/luci/Import/Nodes/CircleLogistic.h
new file mode 100644
index 000000000..67c6c1f1f
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleLogistic.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_LOGISTIC_H__
+#define __LUCI_IMPORT_OP_CIRCLE_LOGISTIC_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleLogisticGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_LOGISTIC_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleMatrixDiag.h b/compiler/luci/import/include/luci/Import/Nodes/CircleMatrixDiag.h
new file mode 100644
index 000000000..e038c3e0a
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleMatrixDiag.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_MATRIX_DIAG_H__
+#define __LUCI_IMPORT_OP_CIRCLE_MATRIX_DIAG_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleMatrixDiagGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_MATRIX_DIAG_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleMatrixSetDiag.h b/compiler/luci/import/include/luci/Import/Nodes/CircleMatrixSetDiag.h
new file mode 100644
index 000000000..a9ea0ac3d
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleMatrixSetDiag.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_MATRIX_SET_DIAG_H__
+#define __LUCI_IMPORT_OP_CIRCLE_MATRIX_SET_DIAG_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleMatrixSetDiagGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_MATRIX_SET_DIAG_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleMaximum.h b/compiler/luci/import/include/luci/Import/Nodes/CircleMaximum.h
new file mode 100644
index 000000000..9705d3a36
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleMaximum.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_MAXIMUM_H__
+#define __LUCI_IMPORT_OP_CIRCLE_MAXIMUM_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleMaximumGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_MAXIMUM_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleMinimum.h b/compiler/luci/import/include/luci/Import/Nodes/CircleMinimum.h
new file mode 100644
index 000000000..d9546ecf8
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleMinimum.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_MINIMUM_H__
+#define __LUCI_IMPORT_OP_CIRCLE_MINIMUM_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleMinimumGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_MINIMUM_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleMirrorPad.h b/compiler/luci/import/include/luci/Import/Nodes/CircleMirrorPad.h
new file mode 100644
index 000000000..7f512cda7
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleMirrorPad.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_MIRROR_PAD_H__
+#define __LUCI_IMPORT_OP_CIRCLE_MIRROR_PAD_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleMirrorPadGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_MIRROR_PAD_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleNeg.h b/compiler/luci/import/include/luci/Import/Nodes/CircleNeg.h
new file mode 100644
index 000000000..3d0bac19f
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleNeg.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_NEG_H__
+#define __LUCI_IMPORT_OP_CIRCLE_NEG_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleNegGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_NEG_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleNotEqual.h b/compiler/luci/import/include/luci/Import/Nodes/CircleNotEqual.h
new file mode 100644
index 000000000..10c79b75e
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleNotEqual.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_NOTEQUAL_H__
+#define __LUCI_IMPORT_OP_CIRCLE_NOTEQUAL_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleNotEqualGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_NOTEQUAL_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleOneHot.h b/compiler/luci/import/include/luci/Import/Nodes/CircleOneHot.h
new file mode 100644
index 000000000..8d9526d0e
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleOneHot.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_ONEHOT_H__
+#define __LUCI_IMPORT_OP_CIRCLE_ONEHOT_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleOneHotGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_ONEHOT_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CirclePRelu.h b/compiler/luci/import/include/luci/Import/Nodes/CirclePRelu.h
new file mode 100644
index 000000000..822862cfd
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CirclePRelu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_PRELU_H__
+#define __LUCI_IMPORT_OP_CIRCLE_PRELU_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CirclePReluGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_PRELU_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CirclePow.h b/compiler/luci/import/include/luci/Import/Nodes/CirclePow.h
new file mode 100644
index 000000000..284aa9b89
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CirclePow.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_POW_H__
+#define __LUCI_IMPORT_OP_CIRCLE_POW_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CirclePowGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_POW_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleRange.h b/compiler/luci/import/include/luci/Import/Nodes/CircleRange.h
new file mode 100644
index 000000000..bc63286b2
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleRange.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_RANGE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_RANGE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleRangeGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_RANGE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleRank.h b/compiler/luci/import/include/luci/Import/Nodes/CircleRank.h
new file mode 100644
index 000000000..43a7fdb7b
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleRank.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_RANK_H__
+#define __LUCI_IMPORT_OP_CIRCLE_RANK_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleRankGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_RANK_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleReduceAny.h b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceAny.h
new file mode 100644
index 000000000..5ee517999
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceAny.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_REDUCE_ANY_H__
+#define __LUCI_IMPORT_OP_CIRCLE_REDUCE_ANY_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleReduceAnyGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_REDUCE_ANY_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleReduceMax.h b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceMax.h
new file mode 100644
index 000000000..0bc7021c1
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceMax.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_REDUCE_MAX_H__
+#define __LUCI_IMPORT_OP_CIRCLE_REDUCE_MAX_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleReduceMaxGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_REDUCE_MAX_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleReduceMin.h b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceMin.h
new file mode 100644
index 000000000..0c05457f0
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceMin.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_REDUCE_MIN_H__
+#define __LUCI_IMPORT_OP_CIRCLE_REDUCE_MIN_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleReduceMinGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_REDUCE_MIN_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleReduceProd.h b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceProd.h
new file mode 100644
index 000000000..446bc7866
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleReduceProd.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_REDUCE_PROD_H__
+#define __LUCI_IMPORT_OP_CIRCLE_REDUCE_PROD_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleReduceProdGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_REDUCE_PROD_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleRelu6.h b/compiler/luci/import/include/luci/Import/Nodes/CircleRelu6.h
new file mode 100644
index 000000000..d17b4e200
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleRelu6.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_RELU6_H__
+#define __LUCI_IMPORT_OP_CIRCLE_RELU6_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleRelu6GraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_RELU6_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleReluN1To1.h b/compiler/luci/import/include/luci/Import/Nodes/CircleReluN1To1.h
new file mode 100644
index 000000000..059431565
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleReluN1To1.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_RELU_N1_TO_1_H__
+#define __LUCI_IMPORT_OP_CIRCLE_RELU_N1_TO_1_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleReluN1To1GraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_RELU_N1_TO_1_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleResizeBilinear.h b/compiler/luci/import/include/luci/Import/Nodes/CircleResizeBilinear.h
new file mode 100644
index 000000000..8c20ecc24
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleResizeBilinear.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_RESIZE_BILINEAR_H__
+#define __LUCI_IMPORT_OP_CIRCLE_RESIZE_BILINEAR_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleResizeBilinearGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_RESIZE_BILINEAR_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleResizeNearestNeighbor.h b/compiler/luci/import/include/luci/Import/Nodes/CircleResizeNearestNeighbor.h
new file mode 100644
index 000000000..5b0647163
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleResizeNearestNeighbor.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_RESIZE_NEAREST_NEIGHBOR_H__
+#define __LUCI_IMPORT_OP_CIRCLE_RESIZE_NEAREST_NEIGHBOR_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleResizeNearestNeighborGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_RESIZE_NEAREST_NEIGHBOR_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleReverseSequence.h b/compiler/luci/import/include/luci/Import/Nodes/CircleReverseSequence.h
new file mode 100644
index 000000000..cbeed3013
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleReverseSequence.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_REVERSE_SEQUENCE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_REVERSE_SEQUENCE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleReverseSequenceGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_REVERSE_SEQUENCE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleReverseV2.h b/compiler/luci/import/include/luci/Import/Nodes/CircleReverseV2.h
new file mode 100644
index 000000000..f354298dd
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleReverseV2.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_REVERSE_V2_H__
+#define __LUCI_IMPORT_OP_CIRCLE_REVERSE_V2_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleReverseV2GraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_REVERSE_V2_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleRound.h b/compiler/luci/import/include/luci/Import/Nodes/CircleRound.h
new file mode 100644
index 000000000..8b027d7ef
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleRound.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_ROUND_H__
+#define __LUCI_IMPORT_OP_CIRCLE_ROUND_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleRoundGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_ROUND_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleScatterNd.h b/compiler/luci/import/include/luci/Import/Nodes/CircleScatterNd.h
new file mode 100644
index 000000000..8fa7a2f91
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleScatterNd.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SCATTER_ND_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SCATTER_ND_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleScatterNdGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SCATTER_ND_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSegmentSum.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSegmentSum.h
new file mode 100644
index 000000000..7c33dee41
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSegmentSum.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SEGMENT_SUM_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SEGMENT_SUM_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSegmentSumGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SEGMENT_SUM_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSelect.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSelect.h
new file mode 100644
index 000000000..87bd1a7fe
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSelect.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SELECT_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SELECT_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSelectGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SELECT_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSelectV2.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSelectV2.h
new file mode 100644
index 000000000..28c73b087
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSelectV2.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SELECT_V2_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SELECT_V2_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSelectV2GraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SELECT_V2_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleShape.h b/compiler/luci/import/include/luci/Import/Nodes/CircleShape.h
new file mode 100644
index 000000000..3002084a5
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleShape.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SHAPE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SHAPE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleShapeGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SHAPE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSin.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSin.h
new file mode 100644
index 000000000..605f5a5a0
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSin.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SIN_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SIN_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSinGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SIN_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSlice.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSlice.h
new file mode 100644
index 000000000..3bb4c51b7
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSlice.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SLICE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SLICE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSliceGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SLICE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToBatchND.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToBatchND.h
new file mode 100644
index 000000000..b8723098d
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToBatchND.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SPACETOBATCHND_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SPACETOBATCHND_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSpaceToBatchNDGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SPACETOBATCHND_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToDepth.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToDepth.h
new file mode 100644
index 000000000..75a54dd26
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSpaceToDepth.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SPACETODEPTH_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SPACETODEPTH_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSpaceToDepthGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SPACETODEPTH_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSparseToDense.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSparseToDense.h
new file mode 100644
index 000000000..baf240919
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSparseToDense.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SPARSETODENSE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SPARSETODENSE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSparseToDenseGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SPARSETODENSE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSplit.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSplit.h
new file mode 100644
index 000000000..3395e40fd
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSplit.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SPLIT_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SPLIT_H__
+
+#include "luci/Import/GraphBuilderBase.h"
+
+namespace luci
+{
+
+class CircleSplitGraphBuilder : public GraphBuilderBase
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SPLIT_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSplitV.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSplitV.h
new file mode 100644
index 000000000..3e53df362
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSplitV.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SPLIT_V_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SPLIT_V_H__
+
+#include "luci/Import/GraphBuilderBase.h"
+
+namespace luci
+{
+
+class CircleSplitVGraphBuilder : public GraphBuilderBase
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SPLIT_V_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSqrt.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSqrt.h
new file mode 100644
index 000000000..4fd79951c
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSqrt.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SQRT_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SQRT_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSqrtGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SQRT_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSquare.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSquare.h
new file mode 100644
index 000000000..3a1299102
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSquare.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SQUARE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SQUARE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSquareGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SQUARE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSquaredDifference.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSquaredDifference.h
new file mode 100644
index 000000000..95f08412b
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSquaredDifference.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SQUAREDDIFFERENCE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SQUAREDDIFFERENCE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSquaredDifferenceGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SQUAREDDIFFERENCE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSqueeze.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSqueeze.h
new file mode 100644
index 000000000..4f0dfb5ef
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSqueeze.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SQUEEZE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SQUEEZE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSqueezeGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SQUEEZE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleStridedSlice.h b/compiler/luci/import/include/luci/Import/Nodes/CircleStridedSlice.h
new file mode 100644
index 000000000..f535c3a61
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleStridedSlice.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_STRIDED_SLICE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_STRIDED_SLICE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleStridedSliceGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_STRIDED_SLICE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleSum.h b/compiler/luci/import/include/luci/Import/Nodes/CircleSum.h
new file mode 100644
index 000000000..e65dd46ad
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleSum.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_SUM_H__
+#define __LUCI_IMPORT_OP_CIRCLE_SUM_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleSumGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_SUM_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleTanh.h b/compiler/luci/import/include/luci/Import/Nodes/CircleTanh.h
new file mode 100644
index 000000000..b3795acba
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleTanh.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_TANH_H__
+#define __LUCI_IMPORT_OP_CIRCLE_TANH_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleTanhGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_TANH_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleTile.h b/compiler/luci/import/include/luci/Import/Nodes/CircleTile.h
new file mode 100644
index 000000000..1da6cdbde
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleTile.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_TILE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_TILE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleTileGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_TILE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleTopKV2.h b/compiler/luci/import/include/luci/Import/Nodes/CircleTopKV2.h
new file mode 100644
index 000000000..8ec3f3311
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleTopKV2.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_TOPK_V2_H__
+#define __LUCI_IMPORT_OP_CIRCLE_TOPK_V2_H__
+
+#include "luci/Import/GraphBuilderBase.h"
+
+namespace luci
+{
+
+class CircleTopKV2GraphBuilder : public GraphBuilderBase
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_TOPK_V2_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleTransposeConv.h b/compiler/luci/import/include/luci/Import/Nodes/CircleTransposeConv.h
new file mode 100644
index 000000000..2614d0d0d
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleTransposeConv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_TRANSPOSE_CONV_H__
+#define __LUCI_IMPORT_OP_CIRCLE_TRANSPOSE_CONV_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleTransposeConvGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_TRANSPOSE_CONV_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleUnpack.h b/compiler/luci/import/include/luci/Import/Nodes/CircleUnpack.h
new file mode 100644
index 000000000..f1a21de22
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleUnpack.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_UNPACK_H__
+#define __LUCI_IMPORT_OP_CIRCLE_UNPACK_H__
+
+#include "luci/Import/GraphBuilderBase.h"
+
+namespace luci
+{
+
+class CircleUnpackGraphBuilder : public GraphBuilderBase
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_UNPACK_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleWhere.h b/compiler/luci/import/include/luci/Import/Nodes/CircleWhere.h
new file mode 100644
index 000000000..72f98ef92
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleWhere.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_WHERE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_WHERE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleWhereGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_WHERE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleWhile.h b/compiler/luci/import/include/luci/Import/Nodes/CircleWhile.h
new file mode 100644
index 000000000..68c56b3c6
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleWhile.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_WHILE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_WHILE_H__
+
+#include "luci/Import/GraphBuilderBase.h"
+
+namespace luci
+{
+
+class CircleWhileGraphBuilder : public GraphBuilderBase
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+ void build(const circle::OperatorT &op, GraphBuilderContext *context) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_WHILE_H__
diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleZerosLike.h b/compiler/luci/import/include/luci/Import/Nodes/CircleZerosLike.h
new file mode 100644
index 000000000..2a3410379
--- /dev/null
+++ b/compiler/luci/import/include/luci/Import/Nodes/CircleZerosLike.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IMPORT_OP_CIRCLE_ZEROS_LIKE_H__
+#define __LUCI_IMPORT_OP_CIRCLE_ZEROS_LIKE_H__
+
+#include "luci/Import/GraphBuilder.h"
+
+namespace luci
+{
+
+class CircleZerosLikeGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const ValidateArgs &args) const final;
+
+private:
+ CircleNode *build_node(const circle::OperatorT &op, const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IMPORT_OP_CIRCLE_ZEROS_LIKE_H__
diff --git a/compiler/luci/import/include/luci/Importer.h b/compiler/luci/import/include/luci/Importer.h
index 246df9f27..f08ddcda7 100644
--- a/compiler/luci/import/include/luci/Importer.h
+++ b/compiler/luci/import/include/luci/Importer.h
@@ -51,4 +51,4 @@ private:
} // namespace luci
-#endif // __MOCO_IMPORTER_H__
+#endif // __LUCI_IMPORTER_H__
diff --git a/compiler/luci/import/src/CircleReader.cpp b/compiler/luci/import/src/CircleReader.cpp
index ead0093b8..81e945dd1 100644
--- a/compiler/luci/import/src/CircleReader.cpp
+++ b/compiler/luci/import/src/CircleReader.cpp
@@ -136,6 +136,19 @@ Padding luci_padding(const circle::Padding padding)
return Padding::UNDEFINED;
}
+MirrorPadMode luci_mirrorpad_mode(const circle::MirrorPadMode mode)
+{
+ switch (mode)
+ {
+ case circle::MirrorPadMode::MirrorPadMode_REFLECT:
+ return MirrorPadMode::REFLECT;
+ case circle::MirrorPadMode::MirrorPadMode_SYMMETRIC:
+ return MirrorPadMode::SYMMETRIC;
+ }
+ assert(false);
+ return MirrorPadMode::UNDEFINED;
+}
+
std::unique_ptr<CircleQuantParam>
luci_quantparam(const circle::QuantizationParametersT *quantization)
{
@@ -159,6 +172,27 @@ luci_quantparam(const circle::QuantizationParametersT *quantization)
return nullptr;
}
+void copy_tensor_attributes(const circle::TensorT &tensor, CircleNode *node)
+{
+ node->name(tensor_name(tensor));
+ node->dtype(luci_datatype(tensor.type));
+
+ std::vector<int32_t> dims = tensor.shape; // in NHWC
+ node->rank(dims.size());
+ for (uint32_t r = 0; r < dims.size(); ++r)
+ {
+ node->dim(r) = loco::Dimension(dims[r]);
+ }
+
+ const auto *quantization = tensor.quantization.get();
+ if (quantization != nullptr)
+ {
+ auto quantparam = luci_quantparam(quantization);
+ if (quantparam)
+ node->quantparam(std::move(quantparam));
+ }
+}
+
circle::BuiltinOperator CircleReader::builtin_code(const circle::OperatorT &op) const
{
const auto &op_codes = opcodes();
@@ -192,6 +226,9 @@ bool CircleReader::parse(const circle::Model *model)
_model.reset(model->UnPack());
+ // for direct pointer access
+ _model_ptr = model;
+
return true;
}
@@ -205,6 +242,12 @@ bool CircleReader::select_subgraph(uint32_t sgindex)
_current_subgraph = _model->subgraphs[sgindex].get();
+ // for direct pointer access
+ auto subgraphs = _model_ptr->subgraphs();
+ const circle::SubGraph *subgraph = (*subgraphs)[sgindex];
+
+ _tensors_ptr = subgraph->tensors();
+
return true;
}
diff --git a/compiler/luci/import/src/GraphBuilder.cpp b/compiler/luci/import/src/GraphBuilder.cpp
index e0ec9ded5..80a9f986a 100644
--- a/compiler/luci/import/src/GraphBuilder.cpp
+++ b/compiler/luci/import/src/GraphBuilder.cpp
@@ -16,21 +16,39 @@
#include "luci/Import/GraphBuilder.h"
+#include <luci/Log.h>
+
namespace luci
{
void GraphBuilder::build(const circle::OperatorT &op, GraphBuilderContext *context) const
{
+ LOGGER(l);
+
assert(context != nullptr);
const std::vector<int32_t> &inputs = op.inputs;
const std::vector<int32_t> &outputs = op.outputs;
const auto &tensors = context->reader()->tensors();
+ const auto &opcodes = context->reader()->opcodes();
+ auto tensors_ptr = context->reader()->tensors_ptr();
+ assert(tensors_ptr != nullptr);
std::vector<CircleNode *> input_nodes;
for (const int32_t input_tensor_index : inputs)
{
- input_nodes.push_back(context->nodefinder()->node(input_tensor_index));
+ if (input_tensor_index >= 0)
+ {
+ auto input = context->nodefinder()->node(input_tensor_index);
+ if (input == nullptr)
+ INFO(l) << "[luci] Warning: input node is null " << input_tensor_index << std::endl;
+ input_nodes.push_back(input);
+ }
+ else
+ {
+ // If there is no tensor, insert CircleOutputExclude.
+ input_nodes.push_back(context->graph()->nodes()->create<luci::CircleOutputExclude>());
+ }
}
CircleNode *node = build_node(op, input_nodes, context->graph());
@@ -39,16 +57,15 @@ void GraphBuilder::build(const circle::OperatorT &op, GraphBuilderContext *conte
assert(outputs.size() == 1);
{
const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ copy_tensor_attributes(output_tensor, node);
+ // mark shape_status
+ if (tensors_ptr->Get(outputs[0])->shape() == nullptr)
+ node->shape_status(ShapeStatus::NOSHAPE);
+ else
+ node->shape_status(ShapeStatus::VALID);
- node->name(tensor_name(output_tensor));
-
- auto quantization = tensor_quantization(output_tensor);
- if (quantization)
- {
- auto quantparam = luci_quantparam(quantization);
- if (quantparam)
- node->quantparam(std::move(quantparam));
- }
+ // mark operator version
+ node->op_version(opcodes[op.opcode_index].get()->version);
}
// Register node's only output.
diff --git a/compiler/luci/import/src/GraphBuilderContext.cpp b/compiler/luci/import/src/GraphBuilderContext.cpp
index a5162ce83..21adfa7e2 100644
--- a/compiler/luci/import/src/GraphBuilderContext.cpp
+++ b/compiler/luci/import/src/GraphBuilderContext.cpp
@@ -25,10 +25,12 @@ namespace luci
void IndexNodeFinder::enroll(TensorIndex idx, CircleNode *node)
{
- if (_table.find(idx) != _table.end())
+ auto iter = _table.find(idx);
+ if (iter != _table.end())
{
LOGGER(l);
- INFO(l) << "[luci] NodeFinder SKIP (" << idx << ") " << node << std::endl;
+ INFO(l) << "[luci] NodeFinder SKIP (" << idx << ") " << node << ":" << node->name()
+ << " existing: " << iter->second << ":" << iter->second->name() << std::endl;
return;
}
@@ -39,9 +41,22 @@ CircleNode *IndexNodeFinder::node(TensorIndex idx) const
{
MapIndexNode_t::const_iterator iter = _table.find(idx);
- assert(iter != _table.end() && iter->second != nullptr);
+ // dangle output node may exist that are not enrolled
+ return (iter != _table.end()) ? iter->second : nullptr;
+}
- return iter->second;
+void IndexTensorOutputs::enroll(TensorIndex idx)
+{
+ auto iter = _set.find(idx);
+ if (iter != _set.end())
+ {
+ LOGGER(l);
+ INFO(l) << "[luci] TensorOutputs SKIP (" << idx << ") existing" << std::endl;
+ return;
+ }
+ _set.insert(idx);
}
+bool IndexTensorOutputs::find(TensorIndex idx) { return (_set.find(idx) != _set.end()); }
+
} // namespace luci
diff --git a/compiler/luci/import/src/GraphBuilderRegistry.cpp b/compiler/luci/import/src/GraphBuilderRegistry.cpp
index 929b71a7d..d29557f74 100644
--- a/compiler/luci/import/src/GraphBuilderRegistry.cpp
+++ b/compiler/luci/import/src/GraphBuilderRegistry.cpp
@@ -27,137 +27,140 @@ GraphBuilderRegistry::GraphBuilderRegistry()
{
#define CIRCLE_NODE(OPCODE, CLASS) add(circle::BuiltinOperator_##OPCODE, std::make_unique<CLASS>());
- CIRCLE_NODE(ABS, CircleAbsGraphBuilder); // 101
- CIRCLE_NODE(ADD, CircleAddGraphBuilder); // 0
- CIRCLE_NODE(ARG_MAX, CircleArgMaxGraphBuilder); // 56
- CIRCLE_NODE(AVERAGE_POOL_2D, CircleAveragePool2DGraphBuilder); // 1
- CIRCLE_NODE(BATCH_TO_SPACE_ND, CircleBatchToSpaceNDGraphBuilder); // 37
- CIRCLE_NODE(CONCATENATION, CircleConcatenationGraphBuilder); // 2
- CIRCLE_NODE(CONV_2D, CircleConv2DGraphBuilder); // 3
- CIRCLE_NODE(COS, CircleCosGraphBuilder); // 108
- CIRCLE_NODE(DEPTHWISE_CONV_2D, CircleDepthwiseConv2DGraphBuilder); // 4
- CIRCLE_NODE(DIV, CircleDivGraphBuilder); // 42
- CIRCLE_NODE(EQUAL, CircleEqualGraphBuilder); // 71
- CIRCLE_NODE(EXP, CircleExpGraphBuilder); // 47
- CIRCLE_NODE(FULLY_CONNECTED, CircleFullyConnectedGraphBuilder); // 9
- CIRCLE_NODE(LOGICAL_NOT, CircleLogicalNotGraphBuilder); // 87
- CIRCLE_NODE(LOGICAL_OR, CircleLogicalOrGraphBuilder); // 84
- CIRCLE_NODE(MAX_POOL_2D, CircleMaxPool2DGraphBuilder); // 17
- CIRCLE_NODE(MEAN, CircleMeanGraphBuilder); // 40
- CIRCLE_NODE(MUL, CircleMulGraphBuilder); // 18
- CIRCLE_NODE(PACK, CirclePackGraphBuilder); // 83
- CIRCLE_NODE(PAD, CirclePadGraphBuilder); // 34
- CIRCLE_NODE(RELU, CircleReluGraphBuilder); // 19
- CIRCLE_NODE(RESHAPE, CircleReshapeGraphBuilder); // 22
- CIRCLE_NODE(RSQRT, CircleRsqrtGraphBuilder); // 76
- CIRCLE_NODE(SOFTMAX, CircleSoftmaxGraphBuilder); // 25
- CIRCLE_NODE(SUB, CircleSubGraphBuilder); // 41
- CIRCLE_NODE(TRANSPOSE, CircleTransposeGraphBuilder); // 39
+ CIRCLE_NODE(ABS, CircleAbsGraphBuilder); // 101
+ CIRCLE_NODE(ADD, CircleAddGraphBuilder); // 0
+ CIRCLE_NODE(ADD_N, CircleAddNGraphBuilder); // 106
+ CIRCLE_NODE(ARG_MAX, CircleArgMaxGraphBuilder); // 56
+ CIRCLE_NODE(ARG_MIN, CircleArgMinGraphBuilder); // 79
+ CIRCLE_NODE(AVERAGE_POOL_2D, CircleAveragePool2DGraphBuilder); // 1
+ CIRCLE_NODE(BATCH_MATMUL, CircleBatchMatMulGraphBuilder); // 126
+ CIRCLE_NODE(BATCH_TO_SPACE_ND, CircleBatchToSpaceNDGraphBuilder); // 37
+ CIRCLE_NODE(BCQ_FULLY_CONNECTED, CircleBCQFullyConnectedGraphBuilder); // 253
+ CIRCLE_NODE(BCQ_GATHER, CircleBCQGatherGraphBuilder); // 252
+ CIRCLE_NODE(CAST, CircleCastGraphBuilder); // 53
+ CIRCLE_NODE(CEIL, CircleCeilGraphBuilder); // 104
+ CIRCLE_NODE(CUSTOM, CircleCustomGraphBuilder); // 32
+ CIRCLE_NODE(CONCATENATION, CircleConcatenationGraphBuilder); // 2
+ CIRCLE_NODE(CONV_2D, CircleConv2DGraphBuilder); // 3
+ CIRCLE_NODE(COS, CircleCosGraphBuilder); // 108
+ CIRCLE_NODE(DEPTH_TO_SPACE, CircleDepthToSpaceGraphBuilder); // 5
+ CIRCLE_NODE(DEPTHWISE_CONV_2D, CircleDepthwiseConv2DGraphBuilder); // 4
+ CIRCLE_NODE(DIV, CircleDivGraphBuilder); // 42
+ CIRCLE_NODE(ELU, CircleEluGraphBuilder); // 111
+ CIRCLE_NODE(EQUAL, CircleEqualGraphBuilder); // 71
+ CIRCLE_NODE(EXP, CircleExpGraphBuilder); // 47
+ CIRCLE_NODE(EXPAND_DIMS, CircleExpandDimsGraphBuilder); // 70
+ CIRCLE_NODE(FILL, CircleFillGraphBuilder); // 94
+ CIRCLE_NODE(FLOOR, CircleFloorGraphBuilder); // 8
+ CIRCLE_NODE(FLOOR_DIV, CircleFloorDivGraphBuilder); // 90
+ CIRCLE_NODE(FLOOR_MOD, CircleFloorModGraphBuilder); // 95
+ CIRCLE_NODE(FULLY_CONNECTED, CircleFullyConnectedGraphBuilder); // 9
+ CIRCLE_NODE(GATHER, CircleGatherGraphBuilder); // 36
+ CIRCLE_NODE(GATHER_ND, CircleGatherNdGraphBuilder); // 107
+ CIRCLE_NODE(GREATER, CircleGreaterGraphBuilder); // 61
+ CIRCLE_NODE(GREATER_EQUAL, CircleGreaterEqualGraphBuilder); // 62
+ CIRCLE_NODE(IF, CircleIfGraphBuilder); // 118
+ CIRCLE_NODE(INSTANCE_NORM, CircleInstanceNormGraphBuilder); // 254
+ CIRCLE_NODE(L2_NORMALIZATION, CircleL2NormalizeGraphBuilder); // 11
+ CIRCLE_NODE(L2_POOL_2D, CircleL2Pool2DGraphBuilder); // 12
+ CIRCLE_NODE(LEAKY_RELU, CircleLeakyReluGraphBuilder); // 98,
+ CIRCLE_NODE(LESS, CircleLessGraphBuilder); // 58
+ CIRCLE_NODE(LESS_EQUAL, CircleLessEqualGraphBuilder); // 63
+ CIRCLE_NODE(LOCAL_RESPONSE_NORMALIZATION, CircleLocalResponseNormalizationGraphBuilder); // 13
+ CIRCLE_NODE(LOG, CircleLogGraphBuilder); // 73
+ CIRCLE_NODE(LOGICAL_AND, CircleLogicalAndGraphBuilder); // 86
+ CIRCLE_NODE(LOGICAL_NOT, CircleLogicalNotGraphBuilder); // 87
+ CIRCLE_NODE(LOGICAL_OR, CircleLogicalOrGraphBuilder); // 84
+ CIRCLE_NODE(LOGISTIC, CircleLogisticGraphBuilder); // 14
+ CIRCLE_NODE(LOG_SOFTMAX, CircleLogSoftmaxGraphBuilder); // 50
+ CIRCLE_NODE(MATRIX_DIAG, CircleMatrixDiagGraphBuilder); // 113
+ CIRCLE_NODE(MATRIX_SET_DIAG, CircleMatrixSetDiagGraphBuilder); // 115
+ CIRCLE_NODE(MAXIMUM, CircleMaximumGraphBuilder); // 55
+ CIRCLE_NODE(MAX_POOL_2D, CircleMaxPool2DGraphBuilder); // 17
+ CIRCLE_NODE(MEAN, CircleMeanGraphBuilder); // 40
+ CIRCLE_NODE(MINIMUM, CircleMinimumGraphBuilder); // 57
+ CIRCLE_NODE(MIRROR_PAD, CircleMirrorPadGraphBuilder); // 100
+ CIRCLE_NODE(MUL, CircleMulGraphBuilder); // 18
+ CIRCLE_NODE(NEG, CircleNegGraphBuilder); // 59
+ CIRCLE_NODE(NOT_EQUAL, CircleNotEqualGraphBuilder); // 72
+ CIRCLE_NODE(ONE_HOT, CircleOneHotGraphBuilder); // 85
+ CIRCLE_NODE(PACK, CirclePackGraphBuilder); // 83
+ CIRCLE_NODE(PAD, CirclePadGraphBuilder); // 34
+ CIRCLE_NODE(POW, CirclePowGraphBuilder); // 78
+ CIRCLE_NODE(PRELU, CirclePReluGraphBuilder); // 54,
+ CIRCLE_NODE(RANGE, CircleRangeGraphBuilder); // 96
+ CIRCLE_NODE(RANK, CircleRankGraphBuilder); // 110
+ CIRCLE_NODE(REDUCE_ANY, CircleReduceAnyGraphBuilder); // 91
+ CIRCLE_NODE(REDUCE_MAX, CircleReduceMaxGraphBuilder); // 82
+ CIRCLE_NODE(REDUCE_MIN, CircleReduceMinGraphBuilder); // 89
+ CIRCLE_NODE(REDUCE_PROD, CircleReduceProdGraphBuilder); // 81
+ CIRCLE_NODE(RELU, CircleReluGraphBuilder); // 19
+ CIRCLE_NODE(RELU6, CircleRelu6GraphBuilder); // 21
+ CIRCLE_NODE(RELU_N1_TO_1, CircleReluN1To1GraphBuilder); // 20
+ CIRCLE_NODE(RESHAPE, CircleReshapeGraphBuilder); // 22
+ CIRCLE_NODE(RESIZE_BILINEAR, CircleResizeBilinearGraphBuilder); // 23
+ CIRCLE_NODE(RESIZE_NEAREST_NEIGHBOR, CircleResizeNearestNeighborGraphBuilder); // 97
+ CIRCLE_NODE(REVERSE_SEQUENCE, CircleReverseSequenceGraphBuilder); // 112
+ CIRCLE_NODE(REVERSE_V2, CircleReverseV2GraphBuilder); // 105
+ CIRCLE_NODE(ROUND, CircleRoundGraphBuilder); // 116
+ CIRCLE_NODE(RSQRT, CircleRsqrtGraphBuilder); // 76
+ CIRCLE_NODE(SCATTER_ND, CircleScatterNdGraphBuilder); // 122
+ CIRCLE_NODE(SEGMENT_SUM, CircleSegmentSumGraphBuilder); // 125
+ CIRCLE_NODE(SELECT, CircleSelectGraphBuilder); // 64
+ CIRCLE_NODE(SELECT_V2, CircleSelectV2GraphBuilder); // 123
+ CIRCLE_NODE(SHAPE, CircleShapeGraphBuilder); // 77
+ CIRCLE_NODE(SIN, CircleSinGraphBuilder); // 66
+ CIRCLE_NODE(SLICE, CircleSliceGraphBuilder); // 65
+ CIRCLE_NODE(SOFTMAX, CircleSoftmaxGraphBuilder); // 25
+ CIRCLE_NODE(SPACE_TO_BATCH_ND, CircleSpaceToBatchNDGraphBuilder); // 38
+ CIRCLE_NODE(SPACE_TO_DEPTH, CircleSpaceToDepthGraphBuilder); // 26
+ CIRCLE_NODE(SPARSE_TO_DENSE, CircleSparseToDenseGraphBuilder); // 68
+ CIRCLE_NODE(SPLIT, CircleSplitGraphBuilder); // 49
+ CIRCLE_NODE(SPLIT_V, CircleSplitVGraphBuilder); // 102
+ CIRCLE_NODE(SQRT, CircleSqrtGraphBuilder); // 75
+ CIRCLE_NODE(SQUARE, CircleSquareGraphBuilder); // 92
+ CIRCLE_NODE(SQUARED_DIFFERENCE, CircleSquaredDifferenceGraphBuilder); // 99
+ CIRCLE_NODE(SQUEEZE, CircleSqueezeGraphBuilder); // 43
+ CIRCLE_NODE(STRIDED_SLICE, CircleStridedSliceGraphBuilder); // 45
+ CIRCLE_NODE(SUB, CircleSubGraphBuilder); // 41
+ CIRCLE_NODE(SUM, CircleSumGraphBuilder); // 74
+ CIRCLE_NODE(TANH, CircleTanhGraphBuilder); // 28
+ CIRCLE_NODE(TILE, CircleTileGraphBuilder); // 69
+ CIRCLE_NODE(TOPK_V2, CircleTopKV2GraphBuilder); // 48
+ CIRCLE_NODE(TRANSPOSE, CircleTransposeGraphBuilder); // 39
+ CIRCLE_NODE(TRANSPOSE_CONV, CircleTransposeConvGraphBuilder); // 67
+ CIRCLE_NODE(UNPACK, CircleUnpackGraphBuilder); // 88
+ CIRCLE_NODE(WHERE, CircleWhereGraphBuilder); // 109
+ CIRCLE_NODE(WHILE, CircleWhileGraphBuilder); // 119
+ CIRCLE_NODE(ZEROS_LIKE, CircleZerosLikeGraphBuilder); // 93
#undef CIRCLE_NODE
// BuiltinOperator_DEQUANTIZE = 6,
// BuiltinOperator_EMBEDDING_LOOKUP = 7,
- // BuiltinOperator_FLOOR = 8,
// BuiltinOperator_HASHTABLE_LOOKUP = 10,
- // BuiltinOperator_L2_NORMALIZATION = 11,
- // BuiltinOperator_L2_POOL_2D = 12,
- // BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13,
- // BuiltinOperator_LOGISTIC = 14,
// BuiltinOperator_LSH_PROJECTION = 15,
// BuiltinOperator_LSTM = 16,
- // BuiltinOperator_RELU_N1_TO_1 = 20,
- // BuiltinOperator_RELU6 = 21,
- // BuiltinOperator_RESIZE_BILINEAR = 23,
// BuiltinOperator_RNN = 24,
- // BuiltinOperator_SPACE_TO_DEPTH = 26,
// BuiltinOperator_SVDF = 27,
- // BuiltinOperator_TANH = 28,
// BuiltinOperator_CONCAT_EMBEDDINGS = 29,
// BuiltinOperator_SKIP_GRAM = 30,
// BuiltinOperator_CALL = 31,
- // BuiltinOperator_CUSTOM = 32,
// BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33,
// BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- // BuiltinOperator_GATHER = 36,
- // BuiltinOperator_SPACE_TO_BATCH_ND = 38,
- // BuiltinOperator_SQUEEZE = 43,
// BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- // BuiltinOperator_STRIDED_SLICE = 45,
// BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46,
- // BuiltinOperator_TOPK_V2 = 48,
- // BuiltinOperator_SPLIT = 49,
- // BuiltinOperator_LOG_SOFTMAX = 50,
// BuiltinOperator_DELEGATE = 51,
// BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- // BuiltinOperator_CAST = 53,
- // BuiltinOperator_PRELU = 54,
- // BuiltinOperator_MAXIMUM = 55,
// BuiltinOperator_ARG_MAX = 56,
- // BuiltinOperator_MINIMUM = 57,
- // BuiltinOperator_LESS = 58,
- // BuiltinOperator_NEG = 59,
// BuiltinOperator_PADV2 = 60,
- // BuiltinOperator_GREATER = 61,
- // BuiltinOperator_GREATER_EQUAL = 62,
- // BuiltinOperator_LESS_EQUAL = 63,
- // BuiltinOperator_SELECT = 64,
- // BuiltinOperator_SLICE = 65,
- // BuiltinOperator_SIN = 66,
- // BuiltinOperator_TRANSPOSE_CONV = 67,
- // BuiltinOperator_SPARSE_TO_DENSE = 68,
- // BuiltinOperator_TILE = 69,
- // BuiltinOperator_EXPAND_DIMS = 70,
- // BuiltinOperator_NOT_EQUAL = 72,
- // BuiltinOperator_LOG = 73,
- // BuiltinOperator_SUM = 74,
- // BuiltinOperator_SQRT = 75,
- // BuiltinOperator_SHAPE = 77,
- // BuiltinOperator_POW = 78,
- // BuiltinOperator_ARG_MIN = 79,
// BuiltinOperator_FAKE_QUANT = 80,
- // BuiltinOperator_REDUCE_PROD = 81,
- // BuiltinOperator_REDUCE_MAX = 82,
- // BuiltinOperator_ONE_HOT = 85,
- // BuiltinOperator_LOGICAL_AND = 86,
- // BuiltinOperator_UNPACK = 88,
- // BuiltinOperator_REDUCE_MIN = 89,
- // BuiltinOperator_FLOOR_DIV = 90,
- // BuiltinOperator_REDUCE_ANY = 91,
- // BuiltinOperator_SQUARE = 92,
- // BuiltinOperator_ZEROS_LIKE = 93,
- // BuiltinOperator_FILL = 94,
- // BuiltinOperator_FLOOR_MOD = 95,
- // BuiltinOperator_RANGE = 96,
- // BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97,
- // BuiltinOperator_LEAKY_RELU = 98,
- // BuiltinOperator_SQUARED_DIFFERENCE = 99,
- // BuiltinOperator_MIRROR_PAD = 100,
- // BuiltinOperator_SPLIT_V = 102,
// BuiltinOperator_UNIQUE = 103,
- // BuiltinOperator_CEIL = 104,
- // BuiltinOperator_REVERSE_V2 = 105,
- // BuiltinOperator_ADD_N = 106,
- // BuiltinOperator_GATHER_ND = 107,
- // BuiltinOperator_WHERE = 109,
- // BuiltinOperator_RANK = 110,
- // BuiltinOperator_ELU = 111,
- // BuiltinOperator_REVERSE_SEQUENCE = 112,
- // BuiltinOperator_MATRIX_DIAG = 113,
// BuiltinOperator_QUANTIZE = 114,
- // BuiltinOperator_MATRIX_SET_DIAG = 115,
- // BuiltinOperator_ROUND = 116,
// BuiltinOperator_HARD_SWISH = 117,
- // BuiltinOperator_IF = 118,
- // BuiltinOperator_WHILE = 119,
// BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120,
// BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121,
- // BuiltinOperator_SCATTER_ND = 122,
- // BuiltinOperator_SELECT_V2 = 123,
// BuiltinOperator_DENSIFY = 124,
- // BuiltinOperator_SEGMENT_SUM = 125,
- // BuiltinOperator_BATCH_MATMUL = 126,
- // BuiltinOperator_INSTANCE_NORM = 254,
}
} // namespace luci
diff --git a/compiler/luci/import/src/Importer.cpp b/compiler/luci/import/src/Importer.cpp
index 964c47633..ab89f3587 100644
--- a/compiler/luci/import/src/Importer.cpp
+++ b/compiler/luci/import/src/Importer.cpp
@@ -15,6 +15,7 @@
*/
#include "luci/Importer.h"
+#include "PostImport.h"
#include "luci/Import/GraphBuilder.h"
#include "luci/Import/GraphBuilderContext.h"
@@ -27,6 +28,7 @@
#include <luci/Log.h>
#include <luci/LogHelper.h>
+#include <oops/InternalExn.h>
#include <oops/UserExn.h>
#include <memory>
@@ -40,11 +42,28 @@ void convert_graph(const luci::GraphBuilderSource &source, luci::CircleReader &r
LOGGER(l);
auto nodefinder = std::make_unique<luci::IndexNodeFinder>();
+ auto tensoroutputs = std::make_unique<luci::IndexTensorOutputs>();
- luci::GraphBuilderContext gb_context(graph, &reader, nodefinder.get());
+ luci::GraphBuilderContext gb_context(graph, &reader, nodefinder.get(), tensoroutputs.get());
const auto &operators = reader.operators();
const auto &tensors = reader.tensors();
+ auto tensors_ptr = reader.tensors_ptr();
+ assert(tensors_ptr != nullptr);
+
+ // build a cache to identify if a tensor is output of an operator
+ // if this is set, we should not create a CircleConst for this tensor
+ for (uint32_t i = 0; i < operators.size(); ++i)
+ {
+ const circle::OperatorT &op = *operators[i];
+ const auto &outputs = op.outputs;
+
+ for (uint32_t j = 0; j < outputs.size(); ++j)
+ {
+ auto tidx = outputs[j];
+ tensoroutputs->enroll(tidx);
+ }
+ }
// graph inputs; there are no input nodes in TFlite but just Tensors
// creating virtual input nodes will make possible to connect nodes that uses them
@@ -55,51 +74,43 @@ void convert_graph(const luci::GraphBuilderSource &source, luci::CircleReader &r
assert(input_node != nullptr);
const circle::TensorT &tensor = *tensors[input];
- auto tname = luci::tensor_name(tensor);
- input_node->name(tname);
- auto quantization = luci::tensor_quantization(tensor);
- if (quantization)
- {
- auto quantparam = luci::luci_quantparam(quantization);
- if (quantparam.get())
- input_node->quantparam(std::move(quantparam));
- }
+ luci::copy_tensor_attributes(tensor, input_node);
+ if (tensors_ptr->Get(input)->shape() == nullptr)
+ input_node->shape_status(luci::ShapeStatus::NOSHAPE);
+ else
+ input_node->shape_status(luci::ShapeStatus::VALID);
INFO(l) << "[luci] NodeFinder INPUT(" << input << ") = " << input_node << std::endl;
nodefinder->enroll(input, input_node);
- // Shape of Input
- const std::vector<int32_t> &input_dims = tensor.shape; // in NHWC
- input_node->rank(input_dims.size());
- for (uint32_t r = 0; r < input_dims.size(); ++r)
- input_node->dim(r) = loco::Dimension(input_dims[r]);
-
- // Data type of Input
- auto dtype = luci::luci_datatype(tensor.type);
- input_node->dtype(dtype);
+ // input_node is also an output to a tensor
+ tensoroutputs->enroll(input);
// Name
auto graph_input = graph->inputs()->create();
- graph_input->name(tname);
+ graph_input->name(input_node->name());
// Set GraphInputOutputIndex for graph
input_node->index(graph_input->index());
// Data type
- graph_input->dtype(dtype);
+ graph_input->dtype(input_node->dtype());
+
+ // Shape of GraphInput
+ auto input_shape = std::make_unique<loco::TensorShape>();
+ const std::vector<int32_t> &input_dims = tensor.shape; // in NHWC
+ input_shape->rank(input_dims.size());
+ for (uint32_t r = 0; r < input_dims.size(); ++r)
+ input_shape->dim(r) = loco::Dimension(input_dims[r]);
+ graph_input->shape(std::move(input_shape));
}
// Create CircleConst nodes for constant tensors.
- const auto &buffers = reader.buffers();
for (uint32_t i = 0; i < tensors.size(); ++i)
{
- const circle::TensorT &tensor = *tensors[i];
- const std::vector<uint8_t> &buffer = buffers[tensor.buffer]->data;
- if (!buffer.empty())
- {
- luci::CircleConst *const_node = luci::create_circleconst(&gb_context, i);
+ luci::CircleConst *const_node = luci::create_circleconst(&gb_context, i);
+ if (const_node != nullptr)
nodefinder->enroll(i, const_node);
- }
}
// Import the operators.
@@ -130,18 +141,38 @@ void convert_graph(const luci::GraphBuilderSource &source, luci::CircleReader &r
// graph outputs
for (auto output : reader.outputs())
{
+ const circle::TensorT &tensor = *tensors[output];
+
auto output_node = graph->nodes()->create<luci::CircleOutput>();
assert(output_node != nullptr);
- output_node->from(nodefinder->node(output));
+ auto output_from = nodefinder->node(output);
+ if (output_from != nullptr)
+ output_node->from(output_from);
+ else
+ {
+ // NOTE loco::Graph requires all input node(s) to a node should exist.
+ // Here, CircleOutput needs an input node.
+ // We add a dummy node to make it happy.
+ auto output_dummy = graph->nodes()->create<luci::CircleOutputDummy>();
+ assert(output_dummy != nullptr);
+ output_node->from(output_dummy);
+
+ luci::copy_tensor_attributes(tensor, output_dummy);
+ if (tensors_ptr->Get(output)->shape() == nullptr)
+ output_dummy->shape_status(luci::ShapeStatus::NOSHAPE);
+ else
+ output_dummy->shape_status(luci::ShapeStatus::VALID);
+ }
INFO(l) << "[luci] NodeFinder OUTPUT(" << output << ") = " << output_node << std::endl;
// set the graph output name and node object
- const circle::TensorT &tensor = *tensors[output];
auto graph_output = graph->outputs()->create();
std::string tname = luci::tensor_name(tensor);
graph_output->name("output_" + tname);
+ luci::copy_tensor_attributes(tensor, output_node);
+
// Set GraphInputOutputIndex for graph
output_node->index(graph_output->index());
@@ -195,8 +226,10 @@ std::unique_ptr<loco::Graph> Importer::import(const circle::Model *model) const
if (!reader.parse(model))
return nullptr;
- // TODO support multiple subgraph when Circle supports
- assert(reader.num_subgraph() == 1);
+ if (reader.num_subgraph() != 1)
+ {
+ INTERNAL_EXN("Use 'importModule()' for multiple subgraphs");
+ }
if (!reader.select_subgraph(0))
return nullptr;
@@ -204,11 +237,14 @@ std::unique_ptr<loco::Graph> Importer::import(const circle::Model *model) const
convert_graph(*source_ptr, reader, graph.get());
LOGGER(l);
- INFO(l) << fmt(graph.get());
+ VERBOSE(l, 3) << "--- graph dump begin -------------------------------------------";
+ VERBOSE(l, 3) << "Name: " << graph->name();
+ VERBOSE(l, 3) << fmt(graph.get());
+ VERBOSE(l, 3) << "--- graph dump end ---------------------------------------------";
assert(loco::valid(graph.get(), std::make_unique<ValidateCollector>()));
- return std::move(graph);
+ return graph;
}
std::unique_ptr<Module> Importer::importModule(const circle::Model *model) const
@@ -240,14 +276,19 @@ std::unique_ptr<Module> Importer::importModule(const circle::Model *model) const
convert_graph(*source_ptr, reader, graph.get());
LOGGER(l);
- INFO(l) << fmt(graph.get());
+ VERBOSE(l, 3) << "--- graph dump begin -------------------------------------------";
+ VERBOSE(l, 3) << "Name: " << graph->name();
+ VERBOSE(l, 3) << fmt(graph.get());
+ VERBOSE(l, 3) << "--- graph dump end ---------------------------------------------";
assert(loco::valid(graph.get(), std::make_unique<ValidateCollector>()));
module->add(std::move(graph));
}
- return std::move(module);
+ post_import_graph(module.get(), reader);
+
+ return module;
}
} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleAddN.cpp b/compiler/luci/import/src/Nodes/CircleAddN.cpp
new file mode 100644
index 000000000..2f1716e62
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleAddN.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleAddN.h"
+
+#include <luci/IR/Nodes/CircleAdd.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleAddNGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() < 1)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleAddNGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleAddN>(inputs.size());
+ for (uint32_t i = 0; i < inputs.size(); ++i)
+ {
+ node->inputs(i, inputs[i]);
+ }
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleArgMin.cpp b/compiler/luci/import/src/Nodes/CircleArgMin.cpp
new file mode 100644
index 000000000..4d85bbff0
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleArgMin.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleArgMin.h"
+
+#include <luci/IR/Nodes/CircleArgMin.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleArgMinGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleArgMinGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleArgMin>();
+ node->input(inputs[0]);
+ node->dimension(inputs[1]);
+
+ const auto *options = op.builtin_options.AsArgMinOptions();
+ node->output_type(luci_datatype(options->output_type));
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp b/compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp
new file mode 100644
index 000000000..7cc077ed6
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleBCQFullyConnected.h"
+
+#include <luci/IR/Nodes/CircleBCQFullyConnected.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleBCQFullyConnectedGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 5)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleBCQFullyConnectedGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleBCQFullyConnected>();
+
+ node->input(inputs[0]);
+ node->weights_scales(inputs[1]);
+ node->weights_binary(inputs[2]);
+ node->bias(inputs[3]);
+ node->weights_clusters(inputs[4]);
+
+ // TODO Find and move to appropriate place for setting optional input
+ if (auto bias = dynamic_cast<luci::CircleOutputExclude *>(node->bias()))
+ {
+ // bias is not used for type inference, but node itself should have a type
+ bias->dtype(loco::DataType::FLOAT32);
+
+ // bias is not used for shape inference
+ }
+
+ const auto *options = op.builtin_options.AsBCQFullyConnectedOptions();
+ node->weights_hidden_size(options->weights_hidden_size);
+ node->fusedActivationFunction(luci_actfunc(options->fused_activation_function));
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleBCQGather.cpp b/compiler/luci/import/src/Nodes/CircleBCQGather.cpp
new file mode 100644
index 000000000..c6d2ab559
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleBCQGather.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleBCQGather.h"
+
+#include <luci/IR/Nodes/CircleBCQGather.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleBCQGatherGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 4)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleBCQGatherGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleBCQGather>();
+
+ node->input_scales(inputs[0]);
+ node->input_binary(inputs[1]);
+ node->indices(inputs[2]);
+ node->input_clusters(inputs[3]);
+
+ const auto *options = op.builtin_options.AsBCQGatherOptions();
+ node->input_hidden_size(options->input_hidden_size);
+ node->axis(options->axis);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp b/compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp
new file mode 100644
index 000000000..6026b2a72
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleBatchMatMul.h"
+
+#include <luci/IR/Nodes/CircleBatchMatMul.h>
+
+namespace luci
+{
+
+bool CircleBatchMatMulGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleBatchMatMulGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleBatchMatMul>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ const auto *options = op.builtin_options.AsBatchMatMulOptions();
+ node->adj_x(options->adjoint_lhs);
+ node->adj_y(options->adjoint_rhs);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleCast.cpp b/compiler/luci/import/src/Nodes/CircleCast.cpp
new file mode 100644
index 000000000..a4d09b505
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleCast.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleCast.h"
+
+#include <luci/IR/Nodes/CircleCast.h>
+
+#include <luci/UserSettings.h>
+#include <luci/Log.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleCastGraphBuilder::validate(const ValidateArgs &args) const
+{
+ LOGGER(l);
+
+ auto settings = luci::UserSettings::settings();
+
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 1)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ // NOTE real models do have type mismatch
+ const auto *options = args.op.builtin_options.AsCastOptions();
+ if (options != nullptr)
+ {
+ const auto &tensors = args.reader.tensors();
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ auto name = tensor_name(output_tensor);
+
+ const auto &tensor_in = tensors.at(inputs[0]);
+ if (tensor_in->type != options->in_data_type)
+ {
+ if (settings->get(luci::UserSettings::Key::DisableValidation))
+ {
+ WARN(l) << "Warning: import Cast(" << name << ") dtype mismatch";
+ }
+ else
+ return false;
+ }
+ const auto &tensor_out = tensors.at(outputs[0]);
+ if (tensor_out->type != options->out_data_type)
+ {
+ if (settings->get(luci::UserSettings::Key::DisableValidation))
+ {
+ WARN(l) << "Warning: import Cast(" << name << ") dtype mismatch";
+ }
+ else
+ return false;
+ }
+ }
+
+ return true;
+}
+
+CircleNode *CircleCastGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleCast>();
+ node->x(inputs[0]);
+
+ const auto *options = op.builtin_options.AsCastOptions();
+ if (options != nullptr)
+ {
+ node->in_data_type(luci_datatype(options->in_data_type));
+ node->out_data_type(luci_datatype(options->out_data_type));
+ }
+ else
+ {
+ node->in_data_type(inputs[0]->dtype());
+ node->out_data_type(loco::DataType::Unknown);
+ // type inference should use node->dtype() for Unknown
+ // export should use BuiltinOptions_NONE for Unknown
+ }
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleCeil.cpp b/compiler/luci/import/src/Nodes/CircleCeil.cpp
new file mode 100644
index 000000000..d3d6cd945
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleCeil.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleCeil.h"
+
+#include <luci/IR/Nodes/CircleCeil.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleCeilGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 1)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ // TODO dtype check
+
+ return true;
+}
+
+CircleNode *CircleCeilGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleCeil>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleConst.cpp b/compiler/luci/import/src/Nodes/CircleConst.cpp
index 1d798983b..7131dc115 100644
--- a/compiler/luci/import/src/Nodes/CircleConst.cpp
+++ b/compiler/luci/import/src/Nodes/CircleConst.cpp
@@ -24,6 +24,24 @@
#include <cassert>
+namespace
+{
+
+std::ostream &operator<<(std::ostream &os, const std::vector<int32_t> &vect)
+{
+ uint32_t seq = 0;
+ for (auto &v : vect)
+ {
+ if (seq)
+ os << ", ";
+ os << v;
+ seq++;
+ }
+ return os;
+}
+
+} // namespace
+
namespace luci
{
@@ -53,55 +71,73 @@ CircleConst *create_circleconst(GraphBuilderContext *context, int32_t tensor_ind
auto graph = context->graph();
auto reader = context->reader();
const auto &tensors = reader->tensors();
-
- // (1) create CircleConst
- auto const_node = graph->nodes()->create<CircleConst>();
const circle::TensorT &const_tensor = *tensors[tensor_index];
- const_node->name(tensor_name(const_tensor));
- auto quantization = luci::tensor_quantization(const_tensor);
- if (quantization)
+
+ const std::vector<uint8_t> &buffer = reader->buffers()[const_tensor.buffer]->data;
+ std::vector<int32_t> const_dims = const_tensor.shape; // in NHWC
+ if (const_dims.size() == 0 && buffer.empty())
{
- auto quantparam = luci::luci_quantparam(quantization);
- if (quantparam.get())
- const_node->quantparam(std::move(quantparam));
+ // unknown shape tensor
+ return nullptr;
}
- INFO(l) << "[luci] NodeFinder const_node(" << tensor_index << ") -> " << const_node << std::endl;
-
- // (2) set data_type to CircleConst
- const_node->dtype(luci_datatype(const_tensor.type));
+ // if tensor_index is used as output to some other operator, this is not a constant
+ auto tensoroutputs = context->tensoroutputs();
+ if (tensoroutputs->find(tensor_index))
+ {
+ // other operator output tensor
+ return nullptr;
+ }
- // (3) set shape to CicleConst
- std::vector<int32_t> const_dims = const_tensor.shape; // in NHWC
- const_node->rank(const_dims.size());
uint32_t num_elements = 1;
for (uint32_t r = 0; r < const_dims.size(); ++r)
{
- const_node->dim(r) = loco::Dimension(const_dims[r]);
num_elements = num_elements * const_dims[r];
}
- // (4) constant values from circle buffer
- const std::vector<uint8_t> &buffer = reader->buffers()[const_tensor.buffer]->data;
- if (buffer.empty())
- throw oops::UserExn("Empty buffer");
-
- switch (luci_datatype(const_tensor.type))
+ if (buffer.empty() && num_elements > 0)
{
- case loco::DataType::FLOAT32:
- copy_data<loco::DataType::FLOAT32>(buffer, num_elements, const_node);
- break;
-
- case loco::DataType::U8:
- copy_data<loco::DataType::U8>(buffer, num_elements, const_node);
- break;
-
- case loco::DataType::S32:
- copy_data<loco::DataType::S32>(buffer, num_elements, const_node);
- break;
+ // normal empty tensor
+ return nullptr;
+ }
- default:
- throw oops::UserExn("Unsupported tensor type", circle::EnumNameTensorType(const_tensor.type));
+ auto const_node = graph->nodes()->create<CircleConst>();
+ copy_tensor_attributes(const_tensor, const_node);
+ const_node->shape_status(luci::ShapeStatus::VALID);
+ INFO(l) << "[luci] NodeFinder const_node(" << tensor_index << ") -> " << const_node << " "
+ << const_dims << std::endl;
+ if (num_elements > 0)
+ {
+ switch (luci_datatype(const_tensor.type))
+ {
+ case loco::DataType::FLOAT32:
+ copy_data<loco::DataType::FLOAT32>(buffer, num_elements, const_node);
+ break;
+
+ case loco::DataType::U8:
+ copy_data<loco::DataType::U8>(buffer, num_elements, const_node);
+ break;
+
+ case loco::DataType::S16:
+ copy_data<loco::DataType::S16>(buffer, num_elements, const_node);
+ break;
+
+ case loco::DataType::S32:
+ copy_data<loco::DataType::S32>(buffer, num_elements, const_node);
+ break;
+
+ case loco::DataType::S64:
+ copy_data<loco::DataType::S64>(buffer, num_elements, const_node);
+ break;
+
+ case loco::DataType::BOOL:
+ copy_data<loco::DataType::BOOL>(buffer, num_elements, const_node);
+ break;
+
+ default:
+ throw oops::UserExn("Unsupported tensor type",
+ circle::EnumNameTensorType(const_tensor.type));
+ }
}
return const_node;
diff --git a/compiler/luci/import/src/Nodes/CircleConv2D.cpp b/compiler/luci/import/src/Nodes/CircleConv2D.cpp
index ec9dce0d2..42c5c265a 100644
--- a/compiler/luci/import/src/Nodes/CircleConv2D.cpp
+++ b/compiler/luci/import/src/Nodes/CircleConv2D.cpp
@@ -50,7 +50,8 @@ CircleNode *CircleConv2DGraphBuilder::build_node(const circle::OperatorT &op,
node->stride()->w(options->stride_w);
node->stride()->h(options->stride_h);
node->fusedActivationFunction(luci_actfunc(options->fused_activation_function));
- // FIXME Check dilation_w_factor, dilation_h_factor.
+ node->dilation()->w(options->dilation_w_factor);
+ node->dilation()->h(options->dilation_h_factor);
return node;
}
diff --git a/compiler/luci/import/src/Nodes/CircleCustom.cpp b/compiler/luci/import/src/Nodes/CircleCustom.cpp
new file mode 100644
index 000000000..d541ee87b
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleCustom.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleCustom.h"
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleCustomGraphBuilder::validate(const ValidateArgs &) const
+{
+ // DO NOTHING
+ return true;
+}
+
+void CircleCustomGraphBuilder::build(const circle::OperatorT &op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ auto graph = context->graph();
+
+ const std::vector<int32_t> &inputs = op.inputs;
+ const std::vector<int32_t> &outputs = op.outputs;
+ const auto &tensors = context->reader()->tensors();
+ auto tensors_ptr = context->reader()->tensors_ptr();
+ assert(tensors_ptr != nullptr);
+
+ // Create CircleCustom
+ const auto &opcodes = context->reader()->opcodes();
+ const uint32_t opcode_index = op.opcode_index;
+ const circle::OperatorCodeT &opcode = *opcodes[opcode_index];
+
+ auto *node = graph->nodes()->create<CircleCustom>(inputs.size());
+ uint32_t input_idx = 0;
+ for (const int32_t input_tensor_index : inputs)
+ {
+ node->inputs(input_idx++, context->nodefinder()->node(input_tensor_index));
+ }
+ node->custom_options(std::vector<uint8_t>{op.custom_options.begin(), op.custom_options.end()});
+ node->custom_code(opcode.custom_code);
+ // Operator version of custom is always 1, so do nothing
+
+ uint32_t output_count = outputs.size();
+
+ assert(output_count > 0);
+ {
+ // Let's use attributes from output 0 for this node
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ node->name(tensor_name(output_tensor));
+ node->dtype(luci_datatype(output_tensor.type));
+ }
+
+ // Create virtual outputs of Custom
+ for (uint32_t n = 0; n < output_count; ++n)
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[n]];
+
+ auto *nodeout = graph->nodes()->create<CircleCustomOut>();
+ copy_tensor_attributes(output_tensor, nodeout);
+ // mark shape_status
+ if (tensors_ptr->Get(outputs[n])->shape() == nullptr)
+ nodeout->shape_status(ShapeStatus::NOSHAPE);
+ else
+ nodeout->shape_status(ShapeStatus::VALID);
+
+ nodeout->input(node);
+ nodeout->index(n);
+
+ context->nodefinder()->enroll(outputs[n], nodeout);
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp b/compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp
new file mode 100644
index 000000000..827b63468
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleDepthToSpace.h"
+
+#include <luci/IR/Nodes/CircleDepthToSpace.h>
+
+#include <loco.h>
+
+#include <cassert>
+
+namespace luci
+{
+
+bool CircleDepthToSpaceGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ const auto *options = args.op.builtin_options.AsDepthToSpaceOptions();
+
+ if (inputs.size() != 1)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+
+ if (tensors[outputs[0]]->type != tensors[inputs[0]]->type)
+ {
+ return false;
+ }
+
+ if (options->block_size < 2)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleDepthToSpaceGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleDepthToSpace>();
+ node->input(inputs[0]);
+
+ const auto *options = op.builtin_options.AsDepthToSpaceOptions();
+ node->block_size(options->block_size);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp b/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp
index c6d3b1f1e..2b13f9ebb 100644
--- a/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp
+++ b/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp
@@ -52,7 +52,8 @@ CircleNode *CircleDepthwiseConv2DGraphBuilder::build_node(const circle::Operator
node->stride()->h(options->stride_h);
node->depthMultiplier(options->depth_multiplier);
node->fusedActivationFunction(luci_actfunc(options->fused_activation_function));
- // FIXME Check dilation_w_factor, dilation_h_factor.
+ node->dilation()->w(options->dilation_w_factor);
+ node->dilation()->h(options->dilation_h_factor);
return node;
}
diff --git a/compiler/luci/import/src/Nodes/CircleElu.cpp b/compiler/luci/import/src/Nodes/CircleElu.cpp
new file mode 100644
index 000000000..37a290cb1
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleElu.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleElu.h"
+
+#include <luci/IR/Nodes/CircleElu.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleEluGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 1)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT32:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensors[outputs[0]]->type != tensor->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleEluGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleElu>();
+ node->features(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleExp.cpp b/compiler/luci/import/src/Nodes/CircleExp.cpp
index 44fc93d09..a32851458 100644
--- a/compiler/luci/import/src/Nodes/CircleExp.cpp
+++ b/compiler/luci/import/src/Nodes/CircleExp.cpp
@@ -16,7 +16,7 @@
#include "luci/Import/Nodes/CircleExp.h"
-#include <luci/IR/Nodes/CircleAbs.h>
+#include <luci/IR/Nodes/CircleExp.h>
#include <loco.h>
@@ -50,7 +50,7 @@ CircleNode *CircleExpGraphBuilder::build_node(const circle::OperatorT &,
const std::vector<CircleNode *> &inputs,
loco::Graph *graph) const
{
- auto *node = graph->nodes()->create<CircleAbs>();
+ auto *node = graph->nodes()->create<CircleExp>();
node->x(inputs[0]);
return node;
diff --git a/compiler/luci/import/src/Nodes/CircleExpandDims.cpp b/compiler/luci/import/src/Nodes/CircleExpandDims.cpp
new file mode 100644
index 000000000..1cef67a83
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleExpandDims.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleExpandDims.h"
+
+#include <luci/IR/Nodes/CircleExpandDims.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleExpandDimsGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+
+ if (inputs.size() != 2)
+ {
+ return false;
+ }
+
+ const auto &tensors = args.reader.tensors();
+
+ return tensors[inputs[1]]->type == circle::TensorType_INT32;
+}
+
+CircleNode *CircleExpandDimsGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleExpandDims>();
+ node->input(inputs[0]);
+ node->axis(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleFill.cpp b/compiler/luci/import/src/Nodes/CircleFill.cpp
new file mode 100644
index 000000000..6c3d3a247
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleFill.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleFill.h"
+
+#include <luci/IR/Nodes/CircleFill.h>
+
+namespace luci
+{
+
+bool CircleFillGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleFillGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleFill>();
+ node->dims(inputs[0]);
+ node->value(inputs[1]);
+
+ const auto *options = op.builtin_options.AsFillOptions();
+ (void)options;
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleFloor.cpp b/compiler/luci/import/src/Nodes/CircleFloor.cpp
new file mode 100644
index 000000000..302a9eae3
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleFloor.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleFloor.h"
+
+#include <luci/IR/Nodes/CircleFloor.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleFloorGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 1)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ // TODO dtype check
+
+ return true;
+}
+
+CircleNode *CircleFloorGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleFloor>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleFloorDiv.cpp b/compiler/luci/import/src/Nodes/CircleFloorDiv.cpp
new file mode 100644
index 000000000..875197890
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleFloorDiv.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleFloorDiv.h"
+
+#include <luci/IR/Nodes/CircleFloorDiv.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleFloorDivGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ {
+ return false;
+ }
+
+ if (outputs.size() != 1)
+ {
+ return false;
+ }
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_in_0 = tensors.at(inputs[0]);
+ const auto &tensor_in_1 = tensors.at(inputs[1]);
+ const auto &tensor_out = tensors.at(outputs[0]);
+
+ if (tensor_in_0->type != tensor_in_1->type)
+ return false;
+
+ if (tensor_out->type != tensor_in_1->type)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleFloorDivGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleFloorDiv>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleFloorMod.cpp b/compiler/luci/import/src/Nodes/CircleFloorMod.cpp
new file mode 100644
index 000000000..3ccdce0cd
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleFloorMod.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleFloorMod.h"
+
+#include <luci/IR/Nodes/CircleFloorMod.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleFloorModGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 2)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_in_0 = tensors.at(inputs[0]);
+ const auto &tensor_in_1 = tensors.at(inputs[1]);
+ if (tensor_in_0->type != tensor_in_1->type)
+ return false;
+
+ // TODO dtype check
+
+ return true;
+}
+
+CircleNode *CircleFloorModGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleFloorMod>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp b/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp
index 8f74fe9ce..8937e78f1 100644
--- a/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp
+++ b/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp
@@ -17,6 +17,7 @@
#include "luci/Import/Nodes/CircleFullyConnected.h"
#include <luci/IR/Nodes/CircleFullyConnected.h>
+#include <luci/IR/Nodes/CircleOutput.h>
#include <loco.h>
#include <oops/UserExn.h>
@@ -39,7 +40,16 @@ CircleNode *CircleFullyConnectedGraphBuilder::build_node(const circle::OperatorT
auto *node = graph->nodes()->create<CircleFullyConnected>();
node->input(inputs[0]);
node->weights(inputs[1]);
- node->bias(inputs[2]);
+ node->bias(inputs[2]); // bias is optional
+
+ // TODO Find and move to appropriate place for setting optional input
+ if (auto bias = dynamic_cast<luci::CircleOutputExclude *>(node->bias()))
+ {
+ // bias is not used for type inference, but node itself should have a type
+ bias->dtype(loco::DataType::FLOAT32);
+
+ // bias is not used for shape inference
+ }
const auto *options = op.builtin_options.AsFullyConnectedOptions();
node->fusedActivationFunction(luci_actfunc(options->fused_activation_function));
diff --git a/compiler/luci/import/src/Nodes/CircleGather.cpp b/compiler/luci/import/src/Nodes/CircleGather.cpp
new file mode 100644
index 000000000..1caa05ec2
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleGather.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleGather.h"
+
+#include <luci/IR/Nodes/CircleGather.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleGatherGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ const auto *options = args.op.builtin_options.AsGatherOptions();
+
+ int32_t axis = options->axis;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ if (axis < 0)
+ axis += inputs.size();
+
+ if (axis < 0)
+ return false;
+
+ // TODO do indices type check
+ // TODO do axis check when shape information is given
+
+ return true;
+}
+
+CircleNode *CircleGatherGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleGather>();
+
+ node->params(inputs[0]);
+ node->indices(inputs[1]);
+
+ const auto *options = op.builtin_options.AsGatherOptions();
+ node->axis(options->axis);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleGatherNd.cpp b/compiler/luci/import/src/Nodes/CircleGatherNd.cpp
new file mode 100644
index 000000000..621d4ae92
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleGatherNd.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleGatherNd.h"
+
+#include <luci/IR/Nodes/CircleGatherNd.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+#include <mio/circle/schema_generated.h>
+
+namespace luci
+{
+
+bool CircleGatherNdGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ auto &indices_tensor = args.reader.tensors()[inputs[1]];
+
+ if (!(indices_tensor->type == circle::TensorType::TensorType_INT32 ||
+ indices_tensor->type == circle::TensorType::TensorType_INT64))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleGatherNdGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleGatherNd>();
+
+ node->params(inputs[0]);
+ node->indices(inputs[1]);
+
+ // GatherNd options empty
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleGreater.cpp b/compiler/luci/import/src/Nodes/CircleGreater.cpp
new file mode 100644
index 000000000..88107589c
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleGreater.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleGreater.h"
+
+#include <luci/IR/Nodes/CircleGreater.h>
+
+#include <luci/UserSettings.h>
+#include <luci/Log.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleGreaterGraphBuilder::validate(const ValidateArgs &args) const
+{
+ LOGGER(l);
+
+ auto settings = luci::UserSettings::settings();
+
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+
+ if (tensors[inputs[0]]->type != tensors[inputs[1]]->type)
+ return false;
+
+ // NOTE: real models do have output dtype NOT BOOL
+ if (tensors[outputs[0]]->type != circle::TensorType_BOOL)
+ {
+ if (settings->get(luci::UserSettings::Key::DisableValidation))
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ auto name = tensor_name(output_tensor);
+ WARN(l) << "Warning: import Greater(" << name << ") output dtype is not boolean";
+ }
+ else
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleGreaterGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleGreater>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp b/compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp
new file mode 100644
index 000000000..dff1510c5
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleGreaterEqual.h"
+
+#include <luci/IR/Nodes/CircleGreaterEqual.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleGreaterEqualGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ {
+ return false;
+ }
+
+ if (outputs.size() != 1)
+ {
+ return false;
+ }
+
+ const auto &tensors = args.reader.tensors();
+
+ if (tensors[inputs[0]]->type != tensors[inputs[1]]->type)
+ {
+ return false;
+ }
+
+ return tensors[outputs[0]]->type == circle::TensorType::TensorType_BOOL;
+}
+
+CircleNode *CircleGreaterEqualGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleGreaterEqual>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleIf.cpp b/compiler/luci/import/src/Nodes/CircleIf.cpp
new file mode 100644
index 000000000..d6090640d
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleIf.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleIf.h"
+
+#include <luci/IR/Nodes/CircleIf.h>
+#include <luci/IR/Nodes/CircleIfOut.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleIfGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto *options = args.op.builtin_options.AsIfOptions();
+
+ if (inputs.size() < 2) // cond + input
+ return false;
+ if (args.op.outputs.size() < 1) // output
+ return false;
+
+ auto num_graphs = static_cast<int32_t>(args.reader.num_subgraph());
+ if (options->then_subgraph_index >= num_graphs)
+ return false;
+ if (options->else_subgraph_index >= num_graphs)
+ return false;
+
+ // input 0 should be BOOL type
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ if (tensor->type != circle::TensorType_BOOL)
+ return false;
+
+ const auto &shape = tensor->shape;
+ if (shape.size() != 1 && shape.size() != 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * @brief If Node builder
+ *
+ * @note Current loco does not provide multiple outputs
+ * We will create multiple CircleIfOut nodes to emulate this
+ * For two outputs that may look like this
+ *
+ * --- CircleIf --- Node ---
+ * \- Node ---
+ *
+ * will be created like this
+ *
+ * --- CircleIf --- CircleIfOut --- Node ---
+ * \- CircleIfOut --- Node ---
+ */
+
+void CircleIfGraphBuilder::build(const circle::OperatorT &op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ auto graph = context->graph();
+
+ const std::vector<int32_t> &inputs = op.inputs;
+ const std::vector<int32_t> &outputs = op.outputs;
+ const auto &tensors = context->reader()->tensors();
+ const auto &opcodes = context->reader()->opcodes();
+ auto tensors_ptr = context->reader()->tensors_ptr();
+ assert(tensors_ptr != nullptr);
+
+ std::vector<CircleNode *> input_nodes;
+ for (const int32_t input_tensor_index : inputs)
+ {
+ input_nodes.push_back(context->nodefinder()->node(input_tensor_index));
+ }
+
+ uint32_t input_count = inputs.size() - 1;
+ uint32_t output_count = outputs.size();
+
+ // Create CircleIf
+ CircleIf *node = graph->nodes()->create<CircleIf>(input_count, output_count);
+
+ node->cond(input_nodes[0]);
+ for (uint32_t idx = 0; idx < input_count; ++idx)
+ {
+ node->input(idx, input_nodes[idx + 1]);
+ }
+
+ const auto *options = op.builtin_options.AsIfOptions();
+ node->then_branch(options->then_subgraph_index);
+ node->else_branch(options->else_subgraph_index);
+
+ assert(outputs.size() > 0);
+ {
+ // Lets use name of output 0 as If name
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ node->name(tensor_name(output_tensor));
+ node->op_version(opcodes[op.opcode_index].get()->version);
+
+ // NOTE We don't set quantization for If itself but to virtual outputs
+ }
+
+ // Create virtual outputs of If
+ for (uint32_t n = 0; n < output_count; ++n)
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[n]];
+
+ auto *nodeout = graph->nodes()->create<CircleIfOut>();
+ copy_tensor_attributes(output_tensor, nodeout);
+ // mark shape_status
+ if (tensors_ptr->Get(outputs[n])->shape() == nullptr)
+ nodeout->shape_status(ShapeStatus::NOSHAPE);
+ else
+ nodeout->shape_status(ShapeStatus::VALID);
+
+ nodeout->input(node);
+ nodeout->index(n);
+
+ context->nodefinder()->enroll(outputs[n], nodeout);
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp b/compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp
new file mode 100644
index 000000000..b95c54c89
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleInstanceNorm.h"
+
+#include <luci/IR/Nodes/CircleInstanceNorm.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleInstanceNormGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 3)
+ return false;
+
+ // TODO check dtypes
+
+ return true;
+}
+
+CircleNode *CircleInstanceNormGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleInstanceNorm>();
+ node->input(inputs[0]);
+ node->gamma(inputs[1]);
+ node->beta(inputs[2]);
+
+ const auto *options = op.builtin_options.AsInstanceNormOptions();
+ node->epsilon(options->epsilon);
+ node->fusedActivationFunction(luci_actfunc(options->fused_activation_function));
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleL2Normalize.cpp b/compiler/luci/import/src/Nodes/CircleL2Normalize.cpp
new file mode 100644
index 000000000..fe10a8572
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleL2Normalize.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleL2Normalize.h"
+
+#include <luci/IR/Nodes/CircleL2Normalize.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleL2NormalizeGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 1)
+ {
+ return false;
+ }
+
+ if (outputs.size() != 1)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleL2NormalizeGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleL2Normalize>();
+ node->x(inputs[0]);
+ const auto *options = op.builtin_options.AsL2NormOptions();
+ node->fusedActivationFunction(luci_actfunc(options->fused_activation_function));
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp b/compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp
new file mode 100644
index 000000000..023206695
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleL2Pool2D.h"
+
+#include <luci/IR/Nodes/CircleL2Pool2D.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleL2Pool2DGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ // TODO check dtypes
+
+ return true;
+}
+
+CircleNode *CircleL2Pool2DGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleL2Pool2D>();
+ node->value(inputs[0]);
+
+ const auto *options = op.builtin_options.AsPool2DOptions();
+ node->padding(luci_padding(options->padding));
+ node->stride()->w(options->stride_w);
+ node->stride()->h(options->stride_h);
+ node->filter()->w(options->filter_width);
+ node->filter()->h(options->filter_height);
+ node->fusedActivationFunction(luci_actfunc(options->fused_activation_function));
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp b/compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp
new file mode 100644
index 000000000..4957ceae0
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLeakyRelu.h"
+
+#include <luci/IR/Nodes/CircleLeakyRelu.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLeakyReluGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleLeakyReluGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLeakyRelu>();
+ node->features(inputs[0]);
+
+ const auto *options = op.builtin_options.AsLeakyReluOptions();
+ node->alpha(options->alpha);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLess.cpp b/compiler/luci/import/src/Nodes/CircleLess.cpp
new file mode 100644
index 000000000..40ad28c6e
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLess.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLess.h"
+
+#include <luci/IR/Nodes/CircleLess.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLessGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ {
+ return false;
+ }
+
+ if (outputs.size() != 1)
+ {
+ return false;
+ }
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_INT32:
+ case circle::TensorType_UINT8:
+ case circle::TensorType_INT16:
+ case circle::TensorType_INT8:
+ case circle::TensorType_INT64:
+ case circle::TensorType_FLOAT16:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensors[inputs[1]]->type != tensor->type)
+ {
+ return false;
+ }
+
+ return tensors[outputs[0]]->type == circle::TensorType_BOOL;
+}
+
+CircleNode *CircleLessGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLess>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLessEqual.cpp b/compiler/luci/import/src/Nodes/CircleLessEqual.cpp
new file mode 100644
index 000000000..13e995069
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLessEqual.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLessEqual.h"
+
+#include <luci/IR/Nodes/CircleLessEqual.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLessEqualGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ {
+ return false;
+ }
+
+ if (outputs.size() != 1)
+ {
+ return false;
+ }
+
+ const auto &tensors = args.reader.tensors();
+
+ if (tensors[inputs[0]]->type != tensors[inputs[1]]->type)
+ {
+ return false;
+ }
+
+ return tensors[outputs[0]]->type == circle::TensorType::TensorType_BOOL;
+}
+
+CircleNode *CircleLessEqualGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLessEqual>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp b/compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp
new file mode 100644
index 000000000..7b1f0db56
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLocalResponseNormalization.h"
+
+#include <luci/IR/Nodes/CircleLocalResponseNormalization.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLocalResponseNormalizationGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ // TODO do attribute checks
+
+ return true;
+}
+
+CircleNode *CircleLocalResponseNormalizationGraphBuilder::build_node(
+ const circle::OperatorT &op, const std::vector<CircleNode *> &inputs, loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLocalResponseNormalization>();
+ node->input(inputs[0]);
+
+ const auto *options = op.builtin_options.AsLocalResponseNormalizationOptions();
+ node->radius(options->radius);
+ node->bias(options->bias);
+ node->alpha(options->alpha);
+ node->beta(options->beta);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLog.cpp b/compiler/luci/import/src/Nodes/CircleLog.cpp
new file mode 100644
index 000000000..21408327d
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLog.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLog.h"
+
+#include <luci/IR/Nodes/CircleLog.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLogGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 1)
+ return false;
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ // input type check
+ // Must be one of bfloat16, half, float32, float64, complex64, complex128.
+ // Currently circle supports half(float16), float32, float64, complex64.
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_COMPLEX64:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleLogGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLog>();
+ node->x(inputs[0]);
+
+ // No options for Log
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp b/compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp
new file mode 100644
index 000000000..e738c4a0c
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLogSoftmax.h"
+
+#include <luci/IR/Nodes/CircleLogSoftmax.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLogSoftmaxGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ // TODO do attribute checks
+
+ return true;
+}
+
+CircleNode *CircleLogSoftmaxGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLogSoftmax>();
+ node->logits(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp b/compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp
new file mode 100644
index 000000000..8509dbaf3
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLogicalAnd.h"
+
+#include <luci/IR/Nodes/CircleLogicalAnd.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLogicalAndGraphBuilder::validate(const ValidateArgs &args) const
+{
+ // Only BOOL type is allowed for inputs
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 2)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ for (auto input : inputs)
+ {
+ const auto &tensor = tensors.at(input);
+ if (tensor->type != circle::TensorType::TensorType_BOOL)
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleLogicalAndGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLogicalAnd>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleLogistic.cpp b/compiler/luci/import/src/Nodes/CircleLogistic.cpp
new file mode 100644
index 000000000..85e7e55b2
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleLogistic.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleLogistic.h"
+
+#include <luci/IR/Nodes/CircleLogistic.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleLogisticGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 1)
+ return false;
+ const auto &outputs = args.op.outputs;
+ if (outputs.size() != 1)
+ return false;
+
+ // Must be one of the following types
+ // float16, float32, float64, complex64, or complex128
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_COMPLEX64:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensors.at(inputs[0])->type != tensors.at(outputs[0])->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleLogisticGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleLogistic>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp b/compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp
new file mode 100644
index 000000000..f4ae03c58
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleMatrixDiag.h"
+
+#include <luci/IR/Nodes/CircleMatrixDiag.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleMatrixDiagGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 1)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+
+ if (tensors[outputs[0]]->type != tensor->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleMatrixDiagGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleMatrixDiag>();
+ node->diagonal(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp b/compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp
new file mode 100644
index 000000000..d6f6aee33
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleMatrixSetDiag.h"
+
+#include <luci/IR/Nodes/CircleMatrixSetDiag.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleMatrixSetDiagGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+
+ if (tensors[outputs[0]]->type != tensor->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleMatrixSetDiagGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleMatrixSetDiag>();
+ node->input(inputs[0]);
+ node->diagonal(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleMaximum.cpp b/compiler/luci/import/src/Nodes/CircleMaximum.cpp
new file mode 100644
index 000000000..6ca7e4079
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleMaximum.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleMaximum.h"
+
+#include <luci/IR/Nodes/CircleMaximum.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleMaximumGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensors[inputs[1]]->type != tensor->type)
+ return false;
+
+ if (tensors[outputs[0]]->type != tensor->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleMaximumGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleMaximum>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleMinimum.cpp b/compiler/luci/import/src/Nodes/CircleMinimum.cpp
new file mode 100644
index 000000000..b770f365f
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleMinimum.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleMinimum.h"
+
+#include <luci/IR/Nodes/CircleMinimum.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleMinimumGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensors[inputs[1]]->type != tensor->type)
+ return false;
+
+ if (tensors[outputs[0]]->type != tensor->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleMinimumGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleMinimum>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleMirrorPad.cpp b/compiler/luci/import/src/Nodes/CircleMirrorPad.cpp
new file mode 100644
index 000000000..41b5e5d80
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleMirrorPad.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleMirrorPad.h"
+
+#include <luci/IR/Nodes/CircleMirrorPad.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleMirrorPadGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ // TODO check others
+
+ return true;
+}
+
+CircleNode *CircleMirrorPadGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleMirrorPad>();
+ node->input(inputs[0]);
+ node->paddings(inputs[1]);
+
+ const auto *options = op.builtin_options.AsMirrorPadOptions();
+ node->mode(luci_mirrorpad_mode(options->mode));
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleNeg.cpp b/compiler/luci/import/src/Nodes/CircleNeg.cpp
new file mode 100644
index 000000000..3d3079ca2
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleNeg.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleNeg.h"
+
+#include <luci/IR/Nodes/CircleNeg.h>
+
+#include <loco.h>
+
+namespace luci
+{
+bool CircleNegGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ // TODO Support type check
+ return true;
+}
+
+CircleNode *CircleNegGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleNeg>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleNotEqual.cpp b/compiler/luci/import/src/Nodes/CircleNotEqual.cpp
new file mode 100644
index 000000000..5b04856db
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleNotEqual.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleNotEqual.h"
+
+#include <luci/IR/Nodes/CircleNotEqual.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleNotEqualGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ {
+ return false;
+ }
+
+ if (outputs.size() != 1)
+ {
+ return false;
+ }
+
+ const auto &tensors = args.reader.tensors();
+
+ if (tensors[inputs[0]]->type != tensors[inputs[1]]->type)
+ {
+ return false;
+ }
+
+ return tensors[outputs[0]]->type == circle::TensorType::TensorType_BOOL;
+}
+
+CircleNode *CircleNotEqualGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleNotEqual>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleOneHot.cpp b/compiler/luci/import/src/Nodes/CircleOneHot.cpp
new file mode 100644
index 000000000..9fdbfa84d
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleOneHot.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleOneHot.h"
+
+#include <luci/IR/Nodes/CircleOneHot.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleOneHotGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ const auto *options = args.op.builtin_options.AsOneHotOptions();
+
+ // OneHot accepts exactly 4 inputs: indices, depth, on_value and off_value
+ if (inputs.size() != 4)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &indices = tensors.at(inputs[0]);
+ const auto &depth = tensors.at(inputs[1]);
+ const auto &on_value = tensors.at(inputs[2]);
+ const auto &off_value = tensors.at(inputs[3]);
+
+ if (options->axis < -1 || options->axis > static_cast<int32_t>(indices->shape.size()))
+ return false;
+ if (depth->shape.size() != 0)
+ return false;
+ if (on_value->shape.size() != 0)
+ return false;
+ if (off_value->shape.size() != 0)
+ return false;
+ if (on_value->type != off_value->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleOneHotGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleOneHot>();
+
+ node->indices(inputs[0]);
+ node->depth(inputs[1]);
+ node->on_value(inputs[2]);
+ node->off_value(inputs[3]);
+
+ const auto *options = op.builtin_options.AsOneHotOptions();
+ node->axis(options->axis);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CirclePRelu.cpp b/compiler/luci/import/src/Nodes/CirclePRelu.cpp
new file mode 100644
index 000000000..0d87cd423
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CirclePRelu.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CirclePRelu.h"
+
+#include <luci/IR/Nodes/CirclePRelu.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CirclePReluGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CirclePReluGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CirclePRelu>();
+ node->input(inputs[0]);
+ node->alpha(inputs[1]);
+
+ // PRelu options are empty
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CirclePow.cpp b/compiler/luci/import/src/Nodes/CirclePow.cpp
new file mode 100644
index 000000000..ff9833165
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CirclePow.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CirclePow.h"
+
+#include <luci/IR/Nodes/CirclePow.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CirclePowGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CirclePowGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CirclePow>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ // Pow options are empty
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleRange.cpp b/compiler/luci/import/src/Nodes/CircleRange.cpp
new file mode 100644
index 000000000..c21191605
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleRange.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleRange.h"
+
+#include <luci/IR/Nodes/CircleRange.h>
+
+#include <loco.h>
+
+namespace luci
+{
+bool CircleRangeGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 3)
+ return false;
+
+ // TODO Support type check
+ return true;
+}
+
+CircleNode *CircleRangeGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleRange>();
+ node->start(inputs[0]);
+ node->limit(inputs[1]);
+ node->delta(inputs[2]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleRank.cpp b/compiler/luci/import/src/Nodes/CircleRank.cpp
new file mode 100644
index 000000000..705ae0120
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleRank.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleRank.h"
+
+#include <luci/IR/Nodes/CircleRank.h>
+
+#include <loco.h>
+
+namespace luci
+{
+bool CircleRankGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleRankGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleRank>();
+ node->input(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReduceAny.cpp b/compiler/luci/import/src/Nodes/CircleReduceAny.cpp
new file mode 100644
index 000000000..030c5304c
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleReduceAny.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleReduceAny.h"
+
+#include <luci/IR/Nodes/CircleReduceAny.h>
+
+namespace luci
+{
+
+bool CircleReduceAnyGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 2)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_0 = tensors.at(inputs[0]);
+ const auto &tensor_1 = tensors.at(inputs[1]);
+ const auto &tensor_o = tensors.at(outputs[0]);
+
+ if (tensor_0->type != circle::TensorType_BOOL)
+ return false;
+ if (tensor_o->type != circle::TensorType_BOOL)
+ return false;
+
+ switch (tensor_1->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleReduceAnyGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleReduceAny>();
+ node->input(inputs[0]);
+ node->reduction_indices(inputs[1]);
+
+ const auto *options = op.builtin_options.AsReducerOptions();
+ node->keep_dims(options->keep_dims);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReduceMax.cpp b/compiler/luci/import/src/Nodes/CircleReduceMax.cpp
new file mode 100644
index 000000000..8ca8e2e34
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleReduceMax.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleReduceMax.h"
+
+#include <luci/IR/Nodes/CircleReduceMax.h>
+
+namespace luci
+{
+
+bool CircleReduceMaxGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_axis = tensors.at(inputs[1]);
+
+ switch (tensor_axis->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleReduceMaxGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleReduceMax>();
+ node->input(inputs[0]);
+ node->reduction_indices(inputs[1]);
+
+ const auto *options = op.builtin_options.AsReducerOptions();
+ node->keep_dims(options->keep_dims);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReduceMin.cpp b/compiler/luci/import/src/Nodes/CircleReduceMin.cpp
new file mode 100644
index 000000000..3020c3778
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleReduceMin.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleReduceMin.h"
+
+#include <luci/IR/Nodes/CircleReduceMin.h>
+
+namespace luci
+{
+
+bool CircleReduceMinGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_axis = tensors.at(inputs[1]);
+
+ switch (tensor_axis->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleReduceMinGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleReduceMin>();
+ node->input(inputs[0]);
+ node->reduction_indices(inputs[1]);
+
+ const auto *options = op.builtin_options.AsReducerOptions();
+ node->keep_dims(options->keep_dims);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReduceProd.cpp b/compiler/luci/import/src/Nodes/CircleReduceProd.cpp
new file mode 100644
index 000000000..2bb43f6ce
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleReduceProd.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleReduceProd.h"
+
+#include <luci/IR/Nodes/CircleReduceProd.h>
+
+namespace luci
+{
+
+bool CircleReduceProdGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 2)
+ return false;
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_1 = tensors.at(inputs[1]);
+
+ // TODO check input types
+
+ // Check for reduction_indices types
+ switch (tensor_1->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleReduceProdGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleReduceProd>();
+ node->input(inputs[0]);
+ node->reduction_indices(inputs[1]);
+
+ const auto *options = op.builtin_options.AsReducerOptions();
+ node->keep_dims(options->keep_dims);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleRelu6.cpp b/compiler/luci/import/src/Nodes/CircleRelu6.cpp
new file mode 100644
index 000000000..5b443993b
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleRelu6.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleRelu6.h"
+
+#include <luci/IR/Nodes/CircleRelu6.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleRelu6GraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleRelu6GraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleRelu6>();
+ node->features(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReluN1To1.cpp b/compiler/luci/import/src/Nodes/CircleReluN1To1.cpp
new file mode 100644
index 000000000..edf662fb9
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleReluN1To1.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleReluN1To1.h"
+
+#include <luci/IR/Nodes/CircleReluN1To1.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleReluN1To1GraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ // TODO check dtypes
+
+ return true;
+}
+
+CircleNode *CircleReluN1To1GraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleReluN1To1>();
+ node->features(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReshape.cpp b/compiler/luci/import/src/Nodes/CircleReshape.cpp
index c83f143a6..f72c152b1 100644
--- a/compiler/luci/import/src/Nodes/CircleReshape.cpp
+++ b/compiler/luci/import/src/Nodes/CircleReshape.cpp
@@ -66,7 +66,14 @@ CircleNode *CircleReshapeGraphBuilder::build_node(const circle::OperatorT &op,
if (shape_node == nullptr)
{
const auto *options = op.builtin_options.AsReshapeOptions();
- shape_node = create_shape_node(options->new_shape, graph);
+ if (options != nullptr)
+ shape_node = create_shape_node(options->new_shape, graph);
+ else
+ {
+ shape_node = graph->nodes()->create<CircleOutputDummy>();
+ shape_node->dtype(loco::DataType::S32);
+ shape_node->rank(0);
+ }
}
auto *node = graph->nodes()->create<CircleReshape>();
@@ -74,7 +81,8 @@ CircleNode *CircleReshapeGraphBuilder::build_node(const circle::OperatorT &op,
node->shape(shape_node);
const auto *options = op.builtin_options.AsReshapeOptions();
- setup_shape_attribute(options->new_shape, node);
+ if (options)
+ setup_shape_attribute(options->new_shape, node);
return node;
}
diff --git a/compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp b/compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp
new file mode 100644
index 000000000..6128f1b86
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleResizeBilinear.h"
+
+#include <luci/IR/Nodes/CircleConst.h>
+#include <luci/IR/Nodes/CircleResizeBilinear.h>
+
+namespace luci
+{
+
+bool CircleResizeBilinearGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleResizeBilinearGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleResizeBilinear>();
+ node->input(inputs[0]);
+ node->size(inputs[1]);
+
+ const auto *options = op.builtin_options.AsResizeBilinearOptions();
+ node->align_corners(options->align_corners);
+ node->half_pixel_centers(options->half_pixel_centers);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp b/compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp
new file mode 100644
index 000000000..a1f1ef0ff
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleResizeNearestNeighbor.h"
+
+#include <luci/IR/Nodes/CircleConst.h>
+#include <luci/IR/Nodes/CircleResizeNearestNeighbor.h>
+
+namespace luci
+{
+
+bool CircleResizeNearestNeighborGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleResizeNearestNeighborGraphBuilder::build_node(
+ const circle::OperatorT &op, const std::vector<CircleNode *> &inputs, loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleResizeNearestNeighbor>();
+ node->input(inputs[0]);
+ node->size(inputs[1]);
+
+ const auto *options = op.builtin_options.AsResizeNearestNeighborOptions();
+ node->align_corners(options->align_corners);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReverseSequence.cpp b/compiler/luci/import/src/Nodes/CircleReverseSequence.cpp
new file mode 100644
index 000000000..72d3b153d
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleReverseSequence.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleReverseSequence.h"
+
+#include <luci/IR/Nodes/CircleReverseSequence.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleReverseSequenceGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_in = tensors.at(inputs[0]);
+ const auto &tensor_lengths = tensors.at(inputs[1]);
+ const auto &tensor_out = tensors.at(outputs[0]);
+
+ switch (tensor_lengths->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensor_in->type != tensor_out->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleReverseSequenceGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleReverseSequence>();
+ node->input(inputs[0]);
+ node->seq_lengths(inputs[1]);
+
+ const auto *options = op.builtin_options.AsReverseSequenceOptions();
+ node->seq_axis(options->seq_dim);
+ node->batch_axis(options->batch_dim);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleReverseV2.cpp b/compiler/luci/import/src/Nodes/CircleReverseV2.cpp
new file mode 100644
index 000000000..cd18128a7
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleReverseV2.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleReverseV2.h"
+
+#include <luci/IR/Nodes/CircleReverseV2.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleReverseV2GraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_in = tensors.at(inputs[0]);
+ const auto &tensor_axis = tensors.at(inputs[1]);
+ const auto &tensor_out = tensors.at(outputs[0]);
+
+ switch (tensor_axis->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensor_out->type != tensor_in->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleReverseV2GraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleReverseV2>();
+ node->tensor(inputs[0]);
+ node->axis(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleRound.cpp b/compiler/luci/import/src/Nodes/CircleRound.cpp
new file mode 100644
index 000000000..896489521
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleRound.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleRound.h"
+
+#include <luci/IR/Nodes/CircleRound.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleRoundGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 1)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ // Must be one of the following types
+ // bfloat16, half (float16), float32, float64, complex64, complex128
+ // Currently, circle supports float16, float32, complex64
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_in = tensors.at(inputs[0]);
+ const auto &tensor_out = tensors.at(outputs[0]);
+
+ switch (tensor_in->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensor_out->type != tensor_in->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleRoundGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleRound>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleScatterNd.cpp b/compiler/luci/import/src/Nodes/CircleScatterNd.cpp
new file mode 100644
index 000000000..adcaa0030
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleScatterNd.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleScatterNd.h"
+
+#include <luci/IR/Nodes/CircleScatterNd.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleScatterNdGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 3)
+ return false;
+
+ // indices must have the same type as shape
+ const auto &tensors = args.reader.tensors();
+
+ if (tensors[inputs[0]]->type != tensors[inputs[2]]->type)
+ return false;
+
+ // indices must be either int32 or int64
+ if (tensors[inputs[0]]->type != circle::TensorType_INT32 &&
+ tensors[inputs[0]]->type != circle::TensorType_INT64)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleScatterNdGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleScatterNd>();
+ node->indices(inputs[0]);
+ node->updates(inputs[1]);
+ node->shape(inputs[2]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSegmentSum.cpp b/compiler/luci/import/src/Nodes/CircleSegmentSum.cpp
new file mode 100644
index 000000000..1122bdca3
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSegmentSum.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSegmentSum.h"
+
+#include <luci/IR/Nodes/CircleSegmentSum.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSegmentSumGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 2)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_in = tensors.at(inputs[0]);
+ const auto &tensor_out = tensors.at(outputs[0]);
+ const auto &tensor_ids = tensors.at(inputs[1]);
+
+ switch (tensor_ids->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ if (tensor_out->type != tensor_in->type)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleSegmentSumGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSegmentSum>();
+ node->input(inputs[0]);
+ node->segment_ids(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSelect.cpp b/compiler/luci/import/src/Nodes/CircleSelect.cpp
new file mode 100644
index 000000000..ff94212c3
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSelect.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSelect.h"
+
+#include <luci/IR/Nodes/CircleSelect.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSelectGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 3)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ if (tensor->type != circle::TensorType_BOOL)
+ return false;
+ // TODO check dtypes for input 1, 2
+
+ return true;
+}
+
+CircleNode *CircleSelectGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSelect>();
+ node->condition(inputs[0]);
+ node->t(inputs[1]);
+ node->e(inputs[2]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSelectV2.cpp b/compiler/luci/import/src/Nodes/CircleSelectV2.cpp
new file mode 100644
index 000000000..78b2e6459
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSelectV2.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSelectV2.h"
+
+#include <luci/IR/Nodes/CircleSelectV2.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSelectV2GraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 3)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &condition = tensors.at(inputs[0]);
+ if (condition->type != circle::TensorType_BOOL)
+ return false;
+
+ const auto &t = tensors.at(inputs[1]);
+ const auto &e = tensors.at(inputs[2]);
+ if (t->type != e->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleSelectV2GraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSelectV2>();
+ node->condition(inputs[0]);
+ node->t(inputs[1]);
+ node->e(inputs[2]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleShape.cpp b/compiler/luci/import/src/Nodes/CircleShape.cpp
new file mode 100644
index 000000000..864b5eb51
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleShape.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleShape.h"
+
+#include <luci/IR/Nodes/CircleShape.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleShapeGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ if (inputs.size() != 1)
+ return false;
+ if (outputs.size() != 1)
+ return false;
+
+ // TODO check shape, dtype
+
+ return true;
+}
+
+CircleNode *CircleShapeGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleShape>();
+ node->input(inputs[0]);
+
+ const auto *options = op.builtin_options.AsShapeOptions();
+ node->out_type(luci_datatype(options->out_type));
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSin.cpp b/compiler/luci/import/src/Nodes/CircleSin.cpp
new file mode 100644
index 000000000..61d60c78f
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSin.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSin.h"
+
+#include <luci/IR/Nodes/CircleSin.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSinGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 1)
+ return false;
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ // input type check
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ break;
+ // TODO support TensorType_COMPLEX64, complex128, bfloat16
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleSinGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSin>();
+ node->x(inputs[0]);
+
+ // No options for Sin
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSlice.cpp b/compiler/luci/import/src/Nodes/CircleSlice.cpp
new file mode 100644
index 000000000..313c35599
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSlice.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSlice.h"
+
+#include <luci/IR/Nodes/CircleSlice.h>
+
+#include <loco.h>
+
+#include <cassert>
+
+namespace luci
+{
+
+bool CircleSliceGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 3)
+ return false;
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ // TODO check shapes and types
+
+ return true;
+}
+
+CircleNode *CircleSliceGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSlice>();
+ node->input(inputs[0]);
+ node->begin(inputs[1]);
+ node->size(inputs[2]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp b/compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp
new file mode 100644
index 000000000..f1361fb11
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSpaceToBatchND.h"
+
+#include <luci/IR/Nodes/CircleSpaceToBatchND.h>
+
+#include <loco.h>
+
+#include <cassert>
+
+namespace luci
+{
+
+bool CircleSpaceToBatchNDGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 3)
+ return false;
+
+ // input 1 and 2 should have INT32/INT64 type
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_1 = tensors.at(inputs[1]);
+ switch (tensor_1->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+ const auto &tensor_2 = tensors.at(inputs[2]);
+ switch (tensor_2->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ // Only support input shape dimension 3 and 4 only
+ const auto &tensor_0 = tensors.at(inputs[0]);
+ const auto t_0_s = tensor_0->shape.size();
+ if (t_0_s != 3 && t_0_s != 4)
+ return false;
+
+ // TODO check input shape
+
+ return true;
+}
+
+CircleNode *CircleSpaceToBatchNDGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSpaceToBatchND>();
+ node->input(inputs[0]);
+ node->block_shape(inputs[1]);
+ node->paddings(inputs[2]);
+
+ // No options for SpaceToBatchND
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp b/compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp
new file mode 100644
index 000000000..b612c9a9a
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSpaceToDepth.h"
+
+#include <luci/IR/Nodes/CircleSpaceToDepth.h>
+
+#include <loco.h>
+
+#include <cassert>
+
+namespace luci
+{
+
+bool CircleSpaceToDepthGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 1)
+ return false;
+
+ // TODO do attribute checks
+
+ return true;
+}
+
+CircleNode *CircleSpaceToDepthGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSpaceToDepth>();
+ node->input(inputs[0]);
+
+ const auto *options = op.builtin_options.AsSpaceToDepthOptions();
+ node->block_size(options->block_size);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSparseToDense.cpp b/compiler/luci/import/src/Nodes/CircleSparseToDense.cpp
new file mode 100644
index 000000000..bfe790fc1
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSparseToDense.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSparseToDense.h"
+
+#include <luci/IR/Nodes/CircleSparseToDense.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSparseToDenseGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 4)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleSparseToDenseGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSparseToDense>();
+ node->indices(inputs[0]);
+ node->output_shape(inputs[1]);
+ node->values(inputs[2]);
+ node->default_value(inputs[3]);
+
+ const auto *options = op.builtin_options.AsSparseToDenseOptions();
+ node->validate_indices(options->validate_indices);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSplit.cpp b/compiler/luci/import/src/Nodes/CircleSplit.cpp
new file mode 100644
index 000000000..07b6cc939
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSplit.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSplit.h"
+
+#include <luci/IR/Nodes/CircleSplit.h>
+#include <luci/IR/Nodes/CircleSplitOut.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleSplitGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ const auto *options = args.op.builtin_options.AsSplitOptions();
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (static_cast<int32_t>(outputs.size()) != options->num_splits)
+ return false;
+
+ // TODO check types
+
+ return true;
+}
+
+/**
+ * @brief Split Node builder
+ *
+ * @note Current loco does not provide multiple outputs
+ * We will create multiple CircleSplitOut nodes to emulate this
+ * For two outputs that may look like this
+ *
+ * --- CircleSplit --- FullyConnected ---
+ * \- FullyConnected ---
+ *
+ * will be created like this
+ *
+ * --- CircleSplit --- CircleSplitOut --- FullyConnected ---
+ * \- CircleSplitOut --- FullyConnected ---
+ */
+
+void CircleSplitGraphBuilder::build(const circle::OperatorT &op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ auto graph = context->graph();
+
+ const std::vector<int32_t> &inputs = op.inputs;
+ const std::vector<int32_t> &outputs = op.outputs;
+ const auto &tensors = context->reader()->tensors();
+ const auto &opcodes = context->reader()->opcodes();
+ auto tensors_ptr = context->reader()->tensors_ptr();
+ assert(tensors_ptr != nullptr);
+
+ std::vector<CircleNode *> input_nodes;
+ for (const int32_t input_tensor_index : inputs)
+ {
+ input_nodes.push_back(context->nodefinder()->node(input_tensor_index));
+ }
+
+ // Create CircleSplit
+ auto node = graph->nodes()->create<CircleSplit>();
+ node->split_dim(input_nodes[0]);
+ node->input(input_nodes[1]);
+
+ const auto *options = op.builtin_options.AsSplitOptions();
+ node->num_split(options->num_splits);
+
+ assert(outputs.size() > 0);
+ assert(int32_t(outputs.size()) == options->num_splits);
+ {
+ // Let's use name of output 0 as Split name
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ node->name(tensor_name(output_tensor));
+ node->op_version(opcodes[op.opcode_index].get()->version);
+
+ // NOTE We don't set quantization for Split itself but to virtual outputs
+ }
+
+ // Create virtual outputs of Split
+ for (int32_t n = 0; n < options->num_splits; ++n)
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[n]];
+
+ auto *nodeout = graph->nodes()->create<CircleSplitOut>();
+ copy_tensor_attributes(output_tensor, nodeout);
+ // mark shape_status
+ if (tensors_ptr->Get(outputs[n])->shape() == nullptr)
+ nodeout->shape_status(ShapeStatus::NOSHAPE);
+ else
+ nodeout->shape_status(ShapeStatus::VALID);
+
+ nodeout->input(node);
+ nodeout->index(n);
+
+ context->nodefinder()->enroll(outputs[n], nodeout);
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSplitV.cpp b/compiler/luci/import/src/Nodes/CircleSplitV.cpp
new file mode 100644
index 000000000..7c6e83e17
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSplitV.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSplitV.h"
+
+#include <luci/IR/Nodes/CircleSplitV.h>
+#include <luci/IR/Nodes/CircleSplitVOut.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleSplitVGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ const auto *options = args.op.builtin_options.AsSplitVOptions();
+
+ if (inputs.size() != 3)
+ return false;
+
+ if (static_cast<int32_t>(outputs.size()) != options->num_splits)
+ return false;
+
+ // TODO check types
+
+ return true;
+}
+
+/**
+ * @brief SplitV Node builder
+ *
+ * @note Current loco does not provide multiple outputs
+ * We will create multiple CircleSplitVOut nodes to emulate this
+ * For two outputs that may look like this
+ *
+ * --- CircleSplitV --- FullyConnected ---
+ * \- FullyConnected ---
+ *
+ * will be created like this
+ *
+ * --- CircleSplitV --- CircleSplitVOut --- FullyConnected ---
+ * \- CircleSplitVOut --- FullyConnected ---
+ */
+
+void CircleSplitVGraphBuilder::build(const circle::OperatorT &op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ auto graph = context->graph();
+
+ const std::vector<int32_t> &inputs = op.inputs;
+ const std::vector<int32_t> &outputs = op.outputs;
+ const auto &tensors = context->reader()->tensors();
+ const auto &opcodes = context->reader()->opcodes();
+ auto tensors_ptr = context->reader()->tensors_ptr();
+ assert(tensors_ptr != nullptr);
+
+ std::vector<CircleNode *> input_nodes;
+ for (const int32_t input_tensor_index : inputs)
+ {
+ input_nodes.push_back(context->nodefinder()->node(input_tensor_index));
+ }
+
+ // Create CircleSplitV
+ auto node = graph->nodes()->create<CircleSplitV>();
+ node->input(input_nodes[0]);
+ node->size_splits(input_nodes[1]);
+ node->split_dim(input_nodes[2]);
+
+ const auto *options = op.builtin_options.AsSplitVOptions();
+ node->num_split(options->num_splits);
+
+ assert(outputs.size() > 0);
+ assert(int32_t(outputs.size()) == options->num_splits);
+ {
+ // Let's use name of output 0 as Split name
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ node->name(tensor_name(output_tensor));
+ node->op_version(opcodes[op.opcode_index].get()->version);
+
+ // NOTE We don't set quantization for Split itself but to virtual outputs
+ }
+
+ // Create virtual outputs of Split
+ for (int32_t n = 0; n < options->num_splits; ++n)
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[n]];
+
+ auto *nodeout = graph->nodes()->create<CircleSplitVOut>();
+ copy_tensor_attributes(output_tensor, nodeout);
+ // mark shape_status
+ if (tensors_ptr->Get(outputs[n])->shape() == nullptr)
+ nodeout->shape_status(ShapeStatus::NOSHAPE);
+ else
+ nodeout->shape_status(ShapeStatus::VALID);
+
+ nodeout->input(node);
+ nodeout->index(n);
+
+ context->nodefinder()->enroll(outputs[n], nodeout);
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSqrt.cpp b/compiler/luci/import/src/Nodes/CircleSqrt.cpp
new file mode 100644
index 000000000..8a90f6691
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSqrt.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSqrt.h"
+
+#include <luci/IR/Nodes/CircleSqrt.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSqrtGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleSqrtGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSqrt>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSquare.cpp b/compiler/luci/import/src/Nodes/CircleSquare.cpp
new file mode 100644
index 000000000..8398548b6
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSquare.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSquare.h"
+
+#include <luci/IR/Nodes/CircleSquare.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSquareGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 1)
+ return false;
+
+  // Input must be one of the following types
+  // bfloat16, half (float16), float32, float64, int32, int64, complex64, complex128
+  // Currently, circle supports int32, int64, float16, float32, float64, complex64
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ switch (tensor->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_COMPLEX64:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleSquareGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSquare>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp b/compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp
new file mode 100644
index 000000000..93ce959e2
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSquaredDifference.h"
+
+#include <luci/IR/Nodes/CircleSquaredDifference.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleSquaredDifferenceGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ // Inputs must be one of the following types
+ // bfloat16, half(float16), float32, float64, int32, int64, complex64, complex128
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_FLOAT64:
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ case circle::TensorType_COMPLEX64:
+ break;
+ // TODO support bfloat16, complex128
+ default:
+ return false;
+ }
+
+ // Input types must match
+ if (tensors.at(inputs[0])->type != tensors.at(inputs[1])->type)
+ return false;
+
+ // Input and output types must match
+ if (tensors.at(inputs[0])->type != tensors.at(outputs[0])->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleSquaredDifferenceGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSquaredDifference>();
+ node->x(inputs[0]);
+ node->y(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSqueeze.cpp b/compiler/luci/import/src/Nodes/CircleSqueeze.cpp
new file mode 100644
index 000000000..a5252d0bb
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSqueeze.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSqueeze.h"
+
+#include <luci/IR/Nodes/CircleConst.h>
+#include <luci/IR/Nodes/CircleSqueeze.h>
+
+namespace luci
+{
+
+bool CircleSqueezeGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleSqueezeGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSqueeze>();
+ node->input(inputs[0]);
+
+ const auto *options = op.builtin_options.AsSqueezeOptions();
+ assert(options);
+
+ node->squeeze_dims(options->squeeze_dims);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleStridedSlice.cpp b/compiler/luci/import/src/Nodes/CircleStridedSlice.cpp
new file mode 100644
index 000000000..95e446704
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleStridedSlice.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleStridedSlice.h"
+
+#include <luci/IR/Nodes/CircleStridedSlice.h>
+
+#include <loco.h>
+
+#include <cassert>
+
+namespace luci
+{
+
+bool CircleStridedSliceGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 4)
+ return false;
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ // TODO check shapes and types
+
+ return true;
+}
+
+CircleNode *CircleStridedSliceGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleStridedSlice>();
+ node->input(inputs[0]);
+ node->begin(inputs[1]);
+ node->end(inputs[2]);
+ node->strides(inputs[3]);
+
+ const auto *options = op.builtin_options.AsStridedSliceOptions();
+ node->begin_mask(options->begin_mask);
+ node->end_mask(options->end_mask);
+ node->ellipsis_mask(options->ellipsis_mask);
+ node->new_axis_mask(options->new_axis_mask);
+ node->shrink_axis_mask(options->shrink_axis_mask);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleSum.cpp b/compiler/luci/import/src/Nodes/CircleSum.cpp
new file mode 100644
index 000000000..b4865de59
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleSum.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleSum.h"
+
+#include <luci/IR/Nodes/CircleSum.h>
+
+namespace luci
+{
+
+bool CircleSumGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 2)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleSumGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleSum>();
+ node->input(inputs[0]);
+ node->reduction_indices(inputs[1]);
+
+ const auto *options = op.builtin_options.AsReducerOptions();
+ node->keep_dims(options->keep_dims);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleTanh.cpp b/compiler/luci/import/src/Nodes/CircleTanh.cpp
new file mode 100644
index 000000000..8986378c4
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleTanh.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleTanh.h"
+
+#include <luci/IR/Nodes/CircleTanh.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleTanhGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ if (inputs.size() != 1)
+ return false;
+
+ // Must be one of the following types
+ // bfloat16, half (float16), float32, float64, complex64, complex128
+ // Currently, circle supports float16, float32, complex64
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ switch (tensor->type)
+ {
+ case circle::TensorType_FLOAT16:
+ case circle::TensorType_FLOAT32:
+ case circle::TensorType_COMPLEX64:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+CircleNode *CircleTanhGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleTanh>();
+ node->x(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleTile.cpp b/compiler/luci/import/src/Nodes/CircleTile.cpp
new file mode 100644
index 000000000..91054ce7f
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleTile.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleTile.h"
+
+#include <luci/IR/Nodes/CircleTile.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleTileGraphBuilder::validate(const ValidateArgs &args) const
+{
+ auto inputs = args.op.inputs;
+ auto outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ // Multiples (inputs[1]) must be one of the following types
+ // int32, int64
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[1]);
+ switch (tensor->type)
+ {
+ case circle::TensorType_INT32:
+ case circle::TensorType_INT64:
+ break;
+ default:
+ return false;
+ }
+
+ // Type of input and output must be the same
+ if (tensors.at(inputs[0])->type != tensors.at(outputs[0])->type)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleTileGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleTile>();
+ node->input(inputs[0]);
+ node->multiples(inputs[1]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleTopKV2.cpp b/compiler/luci/import/src/Nodes/CircleTopKV2.cpp
new file mode 100644
index 000000000..5c1051c43
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleTopKV2.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleTopKV2.h"
+
+#include <luci/IR/Nodes/CircleTopKV2.h>
+#include <luci/IR/Nodes/CircleTopKV2Out.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleTopKV2GraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 2)
+ return false;
+ if (outputs.size() != 2)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[1]);
+ if (tensor->type != circle::TensorType_INT32)
+ return false;
+
+ return true;
+}
+
+/**
+ * @brief TopKV2 Node builder
+ *
+ * @note Current loco does not provide multiple outputs
+ * We will create multiple CircleTopKV2Out nodes to emulate this
+ * For two outputs that may look like this
+ *
+ * --- CircleTopKV2--- FullyConnected ---
+ * \- FullyConnected ---
+ *
+ * will be created like this
+ *
+ * --- CircleTopKV2 --- CircleTopKV2Out --- FullyConnected ---
+ * \- CircleTopKV2Out --- FullyConnected ---
+ */
+
+void CircleTopKV2GraphBuilder::build(const circle::OperatorT &op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ auto graph = context->graph();
+
+ const std::vector<int32_t> &inputs = op.inputs;
+ const std::vector<int32_t> &outputs = op.outputs;
+ const auto &tensors = context->reader()->tensors();
+ const auto &opcodes = context->reader()->opcodes();
+ auto tensors_ptr = context->reader()->tensors_ptr();
+ assert(tensors_ptr != nullptr);
+
+ std::vector<CircleNode *> input_nodes;
+ for (const int32_t input_tensor_index : inputs)
+ {
+ input_nodes.push_back(context->nodefinder()->node(input_tensor_index));
+ }
+
+ // Create CircleTopKV2
+ auto node = graph->nodes()->create<CircleTopKV2>();
+ node->input(input_nodes[0]);
+ node->k(input_nodes[1]);
+
+ assert(outputs.size() == 2);
+ {
+ // Let's use name of output 0 as TopKV2 name
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ node->name(tensor_name(output_tensor));
+ node->op_version(opcodes[op.opcode_index].get()->version);
+
+ // NOTE We don't set quantization for TopKV2 itself but to virtual outputs
+ }
+
+ // Create virtual outputs of TopKV2
+ for (size_t n = 0; n < outputs.size(); ++n)
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[n]];
+
+ auto *nodeout = graph->nodes()->create<CircleTopKV2Out>();
+ copy_tensor_attributes(output_tensor, nodeout);
+ // mark shape_status
+ if (tensors_ptr->Get(outputs[n])->shape() == nullptr)
+ nodeout->shape_status(ShapeStatus::NOSHAPE);
+ else
+ nodeout->shape_status(ShapeStatus::VALID);
+
+ nodeout->input(node);
+ nodeout->index(n);
+
+ context->nodefinder()->enroll(outputs[n], nodeout);
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleTransposeConv.cpp b/compiler/luci/import/src/Nodes/CircleTransposeConv.cpp
new file mode 100644
index 000000000..7bdf46daa
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleTransposeConv.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleTransposeConv.h"
+
+#include <luci/IR/Nodes/CircleTransposeConv.h>
+
+#include <loco.h>
+
+#include <cassert>
+
+namespace luci
+{
+
+bool CircleTransposeConvGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 3)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleTransposeConvGraphBuilder::build_node(const circle::OperatorT &op,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleTransposeConv>();
+
+ node->inputSizes(inputs[0]);
+ node->filter(inputs[1]);
+ node->outBackprop(inputs[2]);
+
+ const auto *options = op.builtin_options.AsTransposeConvOptions();
+ node->padding(luci_padding(options->padding));
+ node->stride()->w(options->stride_w);
+ node->stride()->h(options->stride_h);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleUnpack.cpp b/compiler/luci/import/src/Nodes/CircleUnpack.cpp
new file mode 100644
index 000000000..c4282e24f
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleUnpack.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleUnpack.h"
+
+#include <luci/IR/Nodes/CircleUnpack.h>
+#include <luci/IR/Nodes/CircleUnpackOut.h>
+
+#include <luci/UserSettings.h>
+#include <luci/Log.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleUnpackGraphBuilder::validate(const ValidateArgs &args) const
+{
+ LOGGER(l);
+
+ auto settings = luci::UserSettings::settings();
+
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+ const auto *options = args.op.builtin_options.AsUnpackOptions();
+
+ if (inputs.size() != 1)
+ return false;
+
+ // NOTE real models may have mismatch
+ if (static_cast<int32_t>(outputs.size()) != options->num)
+ {
+ if (settings->get(luci::UserSettings::Key::DisableValidation))
+ {
+ const auto &tensors = args.reader.tensors();
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ auto name = tensor_name(output_tensor);
+ WARN(l) << "Warning: import Unpack(" << name << ") 'num' is not same as outputs used";
+ }
+ else
+ return false;
+ }
+
+ if (options->num < 0)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor = tensors.at(inputs[0]);
+ const auto &shape = tensor->shape;
+ auto shape_size = static_cast<int32_t>(shape.size());
+ if (shape_size > 0)
+ {
+ // NOTE for unknown shape, shape_size is 0
+ if (options->axis < -shape_size || options->axis >= shape_size)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * @brief Unpack Node builder
+ *
+ * @note Current loco does not provide multiple outputs
+ * We will create multiple CircleUnpackOut nodes to emulate this
+ * For two outputs that may look like this
+ *
+ * --- CircleUnpack --- FullyConnected ---
+ * \- FullyConnected ---
+ *
+ * will be created like this
+ *
+ * --- CircleUnpack --- CircleUnpackOut --- FullyConnected ---
+ * \- CircleUnpackOut --- FullyConnected ---
+ */
+
+void CircleUnpackGraphBuilder::build(const circle::OperatorT &op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ auto graph = context->graph();
+
+ const std::vector<int32_t> &inputs = op.inputs;
+ const std::vector<int32_t> &outputs = op.outputs;
+ const auto &tensors = context->reader()->tensors();
+ const auto &opcodes = context->reader()->opcodes();
+ auto tensors_ptr = context->reader()->tensors_ptr();
+ assert(tensors_ptr != nullptr);
+
+ // NOTE Unpack has only one input so running a loop is not necessary
+  // This loop is kept here as a reference for other Ops
+ std::vector<CircleNode *> input_nodes;
+ for (const int32_t input_tensor_index : inputs)
+ {
+ input_nodes.push_back(context->nodefinder()->node(input_tensor_index));
+ }
+
+ // Create CircleUnpack
+ CircleUnpack *node = graph->nodes()->create<CircleUnpack>();
+ node->value(input_nodes[0]);
+
+ const auto *options = op.builtin_options.AsUnpackOptions();
+ node->num(options->num);
+ node->axis(options->axis);
+
+ assert(outputs.size() > 0);
+ {
+ // Let's use name of output 0 as Unpack name
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ node->name(tensor_name(output_tensor));
+ node->op_version(opcodes[op.opcode_index].get()->version);
+
+ // NOTE We don't set quantization for Unpack itself but to virtual outputs
+ }
+
+ // Create virtual outputs of Unpack
+ for (int32_t n = 0; n < options->num; ++n)
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[n]];
+
+ auto *nodeout = graph->nodes()->create<CircleUnpackOut>();
+ copy_tensor_attributes(output_tensor, nodeout);
+ // mark shape_status
+ if (tensors_ptr->Get(outputs[n])->shape() == nullptr)
+ nodeout->shape_status(ShapeStatus::NOSHAPE);
+ else
+ nodeout->shape_status(ShapeStatus::VALID);
+
+ nodeout->input(node);
+ nodeout->index(n);
+
+ context->nodefinder()->enroll(outputs[n], nodeout);
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleWhere.cpp b/compiler/luci/import/src/Nodes/CircleWhere.cpp
new file mode 100644
index 000000000..a13c4d6c9
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleWhere.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleWhere.h"
+
+#include <luci/IR/Nodes/CircleWhere.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleWhereGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto &outputs = args.op.outputs;
+
+ if (inputs.size() != 1)
+ return false;
+
+ if (outputs.size() != 1)
+ return false;
+
+ const auto &tensors = args.reader.tensors();
+ const auto &tensor_condition = tensors.at(inputs[0]);
+ const auto &tensor_out = tensors.at(outputs[0]);
+
+ if (tensor_condition->type != circle::TensorType_BOOL)
+ return false;
+
+ if (tensor_out->type != circle::TensorType_INT64)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleWhereGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleWhere>();
+ node->condition(inputs[0]);
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleWhile.cpp b/compiler/luci/import/src/Nodes/CircleWhile.cpp
new file mode 100644
index 000000000..aead25071
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleWhile.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleWhile.h"
+
+#include <luci/IR/Nodes/CircleWhile.h>
+#include <luci/IR/Nodes/CircleWhileOut.h>
+
+#include <loco.h>
+#include <oops/UserExn.h>
+
+namespace luci
+{
+
+bool CircleWhileGraphBuilder::validate(const ValidateArgs &args) const
+{
+ const auto &inputs = args.op.inputs;
+ const auto *options = args.op.builtin_options.AsWhileOptions();
+
+ if (inputs.size() != args.op.outputs.size())
+ return false;
+
+ auto num_graphs = static_cast<int32_t>(args.reader.num_subgraph());
+ if (options->cond_subgraph_index >= num_graphs)
+ return false;
+ if (options->body_subgraph_index >= num_graphs)
+ return false;
+
+ return true;
+}
+
+/**
+ * @brief While Node builder
+ *
+ * @note Current loco does not provide multiple outputs
+ * We will create multiple CircleWhileOut nodes to emulate this
+ * For two outputs that may look like this
+ *
+ * --- CircleWhile --- Node ---
+ * \- Node ---
+ *
+ * will be created like this
+ *
+ * --- CircleWhile --- CircleWhileOut --- Node ---
+ * \- CircleWhileOut --- Node ---
+ */
+
+void CircleWhileGraphBuilder::build(const circle::OperatorT &op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ auto graph = context->graph();
+
+ const std::vector<int32_t> &inputs = op.inputs;
+ const std::vector<int32_t> &outputs = op.outputs;
+ const auto &tensors = context->reader()->tensors();
+ const auto &opcodes = context->reader()->opcodes();
+
+ std::vector<CircleNode *> input_nodes;
+ for (const int32_t input_tensor_index : inputs)
+ {
+ auto input_node = context->nodefinder()->node(input_tensor_index);
+ assert(input_node != nullptr);
+ input_nodes.push_back(input_node);
+ }
+
+ uint32_t input_count = inputs.size();
+ uint32_t output_count = outputs.size();
+
+ // Create CircleWhile
+ CircleWhile *node = graph->nodes()->create<CircleWhile>(input_count, output_count);
+
+ for (uint32_t idx = 0; idx < input_count; ++idx)
+ {
+ node->input(idx, input_nodes[idx]);
+ }
+
+ const auto *options = op.builtin_options.AsWhileOptions();
+ node->cond_branch(options->cond_subgraph_index);
+ node->body_branch(options->body_subgraph_index);
+
+ assert(outputs.size() > 0);
+ {
+    // Let's use name of output 0 as While name
+ const circle::TensorT &output_tensor = *tensors[outputs[0]];
+ node->name(tensor_name(output_tensor));
+ node->op_version(opcodes[op.opcode_index].get()->version);
+
+ // NOTE We don't set quantization for While itself but to virtual outputs
+ }
+
+ // Create virtual outputs of While
+ for (uint32_t n = 0; n < output_count; ++n)
+ {
+ const circle::TensorT &output_tensor = *tensors[outputs[n]];
+
+ auto *nodeout = graph->nodes()->create<CircleWhileOut>();
+
+ nodeout->input(node);
+ nodeout->index(n);
+
+ copy_tensor_attributes(output_tensor, nodeout);
+
+ // Note: leave shape_status to UNKNOWN to run shape inference
+
+ context->nodefinder()->enroll(outputs[n], nodeout);
+ }
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/Nodes/CircleZerosLike.cpp b/compiler/luci/import/src/Nodes/CircleZerosLike.cpp
new file mode 100644
index 000000000..4362925cd
--- /dev/null
+++ b/compiler/luci/import/src/Nodes/CircleZerosLike.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Import/Nodes/CircleZerosLike.h"
+
+#include <luci/IR/Nodes/CircleZerosLike.h>
+
+#include <loco.h>
+
+namespace luci
+{
+
+bool CircleZerosLikeGraphBuilder::validate(const ValidateArgs &args) const
+{
+ if (args.op.inputs.size() != 1)
+ return false;
+
+ if (args.op.outputs.size() != 1)
+ return false;
+
+ return true;
+}
+
+CircleNode *CircleZerosLikeGraphBuilder::build_node(const circle::OperatorT &,
+ const std::vector<CircleNode *> &inputs,
+ loco::Graph *graph) const
+{
+ auto *node = graph->nodes()->create<CircleZerosLike>();
+ node->input(inputs[0]);
+
+  // ZerosLikeOptions is empty
+
+ return node;
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/PostImport.cpp b/compiler/luci/import/src/PostImport.cpp
new file mode 100644
index 000000000..f436b48e8
--- /dev/null
+++ b/compiler/luci/import/src/PostImport.cpp
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PostImport.h"
+
+#include "luci/Import/CircleReader.h"
+
+#include <luci/IR/CircleNodes.h>
+#include <luci/IR/CircleDialect.h>
+#include <luci/IR/CircleNodeVisitor.h>
+#include <luci/Log.h>
+
+#include <loco.h>
+#include <oops/InternalExn.h>
+
+namespace
+{
+
+/**
+ * @brief FixInterGraphNodes will fix inter graph connections for each Nodes
+ */
+class FixInterGraphNodes final : public luci::CircleNodeMutableVisitor<void>
+{
+public:
+ FixInterGraphNodes(const luci::Module *m, const luci::CircleReader &r) : _module(m), _reader(r) {}
+
+ /**
+ * @note This will set Graph* to every CircleIf nodes 'else' and 'then'
+ */
+ void visit(luci::CircleIf *node) final
+ {
+ LOGGER(l);
+ INFO(l) << "CircleIf " << node->name() << std::endl;
+
+ auto then_branch = node->then_branch();
+ auto else_branch = node->else_branch();
+ auto num_graphs = static_cast<int32_t>(_module->size());
+ (void)num_graphs;
+
+ assert(num_graphs > 0);
+ assert(then_branch >= 0 && then_branch < num_graphs);
+ assert(else_branch >= 0 && else_branch < num_graphs);
+
+ auto then_graph = _module->graph(then_branch);
+ auto else_graph = _module->graph(else_branch);
+ assert(then_graph != nullptr);
+ assert(else_graph != nullptr);
+
+ node->then_graph(then_graph);
+ node->else_graph(else_graph);
+ }
+
+ void visit(luci::CircleWhile *node) final
+ {
+ LOGGER(l);
+ INFO(l) << "CircleWhile " << node->name() << std::endl;
+
+ auto cond_branch = node->cond_branch();
+ auto body_branch = node->body_branch();
+ auto num_graphs = static_cast<int32_t>(_module->size());
+ (void)num_graphs;
+
+ assert(num_graphs > 0);
+ assert(cond_branch >= 0 && cond_branch < num_graphs);
+ assert(body_branch >= 0 && body_branch < num_graphs);
+
+ auto cond_graph = _module->graph(cond_branch);
+ auto body_graph = _module->graph(body_branch);
+ assert(cond_graph != nullptr);
+ assert(body_graph != nullptr);
+
+ node->cond_graph(cond_graph);
+ node->body_graph(body_graph);
+ }
+
+ void visit(luci::CircleNode *) final
+ {
+ // DO NOTHING
+ }
+
+private:
+ const luci::Module *_module;
+ const luci::CircleReader &_reader;
+};
+
+/**
+ * @brief FixInterGraph will fix inter graph connections
+ */
+class FixInterGraph final
+{
+public:
+ void run(loco::Graph *g, const luci::Module *m, const luci::CircleReader &r)
+ {
+ for (auto node : loco::postorder_traversal(loco::output_nodes(g)))
+ {
+ if (recognize(node->dialect()))
+ {
+ auto cn = loco::must_cast<luci::CircleNode *>(node);
+
+ fix(cn, m, r);
+ }
+ }
+ }
+
+private:
+ bool recognize(const loco::Dialect *dialect) { return (dialect == luci::CircleDialect::get()); }
+
+ void fix(luci::CircleNode *node, const luci::Module *module, const luci::CircleReader &reader)
+ {
+ FixInterGraphNodes fix(module, reader);
+ node->accept(&fix);
+ }
+};
+
+} // namespace
+
+namespace
+{
+/**
+ * @brief ValidateNodeProp will validate inter graph connections for each Nodes
+ */
+class ValidateNodeProp final : public luci::CircleNodeMutableVisitor<void>
+{
+public:
+ ValidateNodeProp(const luci::Module *m, const luci::CircleReader &r) : _module(m), _reader(r) {}
+
+ /**
+ * @note Validate CircleIf node 'else' and 'then' graph input/output count
+ * shape and type
+ */
+ void visit(luci::CircleIf *node) final
+ {
+ LOGGER(l);
+ INFO(l) << "CircleIf " << node->name() << std::endl;
+
+ auto then_graph = node->then_graph();
+ auto else_graph = node->else_graph();
+ assert(then_graph != nullptr);
+ assert(else_graph != nullptr);
+
+ // TODO support for different shape; but how?
+ // NOTE Shape/Type inference assumes below conditions
+
+ // Check both "then" and "else" subgraph outputs are same in count
+ auto then_outputs = loco::output_nodes(then_graph); // CircleOutput nodes
+ auto else_outputs = loco::output_nodes(else_graph);
+ if (then_outputs.size() != else_outputs.size())
+ {
+ INTERNAL_EXN("CircleIf THEN and ELSE Graph are not same in size");
+ }
+
+ // check outputs have same shape and dtype
+ auto then_graph_outputs = then_graph->outputs(); // loco::GraphOutput items
+ auto else_graph_outputs = else_graph->outputs();
+ for (size_t idx = 0; idx < then_outputs.size(); ++idx)
+ {
+ auto then_out = loco::must_cast<luci::CircleOutput *>(then_outputs.at(idx));
+ auto else_out = loco::must_cast<luci::CircleOutput *>(else_outputs.at(idx));
+
+ auto then_graph_output = then_graph_outputs->at(then_out->index());
+ auto else_graph_output = else_graph_outputs->at(else_out->index());
+ if (!(*then_graph_output->shape() == *else_graph_output->shape()))
+ {
+ INTERNAL_EXN_V("CircleIf THEN and ELSE Graph Output shape mismatch ", idx);
+ }
+ if (then_graph_output->dtype() != else_graph_output->dtype())
+ {
+ INTERNAL_EXN_V("CircleIf THEN and ELSE Graph Output type mismatch ", idx);
+ }
+ }
+ }
+
+ /**
+ * @note Validate CircleWhile node 'cond' and 'body' graph input/output count
+ * shape and type
+ */
+ void visit(luci::CircleWhile *node) final
+ {
+ LOGGER(l);
+ INFO(l) << "CircleWhile " << node->name() << std::endl;
+
+ auto cond_graph = node->cond_graph();
+ auto body_graph = node->body_graph();
+ assert(cond_graph != nullptr);
+ assert(body_graph != nullptr);
+
+ // Check input of "cond" and input/output of "body" subgraph have the same size
+ auto cond_inputs = loco::input_nodes(cond_graph);
+ auto cond_outputs = loco::output_nodes(cond_graph);
+ auto body_inputs = loco::input_nodes(body_graph);
+ auto body_outputs = loco::output_nodes(body_graph);
+ if (cond_inputs.size() != body_outputs.size())
+ {
+ INTERNAL_EXN("CircleWhile COND input and BODY output have different sizes");
+ }
+ if (cond_inputs.size() != body_inputs.size())
+ {
+ INTERNAL_EXN("CircleWhile COND input and BODY input have different sizes");
+ }
+ if (cond_outputs.size() != 1)
+ {
+ INTERNAL_EXN("CircleWhile COND output must have size 1");
+ }
+ auto cond_out = loco::must_cast<luci::CircleOutput *>(cond_outputs.at(0));
+ if (cond_out->dtype() != loco::DataType::BOOL)
+ {
+ INTERNAL_EXN("CircleWhile COND output must have bool type");
+ }
+
+ // input of "cond" and input/output of "body" subgraph must have the same shape and type
+ // First we compare input of "cond" with input of "body"
+ auto cond_graph_inputs = cond_graph->inputs();
+ auto body_graph_inputs = body_graph->inputs();
+ for (size_t idx = 0; idx < cond_inputs.size(); ++idx)
+ {
+ auto cond_in = loco::must_cast<luci::CircleInput *>(cond_inputs.at(idx));
+ auto body_in = loco::must_cast<luci::CircleInput *>(body_inputs.at(idx));
+
+ auto cond_graph_input = cond_graph_inputs->at(cond_in->index());
+ auto body_graph_input = body_graph_inputs->at(body_in->index());
+ if ((cond_in->rank() != body_in->rank()))
+ {
+ INTERNAL_EXN_V("CircleWhile COND input and BODY input shape mismatch ", idx);
+ }
+ if (cond_in->rank() > 0 && body_in->rank() > 0)
+ {
+ if (!(*cond_graph_input->shape() == *body_graph_input->shape()))
+ {
+ INTERNAL_EXN_V("CircleWhile COND input and BODY input shape mismatch ", idx);
+ }
+ }
+ if (cond_in->dtype() != body_in->dtype())
+ {
+ INTERNAL_EXN_V("CircleWhile COND input and BODY input type mismatch ", idx);
+ }
+ }
+
+ // Next we compare input of "cond" with output of "body"
+ auto body_graph_outputs = body_graph->outputs();
+ for (size_t idx = 0; idx < cond_inputs.size(); ++idx)
+ {
+ auto cond_in = loco::must_cast<luci::CircleInput *>(cond_inputs.at(idx));
+ auto body_out = loco::must_cast<luci::CircleOutput *>(body_outputs.at(idx));
+
+ auto cond_graph_input = cond_graph_inputs->at(cond_in->index());
+ auto body_graph_output = body_graph_outputs->at(body_out->index());
+ if ((cond_in->rank() != body_out->rank()))
+ {
+ INTERNAL_EXN_V("CircleWhile COND input and BODY output shape mismatch ", idx);
+ }
+ if (cond_in->rank() > 0 && body_out->rank() > 0)
+ {
+ if (!(*cond_graph_input->shape() == *body_graph_output->shape()))
+ {
+ INTERNAL_EXN_V("CircleWhile COND input and BODY output shape mismatch ", idx);
+ }
+ }
+ if (cond_in->dtype() != body_out->dtype())
+ {
+ INTERNAL_EXN_V("CircleWhile COND input and BODY output type mismatch ", idx);
+ }
+ }
+ }
+
+ void visit(luci::CircleNode *) final
+ {
+ // DO NOTHING
+ }
+
+private:
+ const luci::Module *_module;
+ const luci::CircleReader &_reader;
+};
+
+/**
+ * @brief ValidateGraphProp will validate inter graph node properties
+ */
+class ValidateGraphProp final
+{
+public:
+ void run(loco::Graph *g, const luci::Module *m, const luci::CircleReader &r)
+ {
+ for (auto node : loco::postorder_traversal(loco::output_nodes(g)))
+ {
+ if (recognize(node->dialect()))
+ {
+ auto cn = loco::must_cast<luci::CircleNode *>(node);
+
+ eval(cn, m, r);
+ }
+ }
+ }
+
+private:
+ bool recognize(const loco::Dialect *dialect) { return (dialect == luci::CircleDialect::get()); }
+
+ void eval(luci::CircleNode *node, const luci::Module *module, const luci::CircleReader &reader)
+ {
+ ValidateNodeProp val(module, reader);
+ node->accept(&val);
+ }
+};
+
+} // namespace
+
+namespace luci
+{
+
+/**
+ * @brief Do post import actions
+ */
+void post_import_graph(luci::Module *module, const luci::CircleReader &reader)
+{
+ LOGGER(l);
+
+ auto count = module->size();
+
+ for (size_t s = 0; s < count; ++s)
+ {
+ auto g = module->graph(s);
+ assert(g != nullptr);
+
+ INFO(l) << "--- FixInterGraph " << g->name() << "-------------------------";
+ FixInterGraph fix;
+ fix.run(g, module, reader);
+ }
+
+ for (size_t s = 0; s < count; ++s)
+ {
+ auto g = module->graph(s);
+ assert(g != nullptr);
+
+ INFO(l) << "--- ValidateGraphProp " << g->name() << "---------------------";
+ ValidateGraphProp prop;
+ prop.run(g, module, reader);
+ }
+
+ INFO(l) << "--- post_import_graph done -------------------------------------";
+}
+
+} // namespace luci
diff --git a/compiler/luci/import/src/PostImport.h b/compiler/luci/import/src/PostImport.h
new file mode 100644
index 000000000..c719c588a
--- /dev/null
+++ b/compiler/luci/import/src/PostImport.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_POST_IMPORT_H__
+#define __LUCI_POST_IMPORT_H__
+
+#include "luci/Import/CircleReader.h"
+
+#include "luci/IR/Module.h"
+
+namespace luci
+{
+
+/**
+ * @brief Do post import actions
+ */
+void post_import_graph(luci::Module *module, const luci::CircleReader &reader);
+
+} // namespace luci
+
+#endif // __LUCI_POST_IMPORT_H__
diff --git a/compiler/luci/lang/CMakeLists.txt b/compiler/luci/lang/CMakeLists.txt
index 564e777fb..32d0a890d 100644
--- a/compiler/luci/lang/CMakeLists.txt
+++ b/compiler/luci/lang/CMakeLists.txt
@@ -7,6 +7,7 @@ target_include_directories(luci_lang PRIVATE src)
target_include_directories(luci_lang PUBLIC include)
target_link_libraries(luci_lang PUBLIC loco)
target_link_libraries(luci_lang PUBLIC oops)
+target_link_libraries(luci_lang PRIVATE logo)
target_link_libraries(luci_lang PRIVATE nncc_common)
install(TARGETS luci_lang DESTINATION lib)
@@ -20,3 +21,4 @@ nnas_find_package(GTest REQUIRED)
GTest_AddTest(luci_lang_test ${TESTS})
target_include_directories(luci_lang_test PRIVATE src)
target_link_libraries(luci_lang_test luci_lang)
+target_link_libraries(luci_lang_test logo)
diff --git a/compiler/luci/lang/include/luci/IR/AttrDilation.h b/compiler/luci/lang/include/luci/IR/AttrDilation.h
new file mode 100644
index 000000000..c2b28d77d
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/AttrDilation.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_ATTRDILATION_H__
+#define __LUCI_IR_ATTRDILATION_H__
+
+#include <stdint.h>
+
+namespace luci
+{
+
+class Dilation final
+{
+public:
+ Dilation() : _w(1), _h(1) {}
+
+ int32_t w() const { return _w; }
+ void w(int32_t w) { _w = w; }
+
+ int32_t h() const { return _h; }
+ void h(int32_t h) { _h = h; }
+
+private:
+ int32_t _w;
+ int32_t _h;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_ATTRDILATION_H__
diff --git a/compiler/luci/lang/include/luci/IR/AttrMirrorPadMode.h b/compiler/luci/lang/include/luci/IR/AttrMirrorPadMode.h
new file mode 100644
index 000000000..7ca9d5d99
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/AttrMirrorPadMode.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_ATTR_MIRROR_PAD_MODE_H__
+#define __LUCI_IR_ATTR_MIRROR_PAD_MODE_H__
+
+namespace luci
+{
+
+enum class MirrorPadMode
+{
+ UNDEFINED, // This is not defined by Circle. This was added to prevent programming error.
+
+ REFLECT,
+ SYMMETRIC,
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_ATTR_MIRROR_PAD_MODE_H__
diff --git a/compiler/luci/lang/include/luci/IR/CircleNodeDecl.h b/compiler/luci/lang/include/luci/IR/CircleNodeDecl.h
index b87bdf9d0..967103e3c 100644
--- a/compiler/luci/lang/include/luci/IR/CircleNodeDecl.h
+++ b/compiler/luci/lang/include/luci/IR/CircleNodeDecl.h
@@ -17,8 +17,10 @@
#ifndef __LUCI_IR_CIRCLENODEDECL_H__
#define __LUCI_IR_CIRCLENODEDECL_H__
-#include <loco/IR/Node.h>
#include <loco/IR/Dialect.h>
+#include <loco/IR/Node.h>
+#include <loco/IR/NodeMixins.h>
+#include <luci/IR/PropertyShapeStatus.h>
#include "CircleOpcode.h"
#include "CircleNodeVisitor.forward.h"
@@ -31,7 +33,9 @@ namespace luci
using NodeName = std::string;
-struct CircleNode : public loco::Node
+struct CircleNode : public loco::Node,
+ public loco::NodeMixin<loco::NodeTrait::DataType>,
+ public loco::NodeMixin<loco::NodeTrait::TensorShape>
{
virtual ~CircleNode() = default;
@@ -50,9 +54,17 @@ struct CircleNode : public loco::Node
_quantparam = std::move(quantparam);
}
+ ShapeStatus shape_status(void) const { return _shape_status; }
+ void shape_status(ShapeStatus ss) { _shape_status = ss; }
+
+ int32_t op_version(void) const { return _op_version; }
+ void op_version(int32_t op_version) { _op_version = op_version; }
+
private:
NodeName _name;
std::unique_ptr<CircleQuantParam> _quantparam;
+ ShapeStatus _shape_status{ShapeStatus::UNDEFINED};
+ int32_t _op_version = 1;
};
template <CircleOpcode Code> struct CircleNodeImpl : public CircleNode
diff --git a/compiler/luci/lang/include/luci/IR/CircleNodeImpl.h b/compiler/luci/lang/include/luci/IR/CircleNodeImpl.h
index bdcfc9c9d..a6b9488db 100644
--- a/compiler/luci/lang/include/luci/IR/CircleNodeImpl.h
+++ b/compiler/luci/lang/include/luci/IR/CircleNodeImpl.h
@@ -18,7 +18,6 @@
#define __LUCI_IR_CIRCLENODEIMPL_H__
#include "CircleNodes.h"
-#include "CircleNodeVisitor.h"
#include <oops/InternalExn.h>
diff --git a/compiler/luci/lang/include/luci/IR/CircleNodes.h b/compiler/luci/lang/include/luci/IR/CircleNodes.h
index cc822842b..3b31149b2 100644
--- a/compiler/luci/lang/include/luci/IR/CircleNodes.h
+++ b/compiler/luci/lang/include/luci/IR/CircleNodes.h
@@ -19,42 +19,124 @@
#include "Nodes/CircleAbs.h"
#include "Nodes/CircleAdd.h"
+#include "Nodes/CircleAddN.h"
#include "Nodes/CircleArgMax.h"
+#include "Nodes/CircleArgMin.h"
#include "Nodes/CircleAveragePool2D.h"
+#include "Nodes/CircleBatchMatMul.h"
#include "Nodes/CircleBatchToSpaceND.h"
+#include "Nodes/CircleCast.h"
+#include "Nodes/CircleCeil.h"
#include "Nodes/CircleConcatenation.h"
#include "Nodes/CircleConst.h"
#include "Nodes/CircleConv2D.h"
#include "Nodes/CircleCos.h"
+#include "Nodes/CircleCustom.h"
+#include "Nodes/CircleDepthToSpace.h"
#include "Nodes/CircleDepthwiseConv2D.h"
#include "Nodes/CircleDiv.h"
+#include "Nodes/CircleElu.h"
#include "Nodes/CircleEqual.h"
#include "Nodes/CircleExp.h"
+#include "Nodes/CircleExpandDims.h"
+#include "Nodes/CircleFill.h"
+#include "Nodes/CircleFloor.h"
+#include "Nodes/CircleFloorDiv.h"
+#include "Nodes/CircleFloorMod.h"
#include "Nodes/CircleFullyConnected.h"
#include "Nodes/CircleGather.h"
+#include "Nodes/CircleGatherNd.h"
+#include "Nodes/CircleGreater.h"
+#include "Nodes/CircleGreaterEqual.h"
+#include "Nodes/CircleIf.h"
+#include "Nodes/CircleL2Normalize.h"
+#include "Nodes/CircleL2Pool2D.h"
+#include "Nodes/CircleLeakyRelu.h"
+#include "Nodes/CircleLess.h"
+#include "Nodes/CircleLessEqual.h"
+#include "Nodes/CircleLocalResponseNormalization.h"
+#include "Nodes/CircleLog.h"
+#include "Nodes/CircleLogicalAnd.h"
#include "Nodes/CircleLogicalNot.h"
#include "Nodes/CircleLogicalOr.h"
+#include "Nodes/CircleLogistic.h"
+#include "Nodes/CircleLogSoftmax.h"
+#include "Nodes/CircleMatrixDiag.h"
+#include "Nodes/CircleMatrixSetDiag.h"
#include "Nodes/CircleMaximum.h"
#include "Nodes/CircleMaxPool2D.h"
#include "Nodes/CircleMean.h"
+#include "Nodes/CircleMinimum.h"
+#include "Nodes/CircleMirrorPad.h"
#include "Nodes/CircleMul.h"
+#include "Nodes/CircleNeg.h"
+#include "Nodes/CircleNotEqual.h"
+#include "Nodes/CircleOneHot.h"
#include "Nodes/CirclePack.h"
#include "Nodes/CirclePad.h"
-#include "Nodes/CircleRelu6.h"
+#include "Nodes/CirclePow.h"
+#include "Nodes/CirclePRelu.h"
+#include "Nodes/CircleRange.h"
+#include "Nodes/CircleRank.h"
+#include "Nodes/CircleReduceAny.h"
+#include "Nodes/CircleReduceMax.h"
+#include "Nodes/CircleReduceMin.h"
+#include "Nodes/CircleReduceProd.h"
#include "Nodes/CircleRelu.h"
+#include "Nodes/CircleRelu6.h"
+#include "Nodes/CircleReluN1To1.h"
#include "Nodes/CircleReshape.h"
+#include "Nodes/CircleResizeBilinear.h"
+#include "Nodes/CircleResizeNearestNeighbor.h"
+#include "Nodes/CircleReverseSequence.h"
+#include "Nodes/CircleReverseV2.h"
+#include "Nodes/CircleRound.h"
#include "Nodes/CircleRsqrt.h"
+#include "Nodes/CircleScatterNd.h"
+#include "Nodes/CircleSegmentSum.h"
+#include "Nodes/CircleSelect.h"
+#include "Nodes/CircleSelectV2.h"
+#include "Nodes/CircleShape.h"
+#include "Nodes/CircleSin.h"
+#include "Nodes/CircleSlice.h"
#include "Nodes/CircleSoftmax.h"
+#include "Nodes/CircleSpaceToBatchND.h"
+#include "Nodes/CircleSpaceToDepth.h"
+#include "Nodes/CircleSparseToDense.h"
+#include "Nodes/CircleSplit.h"
+#include "Nodes/CircleSplitV.h"
#include "Nodes/CircleSqrt.h"
+#include "Nodes/CircleSquare.h"
#include "Nodes/CircleSquaredDifference.h"
+#include "Nodes/CircleSqueeze.h"
+#include "Nodes/CircleStridedSlice.h"
#include "Nodes/CircleSub.h"
-#include "Nodes/CircleTransposeConv.h"
+#include "Nodes/CircleSum.h"
+#include "Nodes/CircleTanh.h"
+#include "Nodes/CircleTile.h"
+#include "Nodes/CircleTopKV2.h"
#include "Nodes/CircleTranspose.h"
+#include "Nodes/CircleTransposeConv.h"
+#include "Nodes/CircleUnpack.h"
+#include "Nodes/CircleWhere.h"
+#include "Nodes/CircleWhile.h"
+#include "Nodes/CircleZerosLike.h"
// Circle only
+#include "Nodes/CircleBCQFullyConnected.h"
+#include "Nodes/CircleBCQGather.h"
#include "Nodes/CircleInstanceNorm.h"
// Virtual nodes
#include "Nodes/CircleInput.h"
#include "Nodes/CircleOutput.h"
+#include "Nodes/CircleCustomOut.h"
+#include "Nodes/CircleIfOut.h"
+#include "Nodes/CircleUnpackOut.h"
+#include "Nodes/CircleSplitOut.h"
+#include "Nodes/CircleSplitVOut.h"
+#include "Nodes/CircleTopKV2Out.h"
+#include "Nodes/CircleWhileOut.h"
+
+#include <loco/IR/Graph.h>
namespace luci
{
@@ -68,6 +150,18 @@ namespace luci
*/
void set_new_shape(CircleReshape *node, int32_t *base, uint32_t size);
+/// @brief Link GraphOutput with CircleOutput node
+void link(loco::GraphOutput *, CircleOutput *);
+
+/// @brief Link GraphInput with CircleInput node
+void link(loco::GraphInput *, CircleInput *);
+
+/// @brief Find a CircleOutput node with a given output index
+CircleOutput *output_node(loco::Graph *g, const loco::GraphOutputIndex &index);
+
+/// @brief Find a Pull node with a given input index
+CircleInput *input_node(loco::Graph *g, const loco::GraphInputIndex &index);
+
} // namespace luci
#endif // __LUCI_IR_CIRCLENODES_H__
diff --git a/compiler/luci/lang/include/luci/IR/CircleNodes.lst b/compiler/luci/lang/include/luci/IR/CircleNodes.lst
index ca3f7fb0f..488dcfb89 100644
--- a/compiler/luci/lang/include/luci/IR/CircleNodes.lst
+++ b/compiler/luci/lang/include/luci/IR/CircleNodes.lst
@@ -13,40 +13,121 @@
CIRCLE_NODE(ABS, luci::CircleAbs)
CIRCLE_NODE(ADD, luci::CircleAdd)
+CIRCLE_NODE(ADD_N, luci::CircleAddN)
CIRCLE_NODE(ARG_MAX, luci::CircleArgMax)
+CIRCLE_NODE(ARG_MIN, luci::CircleArgMin)
CIRCLE_NODE(AVERAGE_POOL_2D, luci::CircleAveragePool2D)
CIRCLE_NODE(BATCH_TO_SPACE_ND, luci::CircleBatchToSpaceND)
+CIRCLE_NODE(BATCHMATMUL, luci::CircleBatchMatMul)
+CIRCLE_NODE(CAST, luci::CircleCast)
+CIRCLE_NODE(CEIL, luci::CircleCeil)
CIRCLE_NODE(CONCATENATION, luci::CircleConcatenation)
CIRCLE_NODE(CONST, luci::CircleConst)
CIRCLE_NODE(CONV_2D, luci::CircleConv2D)
CIRCLE_NODE(COS, luci::CircleCos)
+CIRCLE_NODE(CUSTOM, luci::CircleCustom)
+CIRCLE_NODE(DEPTH_TO_SPACE, luci::CircleDepthToSpace)
CIRCLE_NODE(DEPTHWISE_CONV_2D, luci::CircleDepthwiseConv2D)
CIRCLE_NODE(DIV, luci::CircleDiv)
+CIRCLE_NODE(ELU, luci::CircleElu)
CIRCLE_NODE(EQUAL, luci::CircleEqual)
CIRCLE_NODE(EXP, luci::CircleExp)
+CIRCLE_NODE(EXPAND_DIMS, luci::CircleExpandDims)
+CIRCLE_NODE(FILL, luci::CircleFill)
+CIRCLE_NODE(FLOOR, luci::CircleFloor)
+CIRCLE_NODE(FLOOR_DIV, luci::CircleFloorDiv)
+CIRCLE_NODE(FLOOR_MOD, luci::CircleFloorMod)
CIRCLE_NODE(FULLY_CONNECTED, luci::CircleFullyConnected)
CIRCLE_NODE(GATHER, luci::CircleGather)
+CIRCLE_NODE(GATHER_ND, luci::CircleGatherNd)
+CIRCLE_NODE(GREATER, luci::CircleGreater)
+CIRCLE_NODE(GREATER_EQUAL, luci::CircleGreaterEqual)
+CIRCLE_NODE(IF, luci::CircleIf)
+CIRCLE_NODE(L2_NORMALIZATION, luci::CircleL2Normalize)
+CIRCLE_NODE(L2_POOL_2D, luci::CircleL2Pool2D)
+CIRCLE_NODE(LEAKY_RELU, luci::CircleLeakyRelu)
+CIRCLE_NODE(LESS, luci::CircleLess)
+CIRCLE_NODE(LESS_EQUAL, luci::CircleLessEqual)
+CIRCLE_NODE(LOCAL_RESPONSE_NORMALIZATION, luci::CircleLocalResponseNormalization)
+CIRCLE_NODE(LOG, luci::CircleLog)
+CIRCLE_NODE(LOGICAL_AND, luci::CircleLogicalAnd)
CIRCLE_NODE(LOGICAL_NOT, luci::CircleLogicalNot)
CIRCLE_NODE(LOGICAL_OR, luci::CircleLogicalOr)
-CIRCLE_NODE(MAXIMUM, luci::CircleMaximum)
+CIRCLE_NODE(LOGISTIC, luci::CircleLogistic)
+CIRCLE_NODE(LOG_SOFTMAX, luci::CircleLogSoftmax)
+CIRCLE_NODE(MATRIX_DIAG, luci::CircleMatrixDiag)
CIRCLE_NODE(MAX_POOL_2D, luci::CircleMaxPool2D)
+CIRCLE_NODE(MATRIX_SET_DIAG, luci::CircleMatrixSetDiag)
+CIRCLE_NODE(MAXIMUM, luci::CircleMaximum)
CIRCLE_NODE(MEAN, luci::CircleMean)
+CIRCLE_NODE(MINIMUM, luci::CircleMinimum)
+CIRCLE_NODE(MIRROR_PAD, luci::CircleMirrorPad)
CIRCLE_NODE(MUL, luci::CircleMul)
+CIRCLE_NODE(NEG, luci::CircleNeg)
+CIRCLE_NODE(NOT_EQUAL, luci::CircleNotEqual)
+CIRCLE_NODE(ONE_HOT, luci::CircleOneHot)
CIRCLE_NODE(PACK, luci::CirclePack)
CIRCLE_NODE(PAD, luci::CirclePad)
+CIRCLE_NODE(POW, luci::CirclePow)
+CIRCLE_NODE(PRELU, luci::CirclePRelu)
+CIRCLE_NODE(RANGE, luci::CircleRange)
+CIRCLE_NODE(RANK, luci::CircleRank)
+CIRCLE_NODE(REDUCE_ANY, luci::CircleReduceAny)
+CIRCLE_NODE(REDUCE_MAX, luci::CircleReduceMax)
+CIRCLE_NODE(REDUCE_MIN, luci::CircleReduceMin)
+CIRCLE_NODE(REDUCE_PROD, luci::CircleReduceProd)
CIRCLE_NODE(RELU, luci::CircleRelu)
CIRCLE_NODE(RELU6, luci::CircleRelu6)
+CIRCLE_NODE(RELU_N1_TO_1, luci::CircleReluN1To1)
CIRCLE_NODE(RESHAPE, luci::CircleReshape)
+CIRCLE_NODE(RESIZE_BILINEAR, luci::CircleResizeBilinear)
+CIRCLE_NODE(RESIZE_NEAREST_NEIGHBOR, luci::CircleResizeNearestNeighbor)
+CIRCLE_NODE(REVERSE_SEQUENCE, luci::CircleReverseSequence)
+CIRCLE_NODE(REVERSE_V2, luci::CircleReverseV2)
+CIRCLE_NODE(ROUND, luci::CircleRound)
CIRCLE_NODE(RSQRT, luci::CircleRsqrt)
+CIRCLE_NODE(SCATTER_ND, luci::CircleScatterNd)
+CIRCLE_NODE(SEGMENT_SUM, luci::CircleSegmentSum)
+CIRCLE_NODE(SELECT, luci::CircleSelect)
+CIRCLE_NODE(SELECT_V2, luci::CircleSelectV2)
+CIRCLE_NODE(SHAPE, luci::CircleShape)
+CIRCLE_NODE(SIN, luci::CircleSin)
+CIRCLE_NODE(SLICE, luci::CircleSlice)
CIRCLE_NODE(SOFTMAX, luci::CircleSoftmax)
+CIRCLE_NODE(SPACE_TO_BATCH_ND, luci::CircleSpaceToBatchND)
+CIRCLE_NODE(SPACE_TO_DEPTH, luci::CircleSpaceToDepth)
+CIRCLE_NODE(SPARSE_TO_DENSE, luci::CircleSparseToDense)
+CIRCLE_NODE(SPLIT, luci::CircleSplit)
+CIRCLE_NODE(SPLIT_V, luci::CircleSplitV)
CIRCLE_NODE(SQRT, luci::CircleSqrt)
+CIRCLE_NODE(SQUARE, luci::CircleSquare)
CIRCLE_NODE(SQUARED_DIFFERENCE, luci::CircleSquaredDifference)
+CIRCLE_NODE(SQUEEZE, luci::CircleSqueeze)
+CIRCLE_NODE(STRIDED_SLICE, luci::CircleStridedSlice)
CIRCLE_NODE(SUB, luci::CircleSub)
-// TODO TFLTanh
+CIRCLE_NODE(SUM, luci::CircleSum)
+CIRCLE_NODE(TANH, luci::CircleTanh)
+CIRCLE_NODE(TILE, luci::CircleTile)
+CIRCLE_NODE(TOPK_V2, luci::CircleTopKV2)
CIRCLE_NODE(TRANSPOSE, luci::CircleTranspose)
CIRCLE_NODE(TRANSPOSE_CONV, luci::CircleTransposeConv)
+CIRCLE_NODE(UNPACK, luci::CircleUnpack)
+CIRCLE_NODE(WHERE, luci::CircleWhere)
+CIRCLE_NODE(WHILE, luci::CircleWhile)
+CIRCLE_NODE(ZEROS_LIKE, luci::CircleZerosLike)
// Circle Only
+CIRCLE_NODE(BCQ_FULLY_CONNECTED, luci::CircleBCQFullyConnected)
+CIRCLE_NODE(BCQ_GATHER, luci::CircleBCQGather)
CIRCLE_NODE(INSTANCE_NORM, luci::CircleInstanceNorm)
// Virtual node(s)
CIRCLE_NODE(CIRCLEINPUT, luci::CircleInput)
CIRCLE_NODE(CIRCLEOUTPUT, luci::CircleOutput)
+CIRCLE_NODE(CIRCLEOUTPUTDUMMY, luci::CircleOutputDummy)
+CIRCLE_NODE(CIRCLEOUTPUTEXCLUDE, luci::CircleOutputExclude)
+CIRCLE_NODE(CIRCLECUSTOMOUT, luci::CircleCustomOut)
+CIRCLE_NODE(CIRCLEIFOUT, luci::CircleIfOut)
+CIRCLE_NODE(CIRCLESPLITOUT, luci::CircleSplitOut)
+CIRCLE_NODE(CIRCLESPLITVOUT, luci::CircleSplitVOut)
+CIRCLE_NODE(CIRCLETOPKV2OUT, luci::CircleTopKV2Out)
+CIRCLE_NODE(CIRCLEUNPACKOUT, luci::CircleUnpackOut)
+CIRCLE_NODE(CIRCLEWHILEOUT, luci::CircleWhileOut)
diff --git a/compiler/luci/lang/include/luci/IR/LuciNodeMixins.h b/compiler/luci/lang/include/luci/IR/LuciNodeMixins.h
index b18ac5dc4..c1bb0db11 100644
--- a/compiler/luci/lang/include/luci/IR/LuciNodeMixins.h
+++ b/compiler/luci/lang/include/luci/IR/LuciNodeMixins.h
@@ -22,6 +22,8 @@
#include <loco/IR/Node.h>
#include <loco/IR/NodeMixins.h>
+#include <vector>
+
namespace luci
{
@@ -70,6 +72,7 @@ template <unsigned N, typename Base> class FixedArityNode : public Base
public:
FixedArityNode()
{
+ _args.resize(N);
for (uint32_t n = 0; n < N; ++n)
{
_args[n] = std::make_unique<loco::Use>(this);
@@ -96,7 +99,7 @@ protected:
loco::Use *at(unsigned n) const { return _args.at(n).get(); }
private:
- std::array<std::unique_ptr<loco::Use>, N> _args;
+ std::vector<std::unique_ptr<loco::Use>> _args{};
};
} // namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleAddN.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleAddN.h
new file mode 100644
index 000000000..6ba4a96bc
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleAddN.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCEL_ADD_N_H__
+#define __LUCI_IR_CIRCEL_ADD_N_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/VariadicArityNode.h"
+
+namespace luci
+{
+
+/**
+ * @brief ADD_N in Circle
+ */
+class CircleAddN final : public VariadicArityNode<CircleNodeImpl<CircleOpcode::ADD_N>>
+{
+public:
+ CircleAddN(uint32_t arity) : VariadicArityNode<CircleNodeImpl<CircleOpcode::ADD_N>>(arity)
+ {
+ assert(arity >= 1);
+ }
+
+public:
+ Node *inputs(uint32_t index) const { return at(index)->node(); }
+ void inputs(uint32_t index, Node *node) { at(index)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCEL_ADD_N_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleArgMin.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleArgMin.h
new file mode 100644
index 000000000..8cb561983
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleArgMin.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCELARGMIN_H__
+#define __LUCI_IR_CIRCELARGMIN_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief ARG_MIN in Circle
+ */
+class CircleArgMin final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::ARG_MIN>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *dimension(void) const { return at(1)->node(); }
+ void dimension(loco::Node *node) { at(1)->node(node); }
+
+public:
+ loco::DataType output_type(void) const { return _output_type; }
+ void output_type(loco::DataType ot) { _output_type = ot; }
+
+private:
+ loco::DataType _output_type{loco::DataType::S64};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCELARGMIN_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleBCQFullyConnected.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleBCQFullyConnected.h
new file mode 100644
index 000000000..7d12d593a
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleBCQFullyConnected.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEBCQFULLYCONNECTED_H__
+#define __LUCI_IR_CIRCLEBCQFULLYCONNECTED_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/AttrFusedActFunc.h"
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief BCQ_FULLY_CONNECTED in Circle
+ */
+class CircleBCQFullyConnected final
+ : public FixedArityNode<5, CircleNodeImpl<CircleOpcode::BCQ_FULLY_CONNECTED>>,
+ public LuciNodeMixin<LuciNodeTrait::FusedActFunc>,
+ public LuciNodeMixin<LuciNodeTrait::Bias>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *weights_scales(void) const { return at(1)->node(); }
+ void weights_scales(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *weights_binary(void) const { return at(2)->node(); }
+ void weights_binary(loco::Node *node) { at(2)->node(node); }
+
+ loco::Node *bias(void) const override { return at(3)->node(); }
+ void bias(loco::Node *node) override { at(3)->node(node); }
+
+ loco::Node *weights_clusters(void) const { return at(4)->node(); }
+ void weights_clusters(loco::Node *node) { at(4)->node(node); }
+
+public:
+ int32_t weights_hidden_size(void) const { return _weights_hidden_size; }
+ void weights_hidden_size(int32_t weights_hidden_size)
+ {
+ _weights_hidden_size = weights_hidden_size;
+ }
+
+private:
+ int32_t _weights_hidden_size = 0;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEBCQFULLYCONNECTED_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleBCQGather.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleBCQGather.h
new file mode 100644
index 000000000..f7638261d
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleBCQGather.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEBCQGATHER_H__
+#define __LUCI_IR_CIRCLEBCQGATHER_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief BCQ_GATHER in Circle
+ */
+class CircleBCQGather final : public FixedArityNode<4, CircleNodeImpl<CircleOpcode::BCQ_GATHER>>
+{
+public:
+ loco::Node *input_scales(void) const { return at(0)->node(); }
+ void input_scales(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *input_binary(void) const { return at(1)->node(); }
+ void input_binary(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *indices(void) const { return at(2)->node(); }
+ void indices(loco::Node *node) { at(2)->node(node); }
+
+ loco::Node *input_clusters(void) const { return at(3)->node(); }
+ void input_clusters(loco::Node *node) { at(3)->node(node); }
+
+public:
+ int32_t axis(void) const { return _axis; }
+ void axis(int32_t axis) { _axis = axis; }
+
+ int32_t input_hidden_size(void) const { return _input_hidden_size; }
+ void input_hidden_size(int32_t input_hidden_size) { _input_hidden_size = input_hidden_size; }
+
+private:
+ int32_t _axis = 0;
+ int32_t _input_hidden_size = 0;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEBCQGATHER_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleBatchMatMul.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleBatchMatMul.h
new file mode 100644
index 000000000..19999924e
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleBatchMatMul.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEBATCHMATMUL_H__
+#define __LUCI_IR_CIRCLEBATCHMATMUL_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief BATCHMATMUL in Circle
+ */
+class CircleBatchMatMul final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::BATCHMATMUL>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+
+public:
+ bool adj_x(void) const { return _adj_x; }
+ void adj_x(bool arg) { _adj_x = arg; }
+
+ bool adj_y(void) const { return _adj_y; }
+ void adj_y(bool arg) { _adj_y = arg; }
+
+private:
+ bool _adj_x = false;
+ bool _adj_y = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEBATCHMATMUL_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleCast.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleCast.h
new file mode 100644
index 000000000..9a89d0b2b
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleCast.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLECAST_H__
+#define __LUCI_IR_CIRCLECAST_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief CAST in Circle
+ */
+class CircleCast final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CAST>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+public:
+ loco::DataType in_data_type(void) const { return _in_data_type; }
+ void in_data_type(loco::DataType it) { _in_data_type = it; }
+
+ loco::DataType out_data_type(void) const { return _out_data_type; }
+ void out_data_type(loco::DataType ot) { _out_data_type = ot; }
+
+private:
+ loco::DataType _in_data_type{loco::DataType::FLOAT32};
+ loco::DataType _out_data_type{loco::DataType::FLOAT32};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLECAST_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleCeil.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleCeil.h
new file mode 100644
index 000000000..8a8715dcf
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleCeil.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_CEIL_H__
+#define __LUCI_IR_CIRCLE_CEIL_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief CEIL in Circle
+ */
+class CircleCeil final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CEIL>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_CEIL_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleConcatenation.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleConcatenation.h
index 8a6778a2f..dea1a4613 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleConcatenation.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleConcatenation.h
@@ -64,7 +64,7 @@ public:
void axis(int32_t axis) { _axis = axis; }
private:
- int32_t _axis;
+ int32_t _axis{0};
};
} // namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h
index 089836eb9..fc671746f 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
#include <loco/IR/DataTypeTraits.h>
@@ -32,9 +31,7 @@ namespace luci
* @brief Class to build tensor data
* @note This will not be exported as a specific op
*/
-class CircleConst final : public FixedArityNode<0, CircleNodeImpl<CircleOpcode::CONST>>,
- public loco::NodeMixin<loco::NodeTrait::DataType>,
- public loco::NodeMixin<loco::NodeTrait::TensorShape>
+class CircleConst final : public FixedArityNode<0, CircleNodeImpl<CircleOpcode::CONST>>
{
public:
CircleConst() = default;
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleConv2D.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleConv2D.h
index 54318e65c..13657cee4 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleConv2D.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleConv2D.h
@@ -22,6 +22,7 @@
#include "luci/IR/AttrPadding.h"
#include "luci/IR/AttrStride.h"
+#include "luci/IR/AttrDilation.h"
#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
@@ -52,9 +53,13 @@ public:
const Stride *stride(void) const { return &_stride; }
Stride *stride(void) { return &_stride; }
+ const Dilation *dilation(void) const { return &_dilation; }
+ Dilation *dilation(void) { return &_dilation; }
+
private:
Padding _padding = Padding::UNDEFINED;
Stride _stride;
+ Dilation _dilation;
};
} // namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleCustom.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleCustom.h
new file mode 100644
index 000000000..6c722b766
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleCustom.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLECUSTOM_H__
+#define __LUCI_IR_CIRCLECUSTOM_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/VariadicArityNode.h"
+
+namespace luci
+{
+
+/**
+ * @brief CUSTOM in Circle
+ */
+class CircleCustom final : public VariadicArityNode<CircleNodeImpl<CircleOpcode::CUSTOM>>
+{
+public:
+ CircleCustom(uint32_t arity) : VariadicArityNode<CircleNodeImpl<CircleOpcode::CUSTOM>>(arity)
+ {
+ // TODO Support when arity is 0
+ assert(arity >= 1);
+ }
+
+public:
+ uint32_t numInputs(void) const { return arity(); }
+
+public:
+ Node *inputs(uint32_t index) const { return at(index)->node(); }
+ void inputs(uint32_t index, Node *node) { at(index)->node(node); }
+
+ const std::vector<uint8_t> &custom_options(void) const { return _custom_options; }
+ void custom_options(const std::vector<uint8_t> &custom_options)
+ {
+ _custom_options = custom_options;
+ }
+
+ const std::string &custom_code(void) const { return _custom_code; }
+ void custom_code(const std::string &custom_code) { _custom_code = custom_code; }
+
+private:
+ std::vector<uint8_t> _custom_options;
+ std::string _custom_code;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLECUSTOM_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleCustomOut.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleCustomOut.h
new file mode 100644
index 000000000..36b8e4aed
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleCustomOut.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_CUSTOMOUT_H__
+#define __LUCI_IR_CIRCLE_CUSTOMOUT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief Virtual CIRCLECUSTOMOUT in Circle
+ */
+class CircleCustomOut final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CIRCLECUSTOMOUT>>
+{
+public:
+ CircleCustomOut() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t index(void) const { return _index; }
+ void index(int32_t index) { _index = index; }
+
+private:
+ int32_t _index{-1};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_CUSTOMOUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleDepthToSpace.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleDepthToSpace.h
new file mode 100644
index 000000000..e19282b97
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleDepthToSpace.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_DEPTHTOSPACE_H__
+#define __LUCI_IR_CIRCLE_DEPTHTOSPACE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief DEPTH_TO_SPACE in Circle
+ */
+class CircleDepthToSpace final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::DEPTH_TO_SPACE>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int block_size(void) const { return _block_size; }
+ void block_size(int block_size) { _block_size = block_size; }
+
+private:
+ int _block_size{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_DEPTHTOSPACE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleDepthwiseConv2D.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleDepthwiseConv2D.h
index 15ee62ba7..eb058cec1 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleDepthwiseConv2D.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleDepthwiseConv2D.h
@@ -20,6 +20,7 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
+#include "luci/IR/AttrDilation.h"
#include "luci/IR/AttrFilter.h"
#include "luci/IR/AttrPadding.h"
#include "luci/IR/AttrStride.h"
@@ -57,10 +58,14 @@ public:
int32_t depthMultiplier(void) const { return _depth_multiplier; }
void depthMultiplier(int32_t arg) { _depth_multiplier = arg; }
+ const Dilation *dilation(void) const { return &_dilation; }
+ Dilation *dilation(void) { return &_dilation; }
+
private:
Padding _padding = Padding::UNDEFINED;
Stride _stride;
int32_t _depth_multiplier = 0;
+ Dilation _dilation;
};
} // namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleElu.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleElu.h
new file mode 100644
index 000000000..fbb2f3533
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleElu.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEELU_H__
+#define __LUCI_IR_CIRCLEELU_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief ELU in Circle
+ */
+class CircleElu final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::ELU>>
+{
+public:
+ CircleElu() = default;
+
+public:
+ loco::Node *features(void) const { return at(0)->node(); }
+ void features(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEELU_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleExpandDims.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleExpandDims.h
new file mode 100644
index 000000000..f70219614
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleExpandDims.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEEXPAND_DIMS_H__
+#define __LUCI_IR_CIRCLEEXPAND_DIMS_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief EXPAND_DIMS in Circle
+ */
+class CircleExpandDims final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::EXPAND_DIMS>>
+{
+public:
+ CircleExpandDims() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *axis(void) const { return at(1)->node(); }
+ void axis(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEEXPAND_DIMS_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleFill.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleFill.h
new file mode 100644
index 000000000..bfc65274a
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleFill.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEFILL_H__
+#define __LUCI_IR_CIRCLEFILL_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief FILL in Circle
+ */
+class CircleFill final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::FILL>>
+{
+public:
+ loco::Node *dims(void) const { return at(0)->node(); }
+ void dims(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *value(void) const { return at(1)->node(); }
+ void value(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEFILL_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleFloor.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleFloor.h
new file mode 100644
index 000000000..7e10547b6
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleFloor.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_FLOOR_H__
+#define __LUCI_IR_CIRCLE_FLOOR_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief FLOOR in Circle
+ */
+class CircleFloor final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::FLOOR>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_FLOOR_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleFloorDiv.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleFloorDiv.h
new file mode 100644
index 000000000..ba9db010c
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleFloorDiv.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_FLOOR_DIV_H__
+#define __LUCI_IR_CIRCLE_FLOOR_DIV_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief FLOOR_DIV in Circle
+ */
+class CircleFloorDiv final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::FLOOR_DIV>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_FLOOR_DIV_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleFloorMod.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleFloorMod.h
new file mode 100644
index 000000000..4d13717a0
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleFloorMod.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_FLOOR_MOD_H__
+#define __LUCI_IR_CIRCLE_FLOOR_MOD_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief FLOOR_MOD in Circle
+ */
+class CircleFloorMod final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::FLOOR_MOD>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_FLOOR_MOD_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleGather.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleGather.h
index 489596c04..1e8c4982a 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleGather.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleGather.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
@@ -32,11 +31,11 @@ namespace luci
class CircleGather final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::GATHER>>
{
public:
- loco::Node *input(void) const { return at(0)->node(); }
- void input(loco::Node *node) { at(0)->node(node); }
+ loco::Node *params(void) const { return at(0)->node(); }
+ void params(loco::Node *node) { at(0)->node(node); }
- loco::Node *positions(void) const { return at(1)->node(); }
- void positions(loco::Node *node) { at(1)->node(node); }
+ loco::Node *indices(void) const { return at(1)->node(); }
+ void indices(loco::Node *node) { at(1)->node(node); }
public:
int32_t axis(void) const { return _axis; }
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleGatherNd.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleGatherNd.h
new file mode 100644
index 000000000..3423a8216
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleGatherNd.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEGATHER_ND_H__
+#define __LUCI_IR_CIRCLEGATHER_ND_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief GATHER_ND in Circle
+ */
+class CircleGatherNd final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::GATHER_ND>>
+{
+public:
+ loco::Node *params(void) const { return at(0)->node(); }
+ void params(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *indices(void) const { return at(1)->node(); }
+ void indices(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEGATHER_ND_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleGreater.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleGreater.h
new file mode 100644
index 000000000..040a4e338
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleGreater.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_GREATER_H__
+#define __LUCI_IR_CIRCLE_GREATER_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief GREATER in Circle
+ */
+class CircleGreater final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::GREATER>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_GREATER_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleGreaterEqual.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleGreaterEqual.h
new file mode 100644
index 000000000..82bdab212
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleGreaterEqual.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_GREATEREQUAL_H__
+#define __LUCI_IR_CIRCLE_GREATEREQUAL_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief GREATER EQUAL in Circle
+ */
+class CircleGreaterEqual final
+ : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::GREATER_EQUAL>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_GREATEREQUAL_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleIf.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleIf.h
new file mode 100644
index 000000000..2f9eac211
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleIf.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_IF_H__
+#define __LUCI_IR_CIRCLE_IF_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/VariadicArityNode.h"
+
+#include <cassert>
+
+namespace luci
+{
+
+/**
+ * @brief IF in Circle
+ */
+class CircleIf final : public VariadicArityNode<CircleNodeImpl<CircleOpcode::IF>>
+{
+public:
+ CircleIf(uint32_t arity, uint32_t out)
+ : VariadicArityNode<CircleNodeImpl<CircleOpcode::IF>>(arity + 1), _output_count(out)
+ {
+ assert(arity > 0);
+ assert(out > 0);
+ }
+
+public:
+ uint32_t input_count(void) const { return arity() - 1; }
+ uint32_t output_count(void) const { return _output_count; }
+
+public:
+ Node *cond(void) const { return at(0)->node(); }
+ void cond(Node *node) { at(0)->node(node); }
+
+ Node *input(uint32_t index) const { return at(index + 1)->node(); }
+ void input(uint32_t index, Node *node) { at(index + 1)->node(node); }
+
+public:
+ int32_t then_branch(void) const { return _then_branch; }
+ void then_branch(int32_t then_branch) { _then_branch = then_branch; }
+
+ int32_t else_branch(void) const { return _else_branch; }
+ void else_branch(int32_t else_branch) { _else_branch = else_branch; }
+
+public:
+ loco::Graph *then_graph(void) const { return _then_graph; }
+ void then_graph(loco::Graph *then_graph) { _then_graph = then_graph; }
+
+ loco::Graph *else_graph(void) const { return _else_graph; }
+ void else_graph(loco::Graph *else_graph) { _else_graph = else_graph; }
+
+private:
+ uint32_t _output_count{0};
+ int32_t _then_branch{-1};
+ int32_t _else_branch{-1};
+
+ loco::Graph *_then_graph{nullptr};
+ loco::Graph *_else_graph{nullptr};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_IF_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleIfOut.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleIfOut.h
new file mode 100644
index 000000000..3654e943b
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleIfOut.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_IFOUT_H__
+#define __LUCI_IR_CIRCLE_IFOUT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief Virtual CIRCLEIFOUT in Circle
+ */
+class CircleIfOut final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CIRCLEIFOUT>>
+{
+public:
+ CircleIfOut() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t index(void) const { return _index; }
+ void index(int32_t index) { _index = index; }
+
+private:
+ int32_t _index{-1};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_IFOUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleInput.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleInput.h
index 2c4d60253..4a7d36a4e 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleInput.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleInput.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
#include <loco/IR/DataTypeTraits.h>
@@ -33,9 +32,7 @@ namespace luci
* @brief CircleNode used for Input of the Graph
* @note This will not be exported as a specific op
*/
-class CircleInput final : public FixedArityNode<0, CircleNodeImpl<CircleOpcode::CIRCLEINPUT>>,
- public loco::NodeMixin<loco::NodeTrait::DataType>,
- public loco::NodeMixin<loco::NodeTrait::TensorShape>
+class CircleInput final : public FixedArityNode<0, CircleNodeImpl<CircleOpcode::CIRCLEINPUT>>
{
public:
CircleInput() = default;
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleL2Normalize.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleL2Normalize.h
new file mode 100644
index 000000000..efa932d95
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleL2Normalize.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCEL2NORMALIZE_H__
+#define __LUCI_IR_CIRCEL2NORMALIZE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/AttrFusedActFunc.h"
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief L2_NORMALIZATION in Circle
+ */
+class CircleL2Normalize final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::L2_NORMALIZATION>>,
+ public LuciNodeMixin<LuciNodeTrait::FusedActFunc>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCEL2NORMALIZE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleL2Pool2D.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleL2Pool2D.h
new file mode 100644
index 000000000..7c76ee5d0
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleL2Pool2D.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_L2_POOL2D_H__
+#define __LUCI_IR_CIRCLE_L2_POOL2D_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/AttrFilter.h"
+#include "luci/IR/AttrPadding.h"
+#include "luci/IR/AttrStride.h"
+#include "luci/IR/AttrFusedActFunc.h"
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief L2_POOL_2D in Circle
+ */
+class CircleL2Pool2D final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::L2_POOL_2D>>,
+ public LuciNodeMixin<LuciNodeTrait::FusedActFunc>
+{
+public:
+ CircleL2Pool2D() : _padding(Padding::UNDEFINED) { /* empty */}
+
+public:
+ loco::Node *value(void) const { return at(0)->node(); }
+ void value(loco::Node *node) { at(0)->node(node); }
+
+ Padding padding() const { return _padding; }
+ void padding(Padding padding) { _padding = padding; }
+
+ const Filter *filter(void) const { return &_filter; }
+ Filter *filter(void) { return &_filter; }
+
+ const Stride *stride(void) const { return &_stride; }
+ Stride *stride(void) { return &_stride; }
+
+private:
+ Padding _padding;
+ Stride _stride;
+ Filter _filter;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_L2_POOL2D_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLeakyRelu.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLeakyRelu.h
new file mode 100644
index 000000000..d6ac97fc0
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLeakyRelu.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_LEAKY_RELU_H__
+#define __LUCI_IR_CIRCLE_LEAKY_RELU_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LEAKY_RELU in Circle
+ */
+class CircleLeakyRelu final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::LEAKY_RELU>>
+{
+public:
+ CircleLeakyRelu() = default;
+
+public:
+ loco::Node *features(void) const { return at(0)->node(); }
+ void features(loco::Node *node) { at(0)->node(node); }
+
+ float alpha() const { return _alpha; }
+ void alpha(float alpha) { _alpha = alpha; }
+
+private:
+ float _alpha = 0.2f;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_LEAKY_RELU_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLess.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLess.h
new file mode 100644
index 000000000..cd6cf1872
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLess.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_LESS_H__
+#define __LUCI_IR_CIRCLE_LESS_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LESS in Circle
+ */
+class CircleLess final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::LESS>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_LESS_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLessEqual.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLessEqual.h
new file mode 100644
index 000000000..4c7c6a49b
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLessEqual.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_LESSEQUAL_H__
+#define __LUCI_IR_CIRCLE_LESSEQUAL_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LESS_EQUAL in Circle
+ */
+class CircleLessEqual final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::LESS_EQUAL>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_LESSEQUAL_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLocalResponseNormalization.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLocalResponseNormalization.h
new file mode 100644
index 000000000..8ad2b40fd
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLocalResponseNormalization.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLELOCAL_RESPONSE_NORMALIZATION_H__
+#define __LUCI_IR_CIRCLELOCAL_RESPONSE_NORMALIZATION_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LOCAL_RESPONSE_NORMALIZATION in Circle
+ */
+class CircleLocalResponseNormalization final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::LOCAL_RESPONSE_NORMALIZATION>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t radius(void) const { return _radius; }
+ void radius(int32_t radius) { _radius = radius; }
+
+ float bias(void) const { return _bias; }
+ void bias(float bias) { _bias = bias; }
+
+ float alpha(void) const { return _alpha; }
+ void alpha(float alpha) { _alpha = alpha; }
+
+ float beta(void) const { return _beta; }
+ void beta(float beta) { _beta = beta; }
+
+private:
+ int32_t _radius{5};
+ float _bias{1.0f};
+ float _alpha{1.0f};
+ float _beta{0.5f};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLELOCAL_RESPONSE_NORMALIZATION_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLog.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLog.h
new file mode 100644
index 000000000..aeb13fed9
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLog.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_LOG_H__
+#define __LUCI_IR_CIRCLE_LOG_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LOG in Circle
+ */
+class CircleLog final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::LOG>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_LOG_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLogSoftmax.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLogSoftmax.h
new file mode 100644
index 000000000..5dfd2c1f9
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLogSoftmax.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_LOG_SOFTMAX_H__
+#define __LUCI_IR_CIRCLE_LOG_SOFTMAX_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LOG_SOFTMAX in Circle
+ */
+class CircleLogSoftmax final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::LOG_SOFTMAX>>
+{
+public:
+ loco::Node *logits(void) const { return at(0)->node(); }
+ void logits(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_LOG_SOFTMAX_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLogicalAnd.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLogicalAnd.h
new file mode 100644
index 000000000..975f6dbc7
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLogicalAnd.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_LOGICALAND_H__
+#define __LUCI_IR_CIRCLE_LOGICALAND_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LOGICAL_AND in Circle
+ */
+class CircleLogicalAnd final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::LOGICAL_AND>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_LOGICALAND_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleLogistic.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleLogistic.h
new file mode 100644
index 000000000..8328cb328
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleLogistic.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_LOGISTIC_H__
+#define __LUCI_IR_CIRCLE_LOGISTIC_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief LOGISTIC in Circle
+ */
+class CircleLogistic final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::LOGISTIC>>
+{
+public:
+ CircleLogistic() = default;
+
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_LOGISTIC_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixDiag.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixDiag.h
new file mode 100644
index 000000000..dca6538c3
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixDiag.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEMATRIXDIAG_H__
+#define __LUCI_IR_CIRCLEMATRIXDIAG_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief MATRIX_DIAG in Circle
+ */
+class CircleMatrixDiag final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::MATRIX_DIAG>>
+{
+public:
+ loco::Node *diagonal(void) const { return at(0)->node(); }
+ void diagonal(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEMATRIXDIAG_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixSetDiag.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixSetDiag.h
new file mode 100644
index 000000000..c1f5f3023
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleMatrixSetDiag.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEMATRIXSETDIAG_H__
+#define __LUCI_IR_CIRCLEMATRIXSETDIAG_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief MATRIX_SET_DIAG in Circle
+ */
+class CircleMatrixSetDiag final
+ : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::MATRIX_SET_DIAG>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *diagonal(void) const { return at(1)->node(); }
+ void diagonal(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEMATRIXSETDIAG_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleMaximum.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleMaximum.h
index cf7305e3a..6f789bc14 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleMaximum.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleMaximum.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleMean.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleMean.h
index 6fd791450..7f8aeb5aa 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleMean.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleMean.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleMinimum.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleMinimum.h
new file mode 100644
index 000000000..79d5a6f17
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleMinimum.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEMINIMUM_H__
+#define __LUCI_IR_CIRCLEMINIMUM_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief MINIMUM in Circle
+ */
+class CircleMinimum final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::MINIMUM>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEMINIMUM_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleMirrorPad.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleMirrorPad.h
new file mode 100644
index 000000000..68db8f6f3
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleMirrorPad.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_MIRRORPAD_H__
+#define __LUCI_IR_CIRCLE_MIRRORPAD_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+#include "luci/IR/AttrMirrorPadMode.h"
+
+namespace luci
+{
+
+/**
+ * @brief MIRROR_PAD in Circle
+ */
+class CircleMirrorPad final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::MIRROR_PAD>>
+{
+public:
+ CircleMirrorPad() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *paddings(void) const { return at(1)->node(); }
+ void paddings(loco::Node *node) { at(1)->node(node); }
+
+public:
+ MirrorPadMode mode(void) const { return _mode; }
+ void mode(MirrorPadMode mode) { _mode = mode; }
+
+private:
+ MirrorPadMode _mode{MirrorPadMode::REFLECT};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_MIRRORPAD_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleNeg.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleNeg.h
new file mode 100644
index 000000000..4149ac4a7
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleNeg.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLENEG_H__
+#define __LUCI_IR_CIRCLENEG_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief NEG in Circle
+ */
+class CircleNeg final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::NEG>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLENEG_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleNotEqual.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleNotEqual.h
new file mode 100644
index 000000000..cca7a5e22
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleNotEqual.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_NOTEQUAL_H__
+#define __LUCI_IR_CIRCLE_NOTEQUAL_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief NOT_EQUAL in Circle
+ */
+class CircleNotEqual final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::NOT_EQUAL>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_NOTEQUAL_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleOneHot.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleOneHot.h
new file mode 100644
index 000000000..665e01d48
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleOneHot.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEONEHOT_H__
+#define __LUCI_IR_CIRCLEONEHOT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief ONE_HOT in Circle
+ */
+class CircleOneHot final : public FixedArityNode<4, CircleNodeImpl<CircleOpcode::ONE_HOT>>
+{
+public:
+ loco::Node *indices(void) const { return at(0)->node(); }
+ void indices(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *depth(void) const { return at(1)->node(); }
+ void depth(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *on_value(void) const { return at(2)->node(); }
+ void on_value(loco::Node *node) { at(2)->node(node); }
+
+ loco::Node *off_value(void) const { return at(3)->node(); }
+ void off_value(loco::Node *node) { at(3)->node(node); }
+
+public:
+ int32_t axis(void) const { return _axis; }
+ void axis(int32_t axis) { _axis = axis; }
+
+private:
+ int32_t _axis = -1;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEONEHOT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleOutput.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleOutput.h
index c65317ad1..67e55f1a1 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleOutput.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleOutput.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
#include <loco/IR/GraphOutputIndex.h>
@@ -50,6 +49,27 @@ private:
int64_t _index = -1; // Uninitialized
};
+/**
+ * @brief Temporary DummyNode used with a dangling CircleNode
+ */
+// TODO remove CircleOutputDummy
+class CircleOutputDummy final
+ : public FixedArityNode<0, CircleNodeImpl<CircleOpcode::CIRCLEOUTPUTDUMMY>>
+{
+public:
+ CircleOutputDummy() = default;
+};
+
+/**
+ * @brief CircleOutputExclude is used to specify nodes that are not exported
+ */
+class CircleOutputExclude final
+ : public FixedArityNode<0, CircleNodeImpl<CircleOpcode::CIRCLEOUTPUTEXCLUDE>>
+{
+public:
+ CircleOutputExclude() = default;
+};
+
} // namespace luci
#endif // __LUCI_IR_CIRCLEOUTPUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CirclePRelu.h b/compiler/luci/lang/include/luci/IR/Nodes/CirclePRelu.h
new file mode 100644
index 000000000..693777512
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CirclePRelu.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_PRELU_H__
+#define __LUCI_IR_PRELU_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief PRelu in Circle
+ */
+class CirclePRelu final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::PRELU>>
+{
+public:
+ CirclePRelu() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *alpha(void) const { return at(1)->node(); }
+ void alpha(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_PRELU_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CirclePow.h b/compiler/luci/lang/include/luci/IR/Nodes/CirclePow.h
new file mode 100644
index 000000000..006e3dd86
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CirclePow.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_POW_H__
+#define __LUCI_IR_POW_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief POW in Circle
+ */
+class CirclePow final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::POW>>
+{
+public:
+ CirclePow() = default;
+
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *y(void) const { return at(1)->node(); }
+ void y(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_POW_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleRange.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleRange.h
new file mode 100644
index 000000000..977a37a52
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleRange.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLERANGE_H__
+#define __LUCI_IR_CIRCLERANGE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief RANGE in Circle
+ */
+class CircleRange final : public FixedArityNode<3, CircleNodeImpl<CircleOpcode::RANGE>>
+{
+public:
+ loco::Node *start(void) const { return at(0)->node(); }
+ void start(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *limit(void) const { return at(1)->node(); }
+ void limit(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *delta(void) const { return at(2)->node(); }
+ void delta(loco::Node *node) { at(2)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLERANGE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleRank.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleRank.h
new file mode 100644
index 000000000..ba6d67f69
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleRank.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLERANK_H__
+#define __LUCI_IR_CIRCLERANK_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief RANK in Circle
+ */
+class CircleRank final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::RANK>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLERANK_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceAny.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceAny.h
new file mode 100644
index 000000000..0456be863
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceAny.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_REDUCE_ANY_H__
+#define __LUCI_IR_CIRCLE_REDUCE_ANY_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief REDUCE_ANY in Circle
+ */
+class CircleReduceAny final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::REDUCE_ANY>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *reduction_indices(void) const { return at(1)->node(); }
+ void reduction_indices(loco::Node *node) { at(1)->node(node); }
+
+public:
+ bool keep_dims(void) const { return _keep_dims; }
+ void keep_dims(bool keep_dims) { _keep_dims = keep_dims; }
+
+private:
+ bool _keep_dims = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_REDUCE_ANY_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMax.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMax.h
new file mode 100644
index 000000000..925c977e5
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMax.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_REDUCE_MAX_H__
+#define __LUCI_IR_CIRCLE_REDUCE_MAX_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief REDUCE_MAX in Circle
+ */
+class CircleReduceMax final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::REDUCE_MAX>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *reduction_indices(void) const { return at(1)->node(); }
+ void reduction_indices(loco::Node *node) { at(1)->node(node); }
+
+public:
+ bool keep_dims(void) const { return _keep_dims; }
+ void keep_dims(bool keep_dims) { _keep_dims = keep_dims; }
+
+private:
+ bool _keep_dims = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_REDUCE_MAX_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMin.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMin.h
new file mode 100644
index 000000000..fd789ae5e
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceMin.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_REDUCE_MIN_H__
+#define __LUCI_IR_CIRCLE_REDUCE_MIN_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief REDUCE_MIN in Circle
+ */
+class CircleReduceMin final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::REDUCE_MIN>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *reduction_indices(void) const { return at(1)->node(); }
+ void reduction_indices(loco::Node *node) { at(1)->node(node); }
+
+public:
+ bool keep_dims(void) const { return _keep_dims; }
+ void keep_dims(bool keep_dims) { _keep_dims = keep_dims; }
+
+private:
+ bool _keep_dims = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_REDUCE_MIN_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceProd.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceProd.h
new file mode 100644
index 000000000..b7d226255
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReduceProd.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_REDUCE_PROD_H__
+#define __LUCI_IR_CIRCLE_REDUCE_PROD_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief REDUCE_PROD in Circle
+ */
+class CircleReduceProd final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::REDUCE_PROD>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *reduction_indices(void) const { return at(1)->node(); }
+ void reduction_indices(loco::Node *node) { at(1)->node(node); }
+
+public:
+ bool keep_dims(void) const { return _keep_dims; }
+ void keep_dims(bool keep_dims) { _keep_dims = keep_dims; }
+
+private:
+ bool _keep_dims = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_REDUCE_PROD_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu.h
index afb2c667a..91272d2bf 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu6.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu6.h
index b313a5557..b4274ded9 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu6.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleRelu6.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReluN1To1.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReluN1To1.h
new file mode 100644
index 000000000..a5c5710c2
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReluN1To1.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_RELU_N1_TO_1_H__
+#define __LUCI_IR_CIRCLE_RELU_N1_TO_1_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief RELU_N1_TO_1 in Circle
+ */
+class CircleReluN1To1 final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::RELU_N1_TO_1>>
+{
+public:
+ CircleReluN1To1() = default;
+
+public:
+ loco::Node *features(void) const { return at(0)->node(); }
+ void features(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_RELU_N1_TO_1_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReshape.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReshape.h
index a3a2a3f31..b13144f7e 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleReshape.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReshape.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
@@ -38,8 +37,8 @@ public:
loco::Node *tensor(void) const { return at(0)->node(); }
void tensor(loco::Node *node) { at(0)->node(node); }
- // TODO Make this input optional. That is, loco system does not emit error
- // with this input being null
+ // NOTE shape is optional and can be CircleConst or any other type
+ // and also can be CircleOutputDummy when reshape option does not exist
loco::Node *shape(void) const { return at(1)->node(); }
void shape(loco::Node *node) { at(1)->node(node); }
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleResizeBilinear.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleResizeBilinear.h
new file mode 100644
index 000000000..3c8223338
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleResizeBilinear.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLERESIZE_BILINEAR_H__
+#define __LUCI_IR_CIRCLERESIZE_BILINEAR_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief RESIZE_BILINEAR in Circle
+ */
+class CircleResizeBilinear final
+ : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::RESIZE_BILINEAR>>
+{
+public:
+ CircleResizeBilinear() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *size(void) const { return at(1)->node(); }
+ void size(loco::Node *node) { at(1)->node(node); }
+
+ bool align_corners() const { return _align_corners; }
+ void align_corners(bool value) { _align_corners = value; }
+
+ bool half_pixel_centers() const { return _half_pixel_centers; }
+ void half_pixel_centers(bool value) { _half_pixel_centers = value; }
+
+private:
+ bool _align_corners = false;
+ bool _half_pixel_centers = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLERESIZE_BILINEAR_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleResizeNearestNeighbor.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleResizeNearestNeighbor.h
new file mode 100644
index 000000000..dc32ebee7
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleResizeNearestNeighbor.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLERESIZE_NEAREST_NEIGHBOR_H__
+#define __LUCI_IR_CIRCLERESIZE_NEAREST_NEIGHBOR_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief RESIZE_NEAREST_NEIGHBOR in Circle
+ */
+class CircleResizeNearestNeighbor final
+ : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::RESIZE_NEAREST_NEIGHBOR>>
+{
+public:
+ CircleResizeNearestNeighbor() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *size(void) const { return at(1)->node(); }
+ void size(loco::Node *node) { at(1)->node(node); }
+
+ bool align_corners() const { return _align_corners; }
+ void align_corners(bool value) { _align_corners = value; }
+
+private:
+ bool _align_corners = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLERESIZE_NEAREST_NEIGHBOR_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReverseSequence.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReverseSequence.h
new file mode 100644
index 000000000..b0766dd3e
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReverseSequence.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEREVERSESEQUENCE_H__
+#define __LUCI_IR_CIRCLEREVERSESEQUENCE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief REVERSE_SEQUENCE in Circle
+ */
+class CircleReverseSequence final
+ : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::REVERSE_SEQUENCE>>
+{
+public:
+ CircleReverseSequence() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *seq_lengths(void) const { return at(1)->node(); }
+ void seq_lengths(loco::Node *node) { at(1)->node(node); }
+
+public:
+ int seq_axis(void) const { return _seq_axis; }
+ void seq_axis(int seq_axis) { _seq_axis = seq_axis; }
+
+ int batch_axis(void) const { return _batch_axis; }
+ void batch_axis(int batch_axis) { _batch_axis = batch_axis; }
+
+private:
+ int _seq_axis{0};
+ int _batch_axis{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEREVERSESEQUENCE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleReverseV2.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleReverseV2.h
new file mode 100644
index 000000000..71d9f65aa
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleReverseV2.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEREVERSE_V2_H__
+#define __LUCI_IR_CIRCLEREVERSE_V2_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief ReverseV2 in Circle
+ */
+class CircleReverseV2 final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::REVERSE_V2>>
+{
+public:
+ loco::Node *tensor(void) const { return at(0)->node(); }
+ void tensor(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *axis(void) const { return at(1)->node(); }
+ void axis(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEREVERSE_V2_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleRound.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleRound.h
new file mode 100644
index 000000000..30296ce9e
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleRound.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLEROUND_H__
+#define __LUCI_IR_CIRCLEROUND_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief ROUND in Circle
+ */
+class CircleRound final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::ROUND>>
+{
+public:
+ CircleRound() = default;
+
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLEROUND_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleRsqrt.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleRsqrt.h
index 44d22ef22..873397bce 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleRsqrt.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleRsqrt.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleScatterNd.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleScatterNd.h
new file mode 100644
index 000000000..9f93a0a80
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleScatterNd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLESCATTER_ND_H__
+#define __LUCI_IR_CIRCLESCATTER_ND_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SCATTER_ND in Circle
+ */
+class CircleScatterNd final : public FixedArityNode<3, CircleNodeImpl<CircleOpcode::SCATTER_ND>>
+{
+public:
+ loco::Node *indices(void) const { return at(0)->node(); }
+ void indices(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *updates(void) const { return at(1)->node(); }
+ void updates(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *shape(void) const { return at(2)->node(); }
+ void shape(loco::Node *node) { at(2)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLESCATTER_ND_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSegmentSum.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSegmentSum.h
new file mode 100644
index 000000000..416d617b2
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSegmentSum.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SEGMENT_SUM_H__
+#define __LUCI_IR_CIRCLE_SEGMENT_SUM_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SEGMENT_SUM in Circle
+ */
+class CircleSegmentSum final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::SEGMENT_SUM>>
+{
+public:
+ CircleSegmentSum() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *segment_ids(void) const { return at(1)->node(); }
+ void segment_ids(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SEGMENT_SUM_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSelect.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSelect.h
new file mode 100644
index 000000000..727647168
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSelect.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SELECT_H__
+#define __LUCI_IR_CIRCLE_SELECT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SELECT in Circle
+ */
+class CircleSelect final : public FixedArityNode<3, CircleNodeImpl<CircleOpcode::SELECT>>
+{
+public:
+ CircleSelect() = default;
+
+public:
+ loco::Node *condition(void) const { return at(0)->node(); }
+ void condition(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *t(void) const { return at(1)->node(); }
+ void t(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *e(void) const { return at(2)->node(); }
+ void e(loco::Node *node) { at(2)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SELECT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSelectV2.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSelectV2.h
new file mode 100644
index 000000000..7ac3c0524
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSelectV2.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SELECT_V2_H__
+#define __LUCI_IR_CIRCLE_SELECT_V2_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SELECT_V2 in Circle
+ */
+class CircleSelectV2 final : public FixedArityNode<3, CircleNodeImpl<CircleOpcode::SELECT_V2>>
+{
+public:
+ CircleSelectV2() = default;
+
+public:
+ loco::Node *condition(void) const { return at(0)->node(); }
+ void condition(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *t(void) const { return at(1)->node(); }
+ void t(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *e(void) const { return at(2)->node(); }
+ void e(loco::Node *node) { at(2)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SELECT_V2_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleShape.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleShape.h
new file mode 100644
index 000000000..ff20ce684
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleShape.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SHAPE_H__
+#define __LUCI_IR_CIRCLE_SHAPE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SHAPE in Circle
+ */
+class CircleShape final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::SHAPE>>
+{
+public:
+ CircleShape() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ loco::DataType out_type(void) const { return _out_type; }
+ void out_type(loco::DataType ot) { _out_type = ot; }
+
+private:
+ loco::DataType _out_type{loco::DataType::S32};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SHAPE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSin.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSin.h
new file mode 100644
index 000000000..5624db253
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSin.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SIN_H__
+#define __LUCI_IR_CIRCLE_SIN_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SIN in Circle
+ */
+class CircleSin final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::SIN>>
+{
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SIN_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSlice.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSlice.h
new file mode 100644
index 000000000..a2113643d
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSlice.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_SLICE_H__
+#define __LUCI_IR_SLICE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SLICE in Circle
+ */
+class CircleSlice final : public FixedArityNode<3, CircleNodeImpl<CircleOpcode::SLICE>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *begin(void) const { return at(1)->node(); }
+ void begin(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *size(void) const { return at(2)->node(); }
+ void size(loco::Node *node) { at(2)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_SLICE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSoftmax.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSoftmax.h
index 4ea3c4b0e..7166a329b 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleSoftmax.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSoftmax.h
@@ -39,7 +39,7 @@ public:
void beta(float beta) { _beta = beta; }
private:
- float _beta;
+ float _beta{0.0f};
};
} // namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToBatchND.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToBatchND.h
new file mode 100644
index 000000000..042ebffcd
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToBatchND.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SPACETOBATCHND_H__
+#define __LUCI_IR_CIRCLE_SPACETOBATCHND_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SPACE_TO_BATCH_ND in Circle
+ */
+class CircleSpaceToBatchND final
+ : public FixedArityNode<3, CircleNodeImpl<CircleOpcode::SPACE_TO_BATCH_ND>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *block_shape(void) const { return at(1)->node(); }
+ void block_shape(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *paddings(void) const { return at(2)->node(); }
+ void paddings(loco::Node *node) { at(2)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SPACETOBATCHND_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToDepth.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToDepth.h
new file mode 100644
index 000000000..420a4cb96
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSpaceToDepth.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SPACETODEPTH_H__
+#define __LUCI_IR_CIRCLE_SPACETODEPTH_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SPACE_TO_DEPTH in Circle
+ */
+class CircleSpaceToDepth final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::SPACE_TO_DEPTH>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int block_size(void) const { return _block_size; }
+ void block_size(int block_size) { _block_size = block_size; }
+
+private:
+ int _block_size{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SPACETODEPTH_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSparseToDense.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSparseToDense.h
new file mode 100644
index 000000000..9f5051317
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSparseToDense.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCELSPARSETODENSE_H__
+#define __LUCI_IR_CIRCELSPARSETODENSE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SPARSE_TO_DENSE in Circle
+ */
+class CircleSparseToDense final
+ : public FixedArityNode<4, CircleNodeImpl<CircleOpcode::SPARSE_TO_DENSE>>
+{
+public:
+ loco::Node *indices(void) const { return at(0)->node(); }
+ void indices(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *output_shape(void) const { return at(1)->node(); }
+ void output_shape(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *values(void) const { return at(2)->node(); }
+ void values(loco::Node *node) { at(2)->node(node); }
+
+ loco::Node *default_value(void) const { return at(3)->node(); }
+ void default_value(loco::Node *node) { at(3)->node(node); }
+
+public:
+ bool validate_indices(void) const { return _validate_indices; }
+ void validate_indices(bool validate_indices) { _validate_indices = validate_indices; }
+
+private:
+ bool _validate_indices{true};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCELSPARSETODENSE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSplit.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplit.h
new file mode 100644
index 000000000..0eda19501
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplit.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SPLIT_H__
+#define __LUCI_IR_CIRCLE_SPLIT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SPLIT in Circle
+ */
+class CircleSplit final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::SPLIT>>
+{
+public:
+ loco::Node *split_dim(void) const { return at(0)->node(); }
+ void split_dim(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *input(void) const { return at(1)->node(); }
+ void input(loco::Node *node) { at(1)->node(node); }
+
+public:
+ // NOTE it is num_split() not num_splits() as we follow TF name
+ int32_t num_split(void) const { return _num_split; }
+ void num_split(int32_t num_split) { _num_split = num_split; }
+
+private:
+ int32_t _num_split{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SPLIT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitOut.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitOut.h
new file mode 100644
index 000000000..6bf4a9fef
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitOut.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SPLITOUT_H__
+#define __LUCI_IR_CIRCLE_SPLITOUT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief Virtual CIRCLESPLITOUT in Circle
+ */
+class CircleSplitOut final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CIRCLESPLITOUT>>
+{
+public:
+ CircleSplitOut() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t index(void) const { return _index; }
+ void index(int32_t index) { _index = index; }
+
+private:
+ int32_t _index{-1};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SPLITOUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitV.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitV.h
new file mode 100644
index 000000000..1b7d55534
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitV.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SPLIT_V_H__
+#define __LUCI_IR_CIRCLE_SPLIT_V_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SPLIT_V in Circle
+ */
+class CircleSplitV final : public FixedArityNode<3, CircleNodeImpl<CircleOpcode::SPLIT_V>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *size_splits(void) const { return at(1)->node(); }
+ void size_splits(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *split_dim(void) const { return at(2)->node(); }
+ void split_dim(loco::Node *node) { at(2)->node(node); }
+
+public:
+ // NOTE it is num_split() not num_splits() as we follow TF name
+ int32_t num_split(void) const { return _num_split; }
+ void num_split(int32_t num_split) { _num_split = num_split; }
+
+private:
+ int32_t _num_split{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SPLIT_V_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitVOut.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitVOut.h
new file mode 100644
index 000000000..d3b2f1e5a
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSplitVOut.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_SPLITVOUT_H__
+#define __LUCI_IR_CIRCLE_SPLITVOUT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief Virtual CIRCLESPLITVOUT in Circle
+ */
+class CircleSplitVOut final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CIRCLESPLITVOUT>>
+{
+public:
+ CircleSplitVOut() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t index(void) const { return _index; }
+ void index(int32_t index) { _index = index; }
+
+private:
+ int32_t _index{-1};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_SPLITVOUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSqrt.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSqrt.h
index bc1f39d90..c96ca8498 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleSqrt.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSqrt.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSquare.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSquare.h
new file mode 100644
index 000000000..a29edfe82
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSquare.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLESQUARE_H__
+#define __LUCI_IR_CIRCLESQUARE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SQUARE in Circle
+ */
+class CircleSquare final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::SQUARE>>
+{
+public:
+ CircleSquare() = default;
+
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLESQUARE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSquaredDifference.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSquaredDifference.h
index ff337dfbe..b5b39f920 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleSquaredDifference.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSquaredDifference.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSqueeze.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSqueeze.h
new file mode 100644
index 000000000..f175f1411
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSqueeze.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLESQUEEZE_H__
+#define __LUCI_IR_CIRCLESQUEEZE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SQUEEZE in Circle
+ */
+class CircleSqueeze final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::SQUEEZE>>
+{
+public:
+ CircleSqueeze() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ const std::vector<int32_t> &squeeze_dims() const { return _squeeze_dims; }
+ void squeeze_dims(const std::vector<int32_t> &squeeze_dims) { _squeeze_dims = squeeze_dims; };
+
+private:
+ std::vector<int32_t> _squeeze_dims{};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLESQUEEZE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleStridedSlice.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleStridedSlice.h
new file mode 100644
index 000000000..98799fec1
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleStridedSlice.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_STRIDEDSLICE_H__
+#define __LUCI_IR_STRIDEDSLICE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief STRIDED_SLICE in Circle
+ */
+class CircleStridedSlice final
+ : public FixedArityNode<4, CircleNodeImpl<CircleOpcode::STRIDED_SLICE>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *begin(void) const { return at(1)->node(); }
+ void begin(loco::Node *node) { at(1)->node(node); }
+
+ loco::Node *end(void) const { return at(2)->node(); }
+ void end(loco::Node *node) { at(2)->node(node); }
+
+ loco::Node *strides(void) const { return at(3)->node(); }
+ void strides(loco::Node *node) { at(3)->node(node); }
+
+public:
+ int32_t begin_mask() const { return _begin_mask; }
+ void begin_mask(int32_t mask) { _begin_mask = mask; }
+
+ int32_t end_mask() const { return _end_mask; }
+ void end_mask(int32_t mask) { _end_mask = mask; }
+
+ int32_t ellipsis_mask() const { return _ellipsis_mask; }
+ void ellipsis_mask(int32_t mask) { _ellipsis_mask = mask; }
+
+ int32_t new_axis_mask() const { return _new_axis_mask; }
+ void new_axis_mask(int32_t mask) { _new_axis_mask = mask; }
+
+ int32_t shrink_axis_mask() const { return _shrink_axis_mask; }
+ void shrink_axis_mask(int32_t mask) { _shrink_axis_mask = mask; }
+
+private:
+ int32_t _begin_mask{0};
+ int32_t _end_mask{0};
+ int32_t _ellipsis_mask{0};
+ int32_t _new_axis_mask{0};
+ int32_t _shrink_axis_mask{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_STRIDEDSLICE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleSum.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleSum.h
new file mode 100644
index 000000000..21faa76fe
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleSum.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLESUM_H__
+#define __LUCI_IR_CIRCLESUM_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief SUM in Circle
+ */
+class CircleSum final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::SUM>>
+{
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *reduction_indices(void) const { return at(1)->node(); }
+ void reduction_indices(loco::Node *node) { at(1)->node(node); }
+
+public:
+ bool keep_dims(void) const { return _keep_dims; }
+ void keep_dims(bool keep_dims) { _keep_dims = keep_dims; }
+
+private:
+ bool _keep_dims = false;
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLESUM_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleTanh.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleTanh.h
new file mode 100644
index 000000000..f7444921f
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleTanh.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLETANH_H__
+#define __LUCI_IR_CIRCLETANH_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief TANH in Circle
+ */
+class CircleTanh final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::TANH>>
+{
+public:
+ CircleTanh() = default;
+
+public:
+ loco::Node *x(void) const { return at(0)->node(); }
+ void x(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLETANH_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleTile.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleTile.h
new file mode 100644
index 000000000..96e1f69c6
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleTile.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLETILE_H__
+#define __LUCI_IR_CIRCLETILE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief TILE in Circle
+ */
+class CircleTile final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::TILE>>
+{
+public:
+ CircleTile() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *multiples(void) const { return at(1)->node(); }
+ void multiples(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLETILE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2.h
new file mode 100644
index 000000000..3b2b5abb7
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_TOPK_V2_H__
+#define __LUCI_IR_CIRCLE_TOPK_V2_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief TOPK_V2 in Circle
+ */
+class CircleTopKV2 final : public FixedArityNode<2, CircleNodeImpl<CircleOpcode::TOPK_V2>>
+{
+public:
+ CircleTopKV2() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+ loco::Node *k(void) const { return at(1)->node(); }
+ void k(loco::Node *node) { at(1)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_TOPK_V2_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2Out.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2Out.h
new file mode 100644
index 000000000..5a6dd0c02
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleTopKV2Out.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_TOPK_V2_OUT_H__
+#define __LUCI_IR_CIRCLE_TOPK_V2_OUT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief Virtual CIRCLETOPKV2OUT in Circle
+ */
+class CircleTopKV2Out final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CIRCLETOPKV2OUT>>
+{
+public:
+ CircleTopKV2Out() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t index(void) const { return _index; }
+ void index(int32_t index) { _index = index; }
+
+private:
+ int32_t _index{-1};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_TOPK_V2_OUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleTranspose.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleTranspose.h
index 198b56afd..095cd6746 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleTranspose.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleTranspose.h
@@ -20,7 +20,6 @@
#include "luci/IR/CircleNodeDecl.h"
#include "luci/IR/CircleOpcode.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleTransposeConv.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleTransposeConv.h
index 54a0d010c..fc638d49f 100644
--- a/compiler/luci/lang/include/luci/IR/Nodes/CircleTransposeConv.h
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleTransposeConv.h
@@ -22,7 +22,6 @@
#include "luci/IR/AttrPadding.h"
#include "luci/IR/AttrStride.h"
-#include "luci/IR/AttrFusedActFunc.h"
#include "luci/IR/LuciNodeMixins.h"
namespace luci
@@ -55,7 +54,7 @@ public:
Stride *stride(void) { return &_stride; }
private:
- Padding _padding;
+ Padding _padding{Padding::UNDEFINED};
Stride _stride;
};
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleUnpack.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleUnpack.h
new file mode 100644
index 000000000..cb91d7e6a
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleUnpack.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_UNPACK_H__
+#define __LUCI_IR_CIRCLE_UNPACK_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief UNPACK in Circle
+ */
+class CircleUnpack final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::UNPACK>>
+{
+public:
+ CircleUnpack() = default;
+
+public:
+ loco::Node *value(void) const { return at(0)->node(); }
+ void value(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t num(void) const { return _num; }
+ void num(int32_t num) { _num = num; }
+
+ int32_t axis(void) const { return _axis; }
+ void axis(int32_t axis) { _axis = axis; }
+
+private:
+ int32_t _num{0};
+ int32_t _axis{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_UNPACK_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleUnpackOut.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleUnpackOut.h
new file mode 100644
index 000000000..6f24578a1
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleUnpackOut.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_UNPACKOUT_H__
+#define __LUCI_IR_CIRCLE_UNPACKOUT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief Virtual CIRCLEUNPACKOUT in Circle
+ */
+class CircleUnpackOut final
+ : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CIRCLEUNPACKOUT>>
+{
+public:
+ CircleUnpackOut() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t index(void) const { return _index; }
+ void index(int32_t index) { _index = index; }
+
+private:
+ int32_t _index{0};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_UNPACKOUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleWhere.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleWhere.h
new file mode 100644
index 000000000..51eda3d6e
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleWhere.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_WHERE_H__
+#define __LUCI_IR_CIRCLE_WHERE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+#include <cassert>
+
+namespace luci
+{
+
+/**
+ * @brief WHERE in Circle
+ */
+class CircleWhere final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::WHERE>>
+{
+public:
+ CircleWhere() = default;
+
+public:
+ loco::Node *condition() const { return at(0)->node(); }
+ void condition(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_WHERE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleWhile.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleWhile.h
new file mode 100644
index 000000000..40ec96414
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleWhile.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_WHILE_H__
+#define __LUCI_IR_CIRCLE_WHILE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/VariadicArityNode.h"
+
+#include <cassert>
+
+namespace luci
+{
+
+/**
+ * @brief WHILE in Circle
+ */
+class CircleWhile final : public VariadicArityNode<CircleNodeImpl<CircleOpcode::WHILE>>
+{
+public:
+ CircleWhile(uint32_t arity, uint32_t out)
+ : VariadicArityNode<CircleNodeImpl<CircleOpcode::WHILE>>(arity), _output_count(out)
+ {
+ assert(arity > 0);
+ assert(out > 0);
+
+ // input and output must have the same size
+ assert(arity == out);
+ }
+
+public:
+ uint32_t input_count(void) const { return arity(); }
+ uint32_t output_count(void) const { return _output_count; }
+
+public:
+ Node *input(uint32_t index) const { return at(index)->node(); }
+ void input(uint32_t index, Node *node) { at(index)->node(node); }
+
+public:
+ int32_t cond_branch(void) const { return _cond_branch; }
+ void cond_branch(int32_t cond_branch) { _cond_branch = cond_branch; }
+
+ int32_t body_branch(void) const { return _body_branch; }
+ void body_branch(int32_t body_branch) { _body_branch = body_branch; }
+
+public:
+ loco::Graph *cond_graph(void) const { return _cond_graph; }
+ void cond_graph(loco::Graph *cond_graph) { _cond_graph = cond_graph; }
+
+ loco::Graph *body_graph(void) const { return _body_graph; }
+ void body_graph(loco::Graph *body_graph) { _body_graph = body_graph; }
+
+private:
+ uint32_t _output_count{0};
+ int32_t _cond_branch{-1};
+ int32_t _body_branch{-1};
+
+ loco::Graph *_cond_graph{nullptr};
+ loco::Graph *_body_graph{nullptr};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_WHILE_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleWhileOut.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleWhileOut.h
new file mode 100644
index 000000000..cdf617848
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleWhileOut.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_WHILEOUT_H__
+#define __LUCI_IR_CIRCLE_WHILEOUT_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief Virtual CIRCLEWHILEOUT in Circle
+ */
+class CircleWhileOut final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::CIRCLEWHILEOUT>>
+{
+public:
+ CircleWhileOut() = default;
+
+public:
+ loco::Node *input(void) const { return at(0)->node(); }
+ void input(loco::Node *node) { at(0)->node(node); }
+
+public:
+ int32_t index(void) const { return _index; }
+ void index(int32_t index) { _index = index; }
+
+private:
+ int32_t _index{-1};
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_WHILEOUT_H__
diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleZerosLike.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleZerosLike.h
new file mode 100644
index 000000000..d3b6d272a
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleZerosLike.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_CIRCLE_ZEROS_LIKE_H__
+#define __LUCI_IR_CIRCLE_ZEROS_LIKE_H__
+
+#include "luci/IR/CircleNodeDecl.h"
+#include "luci/IR/CircleOpcode.h"
+
+#include "luci/IR/LuciNodeMixins.h"
+
+namespace luci
+{
+
+/**
+ * @brief ZEROS_LIKE in Circle
+ */
+class CircleZerosLike final : public FixedArityNode<1, CircleNodeImpl<CircleOpcode::ZEROS_LIKE>>
+{
+public:
+ CircleZerosLike() = default;
+
+public:
+ /// @brief Get the input node
+ loco::Node *input(void) const { return at(0)->node(); }
+
+ /// @brief Set the input node
+ void input(loco::Node *node) { at(0)->node(node); }
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_CIRCLE_ZEROS_LIKE_H__
diff --git a/compiler/luci/lang/include/luci/IR/PropertyShapeStatus.h b/compiler/luci/lang/include/luci/IR/PropertyShapeStatus.h
new file mode 100644
index 000000000..179a8ab3c
--- /dev/null
+++ b/compiler/luci/lang/include/luci/IR/PropertyShapeStatus.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_IR_PROPERTY_SHAPE_STATUS_H__
+#define __LUCI_IR_PROPERTY_SHAPE_STATUS_H__
+
+namespace luci
+{
+
+/**
+ * @brief ShapeStatus is to remember circle node shape status.
+ * @note This is not an attribute from the file but inner status of a node.
+ * Shape with [] is scalar but sometimes it acts as dynamic shape.
+ */
+enum class ShapeStatus
+{
+ UNDEFINED, // Shape status is undefined
+
+ NOSHAPE, // shape is unknown; to distinguish from scalar
+ VALID, // shape is valid
+};
+
+} // namespace luci
+
+#endif // __LUCI_IR_PROPERTY_SHAPE_STATUS_H__
diff --git a/compiler/luci/lang/include/luci/IR/VariadicArityNode.h b/compiler/luci/lang/include/luci/IR/VariadicArityNode.h
index a4814ee48..e83d90978 100644
--- a/compiler/luci/lang/include/luci/IR/VariadicArityNode.h
+++ b/compiler/luci/lang/include/luci/IR/VariadicArityNode.h
@@ -46,11 +46,7 @@ public:
public:
uint32_t arity(void) const final { return _args.size(); }
- loco::Node *arg(uint32_t n) const final
- {
- assert(n < _args.size());
- return _args.at(n)->node();
- }
+ loco::Node *arg(uint32_t n) const final { return _args.at(n)->node(); }
void drop(void) final
{
@@ -62,11 +58,7 @@ public:
protected:
// This API allows inherited classes to access "_args" field.
- loco::Use *at(uint32_t n) const
- {
- assert(n < _args.size());
- return _args.at(n).get();
- }
+ loco::Use *at(uint32_t n) const { return _args.at(n).get(); }
private:
std::vector<std::unique_ptr<loco::Use>> _args;
diff --git a/compiler/luci/lang/src/CircleDialect.cpp b/compiler/luci/lang/src/CircleDialect.cpp
index e1c925de4..42ca3c917 100644
--- a/compiler/luci/lang/src/CircleDialect.cpp
+++ b/compiler/luci/lang/src/CircleDialect.cpp
@@ -22,6 +22,8 @@
#include <loco/IR/GraphInputIndex.h>
#include <loco/IR/GraphOutputIndex.h>
+#include "DeadNodeQueryService.h"
+
#include <cassert>
#include <memory>
@@ -42,8 +44,7 @@ struct GiiQueryServiceImpl final : public loco::GraphInputIndexQueryService
loco::GraphOutputIndex index(const loco::Node *node) const final
{
assert(associated(node));
- auto circleinput = dynamic_cast<const luci::CircleInput *>(node);
- assert(circleinput != nullptr);
+ auto circleinput = loco::must_cast<const luci::CircleInput *>(node);
return circleinput->index();
}
};
@@ -62,8 +63,7 @@ struct GoiQueryServiceImpl final : public loco::GraphOutputIndexQueryService
loco::GraphOutputIndex index(const loco::Node *node) const final
{
assert(associated(node));
- auto circleoutput = dynamic_cast<const luci::CircleOutput *>(node);
- assert(circleoutput != nullptr);
+ auto circleoutput = loco::must_cast<const luci::CircleOutput *>(node);
return circleoutput->index();
}
};
@@ -77,6 +77,7 @@ CircleDialect::CircleDialect()
{
service<loco::GraphInputIndexQueryService>(std::make_unique<GiiQueryServiceImpl>());
service<loco::GraphOutputIndexQueryService>(std::make_unique<GoiQueryServiceImpl>());
+ service<logo::DeadNodeQueryService>(std::make_unique<DeadNodeQueryServiceImpl>());
}
loco::Dialect *CircleDialect::get(void)
diff --git a/compiler/luci/lang/src/CircleDialect.test.cpp b/compiler/luci/lang/src/CircleDialect.test.cpp
index 78221f199..a09c105ec 100644
--- a/compiler/luci/lang/src/CircleDialect.test.cpp
+++ b/compiler/luci/lang/src/CircleDialect.test.cpp
@@ -15,6 +15,10 @@
*/
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodes.h"
+
+#include <loco.h>
+#include <logo/DeadNodeQueryService.h>
#include <gtest/gtest.h>
@@ -25,10 +29,59 @@ TEST(CircleDialectTest, get_P)
// get() SHOULD return a valid(non-null) pointer
ASSERT_NE(d, nullptr);
// The return value SHOULD be stable across multiple invocations
- ASSERT_EQ(d, luci::CircleDialect::get());
+ ASSERT_EQ(luci::CircleDialect::get(), d);
}
-TEST(CircleDialectTest, get_N)
+TEST(CircleDialectTest, check_if_dead_node_service)
{
- // TBD
+ /**
+ * [CircleInput1] [CircleInput2] [CircleInput3]
+ * \ / (dangling input)
+ * \ /
+ * [CircleAdd] [CircleBatchMatMul]
+ * | (dangling node)
+ * |
+ * [CircleOutput1] [CircleOutput2]
+ * (dangling output)
+ */
+ auto g = loco::make_graph();
+
+ auto graph_input1 = g->inputs()->create();
+ auto circle_input1 = g->nodes()->create<luci::CircleInput>();
+ circle_input1->index(graph_input1->index());
+
+ auto graph_input2 = g->inputs()->create();
+ auto circle_input2 = g->nodes()->create<luci::CircleInput>();
+ circle_input2->index(graph_input2->index());
+
+  // dangling input
+ auto graph_input3 = g->inputs()->create();
+ auto dangling_input = g->nodes()->create<luci::CircleInput>();
+ dangling_input->index(graph_input3->index());
+
+ auto active_node = g->nodes()->create<luci::CircleAdd>();
+ active_node->x(circle_input1);
+ active_node->y(circle_input2);
+
+ auto dangling_node = g->nodes()->create<luci::CircleBatchMatMul>();
+
+ auto graph_output1 = g->outputs()->create();
+ auto circle_output1 = g->nodes()->create<luci::CircleOutput>();
+ circle_output1->index(graph_output1->index());
+ circle_output1->from(active_node);
+
+ // dangling output
+ auto graph_output2 = g->outputs()->create();
+ auto circle_output2 = g->nodes()->create<luci::CircleOutput>();
+ circle_output2->index(graph_output2->index());
+
+ auto service = active_node->dialect()->service<logo::DeadNodeQueryService>();
+
+ ASSERT_TRUE(service->isDeadNode(dangling_node));
+ ASSERT_FALSE(service->isDeadNode(dangling_input));
+ ASSERT_FALSE(service->isDeadNode(active_node));
+ ASSERT_FALSE(service->isDeadNode(circle_input1));
+ ASSERT_FALSE(service->isDeadNode(circle_input2));
+ ASSERT_FALSE(service->isDeadNode(circle_output1));
+ ASSERT_FALSE(service->isDeadNode(circle_output2));
}
diff --git a/compiler/luci/lang/src/CircleNodeShapeDtype.test.cpp b/compiler/luci/lang/src/CircleNodeShapeDtype.test.cpp
new file mode 100644
index 000000000..61eab4b77
--- /dev/null
+++ b/compiler/luci/lang/src/CircleNodeShapeDtype.test.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/CircleNodes.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleNodeShapeDTypeTest, constructor)
+{
+ luci::CircleAdd node;
+
+ ASSERT_EQ(loco::DataType::Unknown, node.dtype());
+ ASSERT_EQ(0, node.rank());
+}
+
+TEST(CircleNodeShapeDTypeTest, values)
+{
+ luci::CircleAdd node;
+
+ node.dtype(loco::DataType::FLOAT32);
+ ASSERT_EQ(loco::DataType::FLOAT32, node.dtype());
+
+ node.rank(4);
+ ASSERT_EQ(4, node.rank());
+ ASSERT_FALSE(node.dim(0).known());
+
+ node.dim(0) = loco::Dimension(1);
+ ASSERT_TRUE(node.dim(0).known());
+ ASSERT_EQ(1, node.dim(0).value());
+}
+
+TEST(CircleNodeShapeDTypeTest, values_NEG)
+{
+ luci::CircleAdd node;
+
+ node.rank(4);
+ EXPECT_ANY_THROW(node.dim(100).known());
+ EXPECT_ANY_THROW(node.dim(100) = loco::Dimension(1));
+}
diff --git a/compiler/luci/lang/src/CircleNodes.cpp b/compiler/luci/lang/src/CircleNodes.cpp
index 76ff7ec5a..c77c06861 100644
--- a/compiler/luci/lang/src/CircleNodes.cpp
+++ b/compiler/luci/lang/src/CircleNodes.cpp
@@ -37,6 +37,7 @@ void set_new_shape(CircleReshape *node, int32_t *base, uint32_t size)
const_shape_node->dim(0) = size;
const_shape_node->dtype(S32);
const_shape_node->size<S32>(size);
+ const_shape_node->shape_status(luci::ShapeStatus::VALID);
for (uint32_t axis = 0; axis < size; ++axis)
const_shape_node->at<S32>(axis) = base[axis];
node->shape(const_shape_node);
@@ -47,4 +48,38 @@ void set_new_shape(CircleReshape *node, int32_t *base, uint32_t size)
node->newShape()->dim(axis) = base[axis];
}
+void link(loco::GraphOutput *output, CircleOutput *node) { node->index(output->index()); }
+
+CircleOutput *output_node(loco::Graph *g, const loco::GraphOutputIndex &index)
+{
+ for (uint32_t n = 0; n < g->nodes()->size(); ++n)
+ {
+ if (auto output = dynamic_cast<CircleOutput *>(g->nodes()->at(n)))
+ {
+ if (output->indexed() && output->index() == index)
+ {
+ return output;
+ }
+ }
+ }
+ return nullptr;
+}
+
+void link(loco::GraphInput *input, CircleInput *node) { node->index(input->index()); }
+
+CircleInput *input_node(loco::Graph *g, const loco::GraphInputIndex &index)
+{
+ for (uint32_t n = 0; n < g->nodes()->size(); ++n)
+ {
+ if (auto input = dynamic_cast<CircleInput *>(g->nodes()->at(n)))
+ {
+ if (input->indexed() && input->index() == index)
+ {
+ return input;
+ }
+ }
+ }
+ return nullptr;
+}
+
} // namespace luci
diff --git a/compiler/luci/lang/src/DeadNodeQueryService.cpp b/compiler/luci/lang/src/DeadNodeQueryService.cpp
new file mode 100644
index 000000000..a22574c94
--- /dev/null
+++ b/compiler/luci/lang/src/DeadNodeQueryService.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DeadNodeQueryService.h"
+
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <loco/IR/Graph.h>
+
+namespace luci
+{
+
+struct VirtualOutputDetector final : public luci::CircleNodeMutableVisitor<bool>
+{
+ bool visit(luci::CircleIfOut *) final { return true; }
+ bool visit(luci::CircleSplitOut *) final { return true; }
+ bool visit(luci::CircleSplitVOut *) final { return true; }
+ bool visit(luci::CircleTopKV2Out *) final { return true; }
+ bool visit(luci::CircleUnpackOut *) final { return true; }
+ bool visit(luci::CircleWhileOut *) final { return true; }
+  // TODO add more multi-output virtual nodes
+
+ // default is false
+ bool visit(luci::CircleNode *) final { return false; }
+};
+
+bool DeadNodeQueryServiceImpl::isDeadNode(loco::Node *node)
+{
+ auto g = node->graph();
+ auto input_nodes_vec = loco::input_nodes(g);
+ auto output_nodes_vec = loco::output_nodes(g);
+
+ auto input_nodes = std::set<loco::Node *>(input_nodes_vec.begin(), input_nodes_vec.end());
+ auto output_nodes = std::set<loco::Node *>(output_nodes_vec.begin(), output_nodes_vec.end());
+ auto active_nodes = loco::active_nodes(output_nodes_vec);
+
+ if (active_nodes.find(node) != active_nodes.end())
+ return false;
+  // input and output nodes are not dead nodes even if they are not active.
+ if (input_nodes.find(node) != input_nodes.end())
+ return false;
+
+  // if node is one of the virtual multiple outputs, we need to ask the real node
+ if (auto circle_node = dynamic_cast<luci::CircleNode *>(node))
+ {
+ VirtualOutputDetector d;
+ if (circle_node->accept(&d))
+ {
+ assert(node->arity() == 1);
+ loco::Node *real_node = node->arg(0);
+ if (active_nodes.find(real_node) != active_nodes.end())
+ return false;
+ if (input_nodes.find(real_node) != input_nodes.end())
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace luci
diff --git a/compiler/luci/lang/src/DeadNodeQueryService.h b/compiler/luci/lang/src/DeadNodeQueryService.h
new file mode 100644
index 000000000..d10696667
--- /dev/null
+++ b/compiler/luci/lang/src/DeadNodeQueryService.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_LANG_DEADNODEQUERYSERVICE_H__
+#define __LUCI_LANG_DEADNODEQUERYSERVICE_H__
+
+#include <logo/DeadNodeQueryService.h>
+
+#include <loco/IR/Node.h>
+
+namespace luci
+{
+
+struct DeadNodeQueryServiceImpl final : public logo::DeadNodeQueryService
+{
+ bool isDeadNode(loco::Node *node) final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_LANG_DEADNODEQUERYSERVICE_H__
diff --git a/compiler/luci/lang/src/Module.cpp b/compiler/luci/lang/src/Module.cpp
index e52d897a5..80ef61910 100644
--- a/compiler/luci/lang/src/Module.cpp
+++ b/compiler/luci/lang/src/Module.cpp
@@ -43,4 +43,4 @@ loco::Graph *Module::graph(size_t idx) const
std::unique_ptr<Module> make_module(void) { return std::make_unique<Module>(); }
-} // namespace loco
+} // namespace luci
diff --git a/compiler/luci/lang/src/Module.test.cpp b/compiler/luci/lang/src/Module.test.cpp
index f60319944..26bf073be 100644
--- a/compiler/luci/lang/src/Module.test.cpp
+++ b/compiler/luci/lang/src/Module.test.cpp
@@ -33,8 +33,8 @@ TEST(ModuleTest, add)
m->add(std::move(g));
- ASSERT_EQ(m->graph(), g_ptr);
- ASSERT_EQ(m->graph(0), g_ptr);
+ ASSERT_EQ(g_ptr, m->graph());
+ ASSERT_EQ(g_ptr, m->graph(0));
}
TEST(ModuleTest, add_more)
@@ -51,11 +51,11 @@ TEST(ModuleTest, add_more)
m->add(std::move(g2));
m->add(std::move(g3));
- ASSERT_EQ(m->size(), 3);
- ASSERT_EQ(m->graph(), g1_ptr);
- ASSERT_EQ(m->graph(0), g1_ptr);
- ASSERT_EQ(m->graph(1), g2_ptr);
- ASSERT_EQ(m->graph(2), g3_ptr);
+ ASSERT_EQ(3, m->size());
+ ASSERT_EQ(g1_ptr, m->graph());
+ ASSERT_EQ(g1_ptr, m->graph(0));
+ ASSERT_EQ(g2_ptr, m->graph(1));
+ ASSERT_EQ(g3_ptr, m->graph(2));
}
TEST(ModuleTest, add_nullptr_NEG)
diff --git a/compiler/luci/lang/src/Nodes/CircleAbs.test.cpp b/compiler/luci/lang/src/Nodes/CircleAbs.test.cpp
index 847f1500b..f97becba8 100644
--- a/compiler/luci/lang/src/Nodes/CircleAbs.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleAbs.test.cpp
@@ -17,15 +17,78 @@
#include "luci/IR/Nodes/CircleAbs.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
+#include <memory>
+
TEST(CircleAbsTest, constructor)
{
luci::CircleAbs abs_node;
- ASSERT_EQ(abs_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(abs_node.opcode(), luci::CircleOpcode::ABS);
+ ASSERT_EQ(luci::CircleDialect::get(), abs_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ABS, abs_node.opcode());
+
+ ASSERT_EQ(nullptr, abs_node.x());
+}
+
+TEST(CircleAbsTest, common_NEG)
+{
+ luci::CircleAbs abs_node;
+
+ abs_node.name("name");
+ ASSERT_EQ("name", abs_node.name());
+
+ auto q = std::make_unique<luci::CircleQuantParam>();
+ abs_node.quantparam(std::move(q));
+ ASSERT_NE(nullptr, abs_node.quantparam());
+
+ ASSERT_EQ(luci::ShapeStatus::UNDEFINED, abs_node.shape_status());
+ abs_node.shape_status(luci::ShapeStatus::NOSHAPE);
+ ASSERT_NE(luci::ShapeStatus::UNDEFINED, abs_node.shape_status());
+}
+
+TEST(CircleAbsTest, input_NEG)
+{
+ luci::CircleAbs abs_node;
+ luci::CircleAbs node;
+
+ abs_node.x(&node);
+ ASSERT_NE(nullptr, abs_node.x());
+
+ abs_node.x(nullptr);
+ ASSERT_EQ(nullptr, abs_node.x());
+}
+
+TEST(CircleAbsTest, arity_NEG)
+{
+ luci::CircleAbs abs_node;
+
+ ASSERT_NO_THROW(abs_node.arg(0));
+ ASSERT_THROW(abs_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleAbsTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleAbs abs_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(abs_node.accept(&tv), std::exception);
+}
+
+TEST(CircleAbsTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleAbs abs_node;
- ASSERT_EQ(abs_node.x(), nullptr);
+ TestVisitor tv;
+ ASSERT_THROW(abs_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleAdd.test.cpp b/compiler/luci/lang/src/Nodes/CircleAdd.test.cpp
index a7701963d..382faa5ef 100644
--- a/compiler/luci/lang/src/Nodes/CircleAdd.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleAdd.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleAdd.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleAddTest, constructor_P)
{
luci::CircleAdd add_node;
- ASSERT_EQ(add_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(add_node.opcode(), luci::CircleOpcode::ADD);
+ ASSERT_EQ(luci::CircleDialect::get(), add_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ADD, add_node.opcode());
- ASSERT_EQ(add_node.x(), nullptr);
- ASSERT_EQ(add_node.y(), nullptr);
+ ASSERT_EQ(nullptr, add_node.x());
+ ASSERT_EQ(nullptr, add_node.y());
+}
+
+TEST(CircleAddTest, input_NEG)
+{
+ luci::CircleAdd add_node;
+ luci::CircleAdd node;
+
+ add_node.x(&node);
+ add_node.y(&node);
+ ASSERT_NE(nullptr, add_node.x());
+ ASSERT_NE(nullptr, add_node.y());
+
+ add_node.x(nullptr);
+ add_node.y(nullptr);
+ ASSERT_EQ(nullptr, add_node.x());
+ ASSERT_EQ(nullptr, add_node.y());
+}
+
+TEST(CircleAddTest, arity_NEG)
+{
+ luci::CircleAdd add_node;
+
+ ASSERT_NO_THROW(add_node.arg(1));
+ ASSERT_THROW(add_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleAddTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleAdd add_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(add_node.accept(&tv), std::exception);
+}
+
+TEST(CircleAddTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleAdd add_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(add_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleAddN.test.cpp b/compiler/luci/lang/src/Nodes/CircleAddN.test.cpp
new file mode 100644
index 000000000..399d8cb82
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleAddN.test.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleAddN.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleAddNTest, constructor)
+{
+ luci::CircleAddN add_node(3);
+
+ ASSERT_EQ(luci::CircleDialect::get(), add_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ADD_N, add_node.opcode());
+
+ ASSERT_EQ(nullptr, add_node.inputs(0));
+ ASSERT_EQ(nullptr, add_node.inputs(1));
+ ASSERT_EQ(nullptr, add_node.inputs(2));
+}
+
+TEST(CircleAddNTest, input_NEG)
+{
+ luci::CircleAddN add_node(3);
+ luci::CircleAddN node(2);
+
+ add_node.inputs(0, &node);
+ add_node.inputs(1, &node);
+ add_node.inputs(2, &node);
+ ASSERT_NE(nullptr, add_node.inputs(0));
+ ASSERT_NE(nullptr, add_node.inputs(1));
+ ASSERT_NE(nullptr, add_node.inputs(2));
+
+ add_node.inputs(0, nullptr);
+ add_node.inputs(1, nullptr);
+ add_node.inputs(2, nullptr);
+ ASSERT_EQ(nullptr, add_node.inputs(0));
+ ASSERT_EQ(nullptr, add_node.inputs(1));
+ ASSERT_EQ(nullptr, add_node.inputs(2));
+}
+
+TEST(CircleAddNTest, arity_NEG)
+{
+ luci::CircleAddN add_node(3);
+ luci::CircleAddN node(2);
+
+ ASSERT_NO_THROW(add_node.inputs(2, &node));
+ ASSERT_NO_THROW(add_node.inputs(2, nullptr));
+ ASSERT_THROW(add_node.inputs(3, &node), std::out_of_range);
+
+ ASSERT_NO_THROW(add_node.arg(2));
+ ASSERT_THROW(add_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleAddNTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleAddN add_node(2);
+
+ TestVisitor tv;
+ ASSERT_THROW(add_node.accept(&tv), std::exception);
+}
+
+TEST(CircleAddNTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleAddN add_node(2);
+
+ TestVisitor tv;
+ ASSERT_THROW(add_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleArgMax.test.cpp b/compiler/luci/lang/src/Nodes/CircleArgMax.test.cpp
index 6b2cff11c..375a74c20 100644
--- a/compiler/luci/lang/src/Nodes/CircleArgMax.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleArgMax.test.cpp
@@ -17,16 +17,65 @@
#include "luci/IR/Nodes/CircleArgMax.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
TEST(CircleArgMaxTest, constructor_P)
{
- luci::CircleArgMax add_node;
+ luci::CircleArgMax argmax_node;
- ASSERT_EQ(add_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(add_node.opcode(), luci::CircleOpcode::ARG_MAX);
+ ASSERT_EQ(luci::CircleDialect::get(), argmax_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ARG_MAX, argmax_node.opcode());
- ASSERT_EQ(add_node.input(), nullptr);
- ASSERT_EQ(add_node.dimension(), nullptr);
+ ASSERT_EQ(nullptr, argmax_node.input());
+ ASSERT_EQ(nullptr, argmax_node.dimension());
+}
+
+TEST(CircleArgMaxTest, input_NEG)
+{
+ luci::CircleArgMax argmax_node;
+ luci::CircleArgMax node;
+
+ argmax_node.input(&node);
+ argmax_node.dimension(&node);
+ ASSERT_NE(nullptr, argmax_node.input());
+ ASSERT_NE(nullptr, argmax_node.dimension());
+
+ argmax_node.input(nullptr);
+ argmax_node.dimension(nullptr);
+ ASSERT_EQ(nullptr, argmax_node.input());
+ ASSERT_EQ(nullptr, argmax_node.dimension());
+}
+
+TEST(CircleArgMaxTest, arity_NEG)
+{
+ luci::CircleArgMax argmax_node;
+
+ ASSERT_NO_THROW(argmax_node.arg(1));
+ ASSERT_THROW(argmax_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleArgMaxTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleArgMax argmax_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(argmax_node.accept(&tv), std::exception);
+}
+
+TEST(CircleArgMaxTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleArgMax argmax_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(argmax_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleArgMin.test.cpp b/compiler/luci/lang/src/Nodes/CircleArgMin.test.cpp
new file mode 100644
index 000000000..6607bf82f
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleArgMin.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleArgMin.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleArgMinTest, constructor_P)
+{
+ luci::CircleArgMin argmin_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), argmin_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ARG_MIN, argmin_node.opcode());
+
+ ASSERT_EQ(nullptr, argmin_node.input());
+ ASSERT_EQ(nullptr, argmin_node.dimension());
+}
+
+TEST(CircleArgMinTest, input_NEG)
+{
+ luci::CircleArgMin argmin_node;
+ luci::CircleArgMin node;
+
+ argmin_node.input(&node);
+ argmin_node.dimension(&node);
+ ASSERT_NE(nullptr, argmin_node.input());
+ ASSERT_NE(nullptr, argmin_node.dimension());
+
+ argmin_node.input(nullptr);
+ argmin_node.dimension(nullptr);
+ ASSERT_EQ(nullptr, argmin_node.input());
+ ASSERT_EQ(nullptr, argmin_node.dimension());
+}
+
+TEST(CircleArgMinTest, arity_NEG)
+{
+ luci::CircleArgMin argmin_node;
+
+ ASSERT_NO_THROW(argmin_node.arg(1));
+ ASSERT_THROW(argmin_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleArgMinTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleArgMin argmin_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(argmin_node.accept(&tv), std::exception);
+}
+
+TEST(CircleArgMinTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleArgMin argmin_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(argmin_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleAveragePool2D.test.cpp b/compiler/luci/lang/src/Nodes/CircleAveragePool2D.test.cpp
new file mode 100644
index 000000000..fc7265cf0
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleAveragePool2D.test.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleAveragePool2D.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleAveragePool2DTest, constructor_P)
+{
+ luci::CircleAveragePool2D average_pool_2d_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), average_pool_2d_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::AVERAGE_POOL_2D, average_pool_2d_node.opcode());
+
+ ASSERT_EQ(nullptr, average_pool_2d_node.value());
+ ASSERT_EQ(luci::Padding::UNDEFINED, average_pool_2d_node.padding());
+ ASSERT_EQ(1, average_pool_2d_node.filter()->h());
+ ASSERT_EQ(1, average_pool_2d_node.filter()->w());
+ ASSERT_EQ(1, average_pool_2d_node.stride()->h());
+ ASSERT_EQ(1, average_pool_2d_node.stride()->w());
+}
+
+TEST(CircleAveragePool2DTest, input_NEG)
+{
+ luci::CircleAveragePool2D avgpool_node;
+ luci::CircleAveragePool2D node;
+
+ avgpool_node.value(&node);
+ ASSERT_NE(nullptr, avgpool_node.value());
+
+ avgpool_node.value(nullptr);
+ ASSERT_EQ(nullptr, avgpool_node.value());
+
+ avgpool_node.filter()->h(2);
+ avgpool_node.filter()->w(2);
+ avgpool_node.stride()->h(2);
+ avgpool_node.stride()->w(2);
+ ASSERT_NE(1, avgpool_node.filter()->h());
+ ASSERT_NE(1, avgpool_node.filter()->w());
+ ASSERT_NE(1, avgpool_node.stride()->h());
+ ASSERT_NE(1, avgpool_node.stride()->w());
+}
+
+TEST(CircleAveragePool2DTest, arity_NEG)
+{
+ luci::CircleAveragePool2D avgpool_node;
+
+ ASSERT_NO_THROW(avgpool_node.arg(0));
+ ASSERT_THROW(avgpool_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleAveragePool2DTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleAveragePool2D avgpool_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(avgpool_node.accept(&tv), std::exception);
+}
+
+TEST(CircleAveragePool2DTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleAveragePool2D avgpool_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(avgpool_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleBCQFullyConnected.test.cpp b/compiler/luci/lang/src/Nodes/CircleBCQFullyConnected.test.cpp
new file mode 100644
index 000000000..35c9ab95b
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleBCQFullyConnected.test.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleBCQFullyConnected.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleBCQFullyConnectedTest, constructor)
+{
+ luci::CircleBCQFullyConnected bcq_FC_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), bcq_FC_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::BCQ_FULLY_CONNECTED, bcq_FC_node.opcode());
+
+ ASSERT_EQ(nullptr, bcq_FC_node.input());
+ ASSERT_EQ(nullptr, bcq_FC_node.weights_scales());
+ ASSERT_EQ(nullptr, bcq_FC_node.weights_binary());
+ ASSERT_EQ(nullptr, bcq_FC_node.bias());
+ ASSERT_EQ(nullptr, bcq_FC_node.weights_clusters());
+
+ ASSERT_EQ(luci::FusedActFunc::UNDEFINED, bcq_FC_node.fusedActivationFunction());
+ ASSERT_EQ(0, bcq_FC_node.weights_hidden_size());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleBCQGather.test.cpp b/compiler/luci/lang/src/Nodes/CircleBCQGather.test.cpp
new file mode 100644
index 000000000..c187a9033
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleBCQGather.test.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleBCQGather.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleBCQGatherTest, constructor)
+{
+ luci::CircleBCQGather bcq_gather_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), bcq_gather_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::BCQ_GATHER, bcq_gather_node.opcode());
+
+ ASSERT_EQ(nullptr, bcq_gather_node.input_scales());
+ ASSERT_EQ(nullptr, bcq_gather_node.input_binary());
+ ASSERT_EQ(nullptr, bcq_gather_node.indices());
+ ASSERT_EQ(nullptr, bcq_gather_node.input_clusters());
+
+ ASSERT_EQ(0, bcq_gather_node.axis());
+ ASSERT_EQ(0, bcq_gather_node.input_hidden_size());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleBatchMatMul.test.cpp b/compiler/luci/lang/src/Nodes/CircleBatchMatMul.test.cpp
new file mode 100644
index 000000000..d7712c8dd
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleBatchMatMul.test.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleBatchMatMul.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleBatchMatMulTest, constructor)
+{
+ luci::CircleBatchMatMul batchmatmul_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), batchmatmul_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::BATCHMATMUL, batchmatmul_node.opcode());
+
+ ASSERT_EQ(nullptr, batchmatmul_node.x());
+ ASSERT_EQ(nullptr, batchmatmul_node.y());
+
+ ASSERT_FALSE(batchmatmul_node.adj_x());
+ ASSERT_FALSE(batchmatmul_node.adj_y());
+}
+
+TEST(CircleBatchMatMulTest, input_NEG)
+{
+ luci::CircleBatchMatMul batchmatmul_node;
+ luci::CircleBatchMatMul node;
+
+ batchmatmul_node.x(&node);
+ batchmatmul_node.y(&node);
+ ASSERT_NE(nullptr, batchmatmul_node.x());
+ ASSERT_NE(nullptr, batchmatmul_node.y());
+
+ batchmatmul_node.x(nullptr);
+ batchmatmul_node.y(nullptr);
+ ASSERT_EQ(nullptr, batchmatmul_node.x());
+ ASSERT_EQ(nullptr, batchmatmul_node.y());
+}
+
+TEST(CircleBatchMatMulTest, arity_NEG)
+{
+ luci::CircleBatchMatMul batchmatmul_node;
+
+ ASSERT_NO_THROW(batchmatmul_node.arg(1));
+ ASSERT_THROW(batchmatmul_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleBatchMatMulTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleBatchMatMul batchmatmul_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(batchmatmul_node.accept(&tv), std::exception);
+}
+
+TEST(CircleBatchMatMulTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleBatchMatMul batchmatmul_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(batchmatmul_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleBatchToSpaceND.test.cpp b/compiler/luci/lang/src/Nodes/CircleBatchToSpaceND.test.cpp
index e995718a1..0374fe008 100644
--- a/compiler/luci/lang/src/Nodes/CircleBatchToSpaceND.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleBatchToSpaceND.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleBatchToSpaceND.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,10 +25,62 @@ TEST(CircleBatchToSpaceNDTest, constructor)
{
luci::CircleBatchToSpaceND bts_node;
- ASSERT_EQ(bts_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(bts_node.opcode(), luci::CircleOpcode::BATCH_TO_SPACE_ND);
+ ASSERT_EQ(luci::CircleDialect::get(), bts_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::BATCH_TO_SPACE_ND, bts_node.opcode());
- ASSERT_EQ(bts_node.input(), nullptr);
- ASSERT_EQ(bts_node.block_shape(), nullptr);
- ASSERT_EQ(bts_node.crops(), nullptr);
+ ASSERT_EQ(nullptr, bts_node.input());
+ ASSERT_EQ(nullptr, bts_node.block_shape());
+ ASSERT_EQ(nullptr, bts_node.crops());
+}
+
+TEST(CircleBatchToSpaceNDTest, input_NEG)
+{
+ luci::CircleBatchToSpaceND bts_node;
+ luci::CircleBatchToSpaceND node;
+
+ bts_node.input(&node);
+ bts_node.block_shape(&node);
+ bts_node.crops(&node);
+ ASSERT_NE(nullptr, bts_node.input());
+ ASSERT_NE(nullptr, bts_node.block_shape());
+ ASSERT_NE(nullptr, bts_node.crops());
+
+ bts_node.input(nullptr);
+ bts_node.block_shape(nullptr);
+ bts_node.crops(nullptr);
+ ASSERT_EQ(nullptr, bts_node.input());
+ ASSERT_EQ(nullptr, bts_node.block_shape());
+ ASSERT_EQ(nullptr, bts_node.crops());
+}
+
+TEST(CircleBatchToSpaceNDTest, arity_NEG)
+{
+ luci::CircleBatchToSpaceND bts_node;
+
+ ASSERT_NO_THROW(bts_node.arg(2));
+ ASSERT_THROW(bts_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleBatchToSpaceNDTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleBatchToSpaceND bts_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(bts_node.accept(&tv), std::exception);
+}
+
+TEST(CircleBatchToSpaceNDTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleBatchToSpaceND bts_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(bts_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleCast.test.cpp b/compiler/luci/lang/src/Nodes/CircleCast.test.cpp
new file mode 100644
index 000000000..b58bf96f9
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleCast.test.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleCast.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleCastTest, constructor)
+{
+ luci::CircleCast cast_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), cast_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CAST, cast_node.opcode());
+
+ ASSERT_EQ(nullptr, cast_node.x());
+ ASSERT_EQ(loco::DataType::FLOAT32, cast_node.in_data_type());
+ ASSERT_EQ(loco::DataType::FLOAT32, cast_node.out_data_type());
+}
+
+TEST(CircleCastTest, input_NEG)
+{
+ luci::CircleCast cast_node;
+ luci::CircleCast node;
+
+ cast_node.x(&node);
+ ASSERT_NE(nullptr, cast_node.x());
+
+ cast_node.x(nullptr);
+ ASSERT_EQ(nullptr, cast_node.x());
+}
+
+TEST(CircleCastTest, arity_NEG)
+{
+ luci::CircleCast cast_node;
+
+ ASSERT_NO_THROW(cast_node.arg(0));
+ ASSERT_THROW(cast_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleCastTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleCast cast_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(cast_node.accept(&tv), std::exception);
+}
+
+TEST(CircleCastTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleCast cast_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(cast_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleCeil.test.cpp b/compiler/luci/lang/src/Nodes/CircleCeil.test.cpp
new file mode 100644
index 000000000..efac614b2
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleCeil.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleCeil.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleCeilTest, constructor)
+{
+ luci::CircleCeil ceil_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), ceil_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CEIL, ceil_node.opcode());
+
+ ASSERT_EQ(nullptr, ceil_node.x());
+}
+
+TEST(CircleCeilTest, input_NEG)
+{
+ luci::CircleCeil ceil_node;
+ luci::CircleCeil node;
+
+ ceil_node.x(&node);
+ ASSERT_NE(nullptr, ceil_node.x());
+
+ ceil_node.x(nullptr);
+ ASSERT_EQ(nullptr, ceil_node.x());
+}
+
+TEST(CircleCeilTest, arity_NEG)
+{
+ luci::CircleCeil ceil_node;
+
+ ASSERT_NO_THROW(ceil_node.arg(0));
+ ASSERT_THROW(ceil_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleCeilTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleCeil ceil_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(ceil_node.accept(&tv), std::exception);
+}
+
+TEST(CircleCeilTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleCeil ceil_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(ceil_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleConcatenation.test.cpp b/compiler/luci/lang/src/Nodes/CircleConcatenation.test.cpp
index 7167682b2..9f219a386 100644
--- a/compiler/luci/lang/src/Nodes/CircleConcatenation.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleConcatenation.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleConcatenation.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,12 +25,60 @@ TEST(CircleConcatenationTest, constructor_P)
{
luci::CircleConcatenation concat_node(3);
- ASSERT_EQ(concat_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(concat_node.opcode(), luci::CircleOpcode::CONCATENATION);
+ ASSERT_EQ(luci::CircleDialect::get(), concat_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CONCATENATION, concat_node.opcode());
- ASSERT_EQ(concat_node.numValues(), 3);
- ASSERT_EQ(concat_node.values(0), nullptr);
- ASSERT_EQ(concat_node.values(1), nullptr);
- ASSERT_EQ(concat_node.values(2), nullptr);
- ASSERT_EQ(concat_node.fusedActivationFunction(), luci::FusedActFunc::UNDEFINED);
+ ASSERT_EQ(3, concat_node.numValues());
+ ASSERT_EQ(nullptr, concat_node.values(0));
+ ASSERT_EQ(nullptr, concat_node.values(1));
+ ASSERT_EQ(nullptr, concat_node.values(2));
+ ASSERT_EQ(luci::FusedActFunc::UNDEFINED, concat_node.fusedActivationFunction());
+}
+
+TEST(CircleConcatenationTest, input_NEG)
+{
+ luci::CircleConcatenation concat_node(2);
+ luci::CircleConcatenation node(2);
+
+ concat_node.values(0, &node);
+ concat_node.values(1, &node);
+ ASSERT_NE(nullptr, concat_node.values(0));
+ ASSERT_NE(nullptr, concat_node.values(1));
+
+ concat_node.values(0, nullptr);
+ concat_node.values(1, nullptr);
+ ASSERT_EQ(nullptr, concat_node.values(0));
+ ASSERT_EQ(nullptr, concat_node.values(1));
+}
+
+TEST(CircleConcatenationTest, arity_NEG)
+{
+ luci::CircleConcatenation concat_node(5);
+
+ ASSERT_NO_THROW(concat_node.arg(4));
+ ASSERT_THROW(concat_node.arg(5), std::out_of_range);
+}
+
+TEST(CircleConcatenationTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleConcatenation concat_node(2);
+
+ TestVisitor tv;
+ ASSERT_THROW(concat_node.accept(&tv), std::exception);
+}
+
+TEST(CircleConcatenationTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleConcatenation concat_node(2);
+
+ TestVisitor tv;
+ ASSERT_THROW(concat_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleConst.cpp b/compiler/luci/lang/src/Nodes/CircleConst.cpp
index 1c46884d8..17ff853eb 100644
--- a/compiler/luci/lang/src/Nodes/CircleConst.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleConst.cpp
@@ -70,9 +70,12 @@ template <loco::DataType DT> typename loco::DataTypeImpl<DT>::Type &CircleConst:
template const typename loco::DataTypeImpl<DT>::Type &CircleConst::scalar<DT>(void) const; \
template typename loco::DataTypeImpl<DT>::Type &CircleConst::scalar<DT>(void);
+INSTANTIATE(loco::DataType::S64);
INSTANTIATE(loco::DataType::S32);
+INSTANTIATE(loco::DataType::S16);
INSTANTIATE(loco::DataType::FLOAT32);
INSTANTIATE(loco::DataType::U8);
+INSTANTIATE(loco::DataType::BOOL);
#undef INSTANTIATE
diff --git a/compiler/luci/lang/src/Nodes/CircleConv2D.test.cpp b/compiler/luci/lang/src/Nodes/CircleConv2D.test.cpp
index 7931c7eba..7fcc71d6e 100644
--- a/compiler/luci/lang/src/Nodes/CircleConv2D.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleConv2D.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleConv2D.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,10 +25,84 @@ TEST(CircleConv2Dest, constructor_P)
{
luci::CircleConv2D conv2d_node;
- ASSERT_EQ(conv2d_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(conv2d_node.opcode(), luci::CircleOpcode::CONV_2D);
+ ASSERT_EQ(luci::CircleDialect::get(), conv2d_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CONV_2D, conv2d_node.opcode());
- ASSERT_EQ(conv2d_node.input(), nullptr);
- ASSERT_EQ(conv2d_node.filter(), nullptr);
- ASSERT_EQ(conv2d_node.bias(), nullptr);
+ ASSERT_EQ(nullptr, conv2d_node.input());
+ ASSERT_EQ(nullptr, conv2d_node.filter());
+ ASSERT_EQ(nullptr, conv2d_node.bias());
+ ASSERT_EQ(luci::Padding::UNDEFINED, conv2d_node.padding());
+ ASSERT_EQ(1, conv2d_node.stride()->h());
+ ASSERT_EQ(1, conv2d_node.stride()->w());
+ ASSERT_EQ(1, conv2d_node.dilation()->h());
+ ASSERT_EQ(1, conv2d_node.dilation()->w());
+ ASSERT_EQ(luci::FusedActFunc::UNDEFINED, conv2d_node.fusedActivationFunction());
+}
+
+TEST(CircleConv2Dest, input_NEG)
+{
+ luci::CircleConv2D conv2d_node;
+ luci::CircleConv2D node;
+
+ conv2d_node.input(&node);
+ conv2d_node.filter(&node);
+ conv2d_node.bias(&node);
+ ASSERT_NE(nullptr, conv2d_node.input());
+ ASSERT_NE(nullptr, conv2d_node.filter());
+ ASSERT_NE(nullptr, conv2d_node.bias());
+
+ conv2d_node.input(nullptr);
+ conv2d_node.filter(nullptr);
+ conv2d_node.bias(nullptr);
+ ASSERT_EQ(nullptr, conv2d_node.input());
+ ASSERT_EQ(nullptr, conv2d_node.filter());
+ ASSERT_EQ(nullptr, conv2d_node.bias());
+
+ conv2d_node.padding(luci::Padding::SAME);
+ ASSERT_NE(luci::Padding::UNDEFINED, conv2d_node.padding());
+
+ conv2d_node.stride()->h(2);
+ conv2d_node.stride()->w(2);
+ ASSERT_EQ(2, conv2d_node.stride()->h());
+ ASSERT_EQ(2, conv2d_node.stride()->w());
+
+ conv2d_node.dilation()->h(2);
+ conv2d_node.dilation()->w(2);
+ ASSERT_EQ(2, conv2d_node.dilation()->h());
+ ASSERT_EQ(2, conv2d_node.dilation()->w());
+
+ conv2d_node.fusedActivationFunction(luci::FusedActFunc::RELU);
+ ASSERT_NE(luci::FusedActFunc::UNDEFINED, conv2d_node.fusedActivationFunction());
+}
+
+TEST(CircleConv2Dest, arity_NEG)
+{
+ luci::CircleConv2D conv2d_node;
+
+ ASSERT_NO_THROW(conv2d_node.arg(2));
+ ASSERT_THROW(conv2d_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleConv2Dest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleConv2D conv2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(conv2d_node.accept(&tv), std::exception);
+}
+
+TEST(CircleConv2Dest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleConv2D conv2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(conv2d_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleCos.test.cpp b/compiler/luci/lang/src/Nodes/CircleCos.test.cpp
index 34c2cfdf0..55438d37f 100644
--- a/compiler/luci/lang/src/Nodes/CircleCos.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleCos.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleCos.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleCosTest, constructor_P)
{
luci::CircleCos cos_node;
- ASSERT_EQ(cos_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(cos_node.opcode(), luci::CircleOpcode::COS);
+ ASSERT_EQ(luci::CircleDialect::get(), cos_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::COS, cos_node.opcode());
- ASSERT_EQ(cos_node.x(), nullptr);
+ ASSERT_EQ(nullptr, cos_node.x());
+}
+
+TEST(CircleCosTest, input_NEG)
+{
+ luci::CircleCos cos_node;
+ luci::CircleCos node;
+
+ cos_node.x(&node);
+ ASSERT_NE(nullptr, cos_node.x());
+
+ cos_node.x(nullptr);
+ ASSERT_EQ(nullptr, cos_node.x());
+}
+
+TEST(CircleCosTest, arity_NEG)
+{
+ luci::CircleCos cos_node;
+
+ ASSERT_NO_THROW(cos_node.arg(0));
+ ASSERT_THROW(cos_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleCosTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleCos cos_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(cos_node.accept(&tv), std::exception);
+}
+
+TEST(CircleCosTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleCos cos_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(cos_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleCustom.test.cpp b/compiler/luci/lang/src/Nodes/CircleCustom.test.cpp
new file mode 100644
index 000000000..74ea82c6c
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleCustom.test.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleCustom.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleCustomTest, constructor)
+{
+ luci::CircleCustom custom_node(2);
+
+ ASSERT_EQ(luci::CircleDialect::get(), custom_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CUSTOM, custom_node.opcode());
+
+ ASSERT_EQ(2, custom_node.arity());
+ ASSERT_EQ(nullptr, custom_node.arg(0));
+ ASSERT_EQ(nullptr, custom_node.arg(1));
+
+ ASSERT_EQ(2, custom_node.numInputs());
+ ASSERT_EQ(0, custom_node.custom_code().size());
+}
+
+TEST(CircleCustomTest, constructor_NEG) { ASSERT_DEBUG_DEATH(luci::CircleCustom{0}, ""); }
+
+TEST(CircleCustomTest, invalidIndex_NEG)
+{
+ luci::CircleCustom custom_node(2);
+
+ EXPECT_ANY_THROW(custom_node.arg(5));
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleCustomOut.test.cpp b/compiler/luci/lang/src/Nodes/CircleCustomOut.test.cpp
new file mode 100644
index 000000000..8b63f97b1
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleCustomOut.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleCustomOut.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleCustomOutTest, constructor)
+{
+ luci::CircleCustomOut customout_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), customout_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CIRCLECUSTOMOUT, customout_node.opcode());
+
+ ASSERT_EQ(nullptr, customout_node.input());
+ ASSERT_EQ(-1, customout_node.index());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleDepthToSpace.test.cpp b/compiler/luci/lang/src/Nodes/CircleDepthToSpace.test.cpp
new file mode 100644
index 000000000..9e3bbb7b7
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleDepthToSpace.test.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleDepthToSpace.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleDepthToSpaceTest, constructor_P)
+{
+ luci::CircleDepthToSpace std_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), std_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::DEPTH_TO_SPACE, std_node.opcode());
+
+ ASSERT_EQ(nullptr, std_node.input());
+ ASSERT_EQ(0, std_node.block_size());
+}
+
+TEST(CircleDepthToSpaceTest, input_NEG)
+{
+ luci::CircleDepthToSpace std_node;
+ luci::CircleDepthToSpace node;
+
+ std_node.input(&node);
+ ASSERT_NE(nullptr, std_node.input());
+
+ std_node.input(nullptr);
+ ASSERT_EQ(nullptr, std_node.input());
+
+ std_node.block_size(2);
+ ASSERT_EQ(2, std_node.block_size());
+}
+
+TEST(CircleDepthToSpaceTest, arity_NEG)
+{
+ luci::CircleDepthToSpace std_node;
+
+ ASSERT_NO_THROW(std_node.arg(0));
+ ASSERT_THROW(std_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleDepthToSpaceTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleDepthToSpace std_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(std_node.accept(&tv), std::exception);
+}
+
+TEST(CircleDepthToSpaceTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleDepthToSpace std_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(std_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleDepthwiseConv2D.test.cpp b/compiler/luci/lang/src/Nodes/CircleDepthwiseConv2D.test.cpp
index bbc1ea543..5761775e5 100644
--- a/compiler/luci/lang/src/Nodes/CircleDepthwiseConv2D.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleDepthwiseConv2D.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleDepthwiseConv2D.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,15 +25,85 @@ TEST(CircleDepthwiseConv2DTest, constructor_P)
{
luci::CircleDepthwiseConv2D dw_conv2d_node;
- ASSERT_EQ(dw_conv2d_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(dw_conv2d_node.opcode(), luci::CircleOpcode::DEPTHWISE_CONV_2D);
-
- ASSERT_EQ(dw_conv2d_node.input(), nullptr);
- ASSERT_EQ(dw_conv2d_node.filter(), nullptr);
- ASSERT_EQ(dw_conv2d_node.bias(), nullptr);
- ASSERT_EQ(dw_conv2d_node.padding(), luci::Padding::UNDEFINED);
- ASSERT_EQ(dw_conv2d_node.stride()->h(), 1);
- ASSERT_EQ(dw_conv2d_node.stride()->w(), 1);
- ASSERT_EQ(dw_conv2d_node.depthMultiplier(), 0);
- ASSERT_EQ(dw_conv2d_node.fusedActivationFunction(), luci::FusedActFunc::UNDEFINED);
+ ASSERT_EQ(luci::CircleDialect::get(), dw_conv2d_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::DEPTHWISE_CONV_2D, dw_conv2d_node.opcode());
+
+ ASSERT_EQ(nullptr, dw_conv2d_node.input());
+ ASSERT_EQ(nullptr, dw_conv2d_node.filter());
+ ASSERT_EQ(nullptr, dw_conv2d_node.bias());
+ ASSERT_EQ(luci::Padding::UNDEFINED, dw_conv2d_node.padding());
+ ASSERT_EQ(1, dw_conv2d_node.stride()->h());
+ ASSERT_EQ(1, dw_conv2d_node.stride()->w());
+ ASSERT_EQ(1, dw_conv2d_node.dilation()->h());
+ ASSERT_EQ(1, dw_conv2d_node.dilation()->w());
+ ASSERT_EQ(0, dw_conv2d_node.depthMultiplier());
+ ASSERT_EQ(luci::FusedActFunc::UNDEFINED, dw_conv2d_node.fusedActivationFunction());
+}
+
+TEST(CircleDepthwiseConv2DTest, input_NEG)
+{
+ luci::CircleDepthwiseConv2D dw_conv2d_node;
+ luci::CircleDepthwiseConv2D node;
+
+ dw_conv2d_node.input(&node);
+ dw_conv2d_node.filter(&node);
+ dw_conv2d_node.bias(&node);
+ ASSERT_NE(nullptr, dw_conv2d_node.input());
+ ASSERT_NE(nullptr, dw_conv2d_node.filter());
+ ASSERT_NE(nullptr, dw_conv2d_node.bias());
+
+ dw_conv2d_node.input(nullptr);
+ dw_conv2d_node.filter(nullptr);
+ dw_conv2d_node.bias(nullptr);
+ ASSERT_EQ(nullptr, dw_conv2d_node.input());
+ ASSERT_EQ(nullptr, dw_conv2d_node.filter());
+ ASSERT_EQ(nullptr, dw_conv2d_node.bias());
+
+ dw_conv2d_node.padding(luci::Padding::SAME);
+ ASSERT_NE(luci::Padding::UNDEFINED, dw_conv2d_node.padding());
+
+ dw_conv2d_node.stride()->h(2);
+ dw_conv2d_node.stride()->w(2);
+ ASSERT_EQ(2, dw_conv2d_node.stride()->h());
+ ASSERT_EQ(2, dw_conv2d_node.stride()->w());
+
+ dw_conv2d_node.dilation()->h(2);
+ dw_conv2d_node.dilation()->w(2);
+ ASSERT_EQ(2, dw_conv2d_node.dilation()->h());
+ ASSERT_EQ(2, dw_conv2d_node.dilation()->w());
+
+ dw_conv2d_node.fusedActivationFunction(luci::FusedActFunc::RELU);
+ ASSERT_NE(luci::FusedActFunc::UNDEFINED, dw_conv2d_node.fusedActivationFunction());
+}
+
+TEST(CircleDepthwiseConv2DTest, arity_NEG)
+{
+ luci::CircleDepthwiseConv2D dw_conv2d_node;
+
+ ASSERT_NO_THROW(dw_conv2d_node.arg(2));
+ ASSERT_THROW(dw_conv2d_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleDepthwiseConv2DTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleDepthwiseConv2D dw_conv2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(dw_conv2d_node.accept(&tv), std::exception);
+}
+
+TEST(CircleDepthwiseConv2DTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleDepthwiseConv2D dw_conv2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(dw_conv2d_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleDiv.test.cpp b/compiler/luci/lang/src/Nodes/CircleDiv.test.cpp
index e950cc6be..d0b632ca9 100644
--- a/compiler/luci/lang/src/Nodes/CircleDiv.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleDiv.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleDiv.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleDivTest, constructor_P)
{
luci::CircleDiv div_node;
- ASSERT_EQ(div_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(div_node.opcode(), luci::CircleOpcode::DIV);
+ ASSERT_EQ(luci::CircleDialect::get(), div_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::DIV, div_node.opcode());
- ASSERT_EQ(div_node.x(), nullptr);
- ASSERT_EQ(div_node.y(), nullptr);
+ ASSERT_EQ(nullptr, div_node.x());
+ ASSERT_EQ(nullptr, div_node.y());
+}
+
+TEST(CircleDivTest, input_NEG)
+{
+ luci::CircleDiv div_node;
+ luci::CircleDiv node;
+
+ div_node.x(&node);
+ div_node.y(&node);
+ ASSERT_NE(nullptr, div_node.x());
+ ASSERT_NE(nullptr, div_node.y());
+
+ div_node.x(nullptr);
+ div_node.y(nullptr);
+ ASSERT_EQ(nullptr, div_node.x());
+ ASSERT_EQ(nullptr, div_node.y());
+}
+
+TEST(CircleDivTest, arity_NEG)
+{
+ luci::CircleDiv div_node;
+
+ ASSERT_NO_THROW(div_node.arg(1));
+ ASSERT_THROW(div_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleDivTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleDiv div_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(div_node.accept(&tv), std::exception);
+}
+
+TEST(CircleDivTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleDiv div_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(div_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleElu.test.cpp b/compiler/luci/lang/src/Nodes/CircleElu.test.cpp
new file mode 100644
index 000000000..2a044d75b
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleElu.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleElu.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleEluTest, constructor_P)
+{
+ luci::CircleElu elu_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), elu_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ELU, elu_node.opcode());
+
+ ASSERT_EQ(nullptr, elu_node.features());
+}
+
+TEST(CircleEluTest, input_NEG)
+{
+ luci::CircleElu elu_node;
+ luci::CircleElu node;
+
+ elu_node.features(&node);
+ ASSERT_NE(nullptr, elu_node.features());
+
+ elu_node.features(nullptr);
+ ASSERT_EQ(nullptr, elu_node.features());
+}
+
+TEST(CircleEluTest, arity_NEG)
+{
+ luci::CircleElu elu_node;
+
+ ASSERT_NO_THROW(elu_node.arg(0));
+ ASSERT_THROW(elu_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleEluTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleElu elu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(elu_node.accept(&tv), std::exception);
+}
+
+TEST(CircleEluTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleElu elu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(elu_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleEqual.test.cpp b/compiler/luci/lang/src/Nodes/CircleEqual.test.cpp
index e2757f094..2ae15290d 100644
--- a/compiler/luci/lang/src/Nodes/CircleEqual.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleEqual.test.cpp
@@ -17,16 +17,65 @@
#include "luci/IR/Nodes/CircleEqual.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
TEST(CircleEqualTest, constructor_P)
{
- luci::CircleEqual or_node;
+ luci::CircleEqual equ_node;
- ASSERT_EQ(or_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(or_node.opcode(), luci::CircleOpcode::EQUAL);
+ ASSERT_EQ(luci::CircleDialect::get(), equ_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::EQUAL, equ_node.opcode());
- ASSERT_EQ(or_node.x(), nullptr);
- ASSERT_EQ(or_node.y(), nullptr);
+ ASSERT_EQ(nullptr, equ_node.x());
+ ASSERT_EQ(nullptr, equ_node.y());
+}
+
+TEST(CircleEqualTest, input_NEG)
+{
+ luci::CircleEqual equ_node;
+ luci::CircleEqual node;
+
+ equ_node.x(&node);
+ equ_node.y(&node);
+ ASSERT_NE(nullptr, equ_node.x());
+ ASSERT_NE(nullptr, equ_node.y());
+
+ equ_node.x(nullptr);
+ equ_node.y(nullptr);
+ ASSERT_EQ(nullptr, equ_node.x());
+ ASSERT_EQ(nullptr, equ_node.y());
+}
+
+TEST(CircleEqualTest, arity_NEG)
+{
+ luci::CircleEqual equ_node;
+
+ ASSERT_NO_THROW(equ_node.arg(1));
+ ASSERT_THROW(equ_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleEqualTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleEqual equ_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(equ_node.accept(&tv), std::exception);
+}
+
+TEST(CircleEqualTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleEqual equ_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(equ_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleExp.test.cpp b/compiler/luci/lang/src/Nodes/CircleExp.test.cpp
index db10d0b03..5ca90e90f 100644
--- a/compiler/luci/lang/src/Nodes/CircleExp.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleExp.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleExp.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleExpTest, constructor)
{
luci::CircleExp exp_node;
- ASSERT_EQ(exp_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(exp_node.opcode(), luci::CircleOpcode::EXP);
+ ASSERT_EQ(luci::CircleDialect::get(), exp_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::EXP, exp_node.opcode());
- ASSERT_EQ(exp_node.x(), nullptr);
+ ASSERT_EQ(nullptr, exp_node.x());
+}
+
+TEST(CircleExpTest, input_NEG)
+{
+ luci::CircleExp exp_node;
+ luci::CircleExp node;
+
+ exp_node.x(&node);
+ ASSERT_NE(nullptr, exp_node.x());
+
+ exp_node.x(nullptr);
+ ASSERT_EQ(nullptr, exp_node.x());
+}
+
+TEST(CircleExpTest, arity_NEG)
+{
+ luci::CircleExp exp_node;
+
+ ASSERT_NO_THROW(exp_node.arg(0));
+ ASSERT_THROW(exp_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleExpTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleExp exp_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(exp_node.accept(&tv), std::exception);
+}
+
+TEST(CircleExpTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleExp exp_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(exp_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleExpandDims.test.cpp b/compiler/luci/lang/src/Nodes/CircleExpandDims.test.cpp
new file mode 100644
index 000000000..754ef01f9
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleExpandDims.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleExpandDims.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleExpandDimsTest, constructor_P)
+{
+ luci::CircleExpandDims expand_dims;
+
+ ASSERT_EQ(luci::CircleDialect::get(), expand_dims.dialect());
+ ASSERT_EQ(luci::CircleOpcode::EXPAND_DIMS, expand_dims.opcode());
+
+ ASSERT_EQ(nullptr, expand_dims.input());
+ ASSERT_EQ(nullptr, expand_dims.axis());
+}
+
+TEST(CircleExpandDimsTest, input_NEG)
+{
+ luci::CircleExpandDims expand_dims;
+ luci::CircleExpandDims node;
+
+ expand_dims.input(&node);
+ expand_dims.axis(&node);
+ ASSERT_NE(nullptr, expand_dims.input());
+ ASSERT_NE(nullptr, expand_dims.axis());
+
+ expand_dims.input(nullptr);
+ expand_dims.axis(nullptr);
+ ASSERT_EQ(nullptr, expand_dims.input());
+ ASSERT_EQ(nullptr, expand_dims.axis());
+}
+
+TEST(CircleExpandDimsTest, arity_NEG)
+{
+ luci::CircleExpandDims expand_dims;
+
+ ASSERT_NO_THROW(expand_dims.arg(1));
+ ASSERT_THROW(expand_dims.arg(2), std::out_of_range);
+}
+
+TEST(CircleExpandDimsTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleExpandDims expand_dims;
+
+ TestVisitor tv;
+ ASSERT_THROW(expand_dims.accept(&tv), std::exception);
+}
+
+TEST(CircleExpandDimsTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleExpandDims expand_dims;
+
+ TestVisitor tv;
+ ASSERT_THROW(expand_dims.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleFill.test.cpp b/compiler/luci/lang/src/Nodes/CircleFill.test.cpp
new file mode 100644
index 000000000..4555da1cb
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleFill.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleFill.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleFillTest, constructor_P)
+{
+ luci::CircleFill fill;
+
+ ASSERT_EQ(fill.dialect(), luci::CircleDialect::get());
+ ASSERT_EQ(fill.opcode(), luci::CircleOpcode::FILL);
+
+ ASSERT_EQ(nullptr, fill.dims());
+ ASSERT_EQ(nullptr, fill.value());
+}
+
+TEST(CircleFillTest, input_NEG)
+{
+ luci::CircleFill fill_node;
+ luci::CircleFill node;
+
+ fill_node.dims(&node);
+ fill_node.value(&node);
+ ASSERT_NE(nullptr, fill_node.dims());
+ ASSERT_NE(nullptr, fill_node.value());
+
+ fill_node.dims(nullptr);
+ fill_node.value(nullptr);
+ ASSERT_EQ(nullptr, fill_node.dims());
+ ASSERT_EQ(nullptr, fill_node.value());
+}
+
+TEST(CircleFillTest, arity_NEG)
+{
+ luci::CircleFill fill_node;
+
+ ASSERT_NO_THROW(fill_node.arg(1));
+ ASSERT_THROW(fill_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleFillTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleFill fill_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(fill_node.accept(&tv), std::exception);
+}
+
+TEST(CircleFillTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleFill fill_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(fill_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleFloor.test.cpp b/compiler/luci/lang/src/Nodes/CircleFloor.test.cpp
new file mode 100644
index 000000000..38d38a0da
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleFloor.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleFloor.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleFloorTest, constructor)
+{
+ luci::CircleFloor floor_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), floor_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::FLOOR, floor_node.opcode());
+
+ ASSERT_EQ(nullptr, floor_node.x());
+}
+
+TEST(CircleFloorTest, input_NEG)
+{
+ luci::CircleFloor floor_node;
+ luci::CircleFloor node;
+
+ floor_node.x(&node);
+ ASSERT_NE(nullptr, floor_node.x());
+
+ floor_node.x(nullptr);
+ ASSERT_EQ(nullptr, floor_node.x());
+}
+
+TEST(CircleFloorTest, arity_NEG)
+{
+ luci::CircleFloor floor_node;
+
+ ASSERT_NO_THROW(floor_node.arg(0));
+ ASSERT_THROW(floor_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleFloorTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleFloor floor_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(floor_node.accept(&tv), std::exception);
+}
+
+TEST(CircleFloorTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleFloor floor_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(floor_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleFloorDiv.test.cpp b/compiler/luci/lang/src/Nodes/CircleFloorDiv.test.cpp
new file mode 100644
index 000000000..6c52eee73
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleFloorDiv.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleFloorDiv.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleFloorDivTest, constructor_P)
+{
+ luci::CircleFloorDiv floordiv_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), floordiv_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::FLOOR_DIV, floordiv_node.opcode());
+
+ ASSERT_EQ(nullptr, floordiv_node.x());
+ ASSERT_EQ(nullptr, floordiv_node.y());
+}
+
+TEST(CircleFloorDivTest, input_NEG)
+{
+ luci::CircleFloorDiv floordiv_node;
+ luci::CircleFloorDiv node;
+
+ floordiv_node.x(&node);
+ floordiv_node.y(&node);
+ ASSERT_NE(nullptr, floordiv_node.x());
+ ASSERT_NE(nullptr, floordiv_node.y());
+
+ floordiv_node.x(nullptr);
+ floordiv_node.y(nullptr);
+ ASSERT_EQ(nullptr, floordiv_node.x());
+ ASSERT_EQ(nullptr, floordiv_node.y());
+}
+
+TEST(CircleFloorDivTest, arity_NEG)
+{
+ luci::CircleFloorDiv floordiv_node;
+
+ ASSERT_NO_THROW(floordiv_node.arg(1));
+ ASSERT_THROW(floordiv_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleFloorDivTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleFloorDiv floordiv_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(floordiv_node.accept(&tv), std::exception);
+}
+
+TEST(CircleFloorDivTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleFloorDiv floordiv_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(floordiv_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleFloorMod.test.cpp b/compiler/luci/lang/src/Nodes/CircleFloorMod.test.cpp
new file mode 100644
index 000000000..c3fa187f2
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleFloorMod.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleFloorMod.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleFloorModTest, constructor)
+{
+ luci::CircleFloorMod floormod_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), floormod_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::FLOOR_MOD, floormod_node.opcode());
+
+ ASSERT_EQ(nullptr, floormod_node.x());
+ ASSERT_EQ(nullptr, floormod_node.y());
+}
+
+TEST(CircleFloorModTest, input_NEG)
+{
+ luci::CircleFloorMod floormod_node;
+ luci::CircleFloorMod node;
+
+ floormod_node.x(&node);
+ floormod_node.y(&node);
+ ASSERT_NE(nullptr, floormod_node.x());
+ ASSERT_NE(nullptr, floormod_node.y());
+
+ floormod_node.x(nullptr);
+ floormod_node.y(nullptr);
+ ASSERT_EQ(nullptr, floormod_node.x());
+ ASSERT_EQ(nullptr, floormod_node.y());
+}
+
+TEST(CircleFloorModTest, arity_NEG)
+{
+ luci::CircleFloorMod floormod_node;
+
+ ASSERT_NO_THROW(floormod_node.arg(1));
+ ASSERT_THROW(floormod_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleFloorModTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleFloorMod floormod_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(floormod_node.accept(&tv), std::exception);
+}
+
+TEST(CircleFloorModTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleFloorMod floormod_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(floormod_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleFullyConnected.test.cpp b/compiler/luci/lang/src/Nodes/CircleFullyConnected.test.cpp
index 994dcd239..bb0e3c51b 100644
--- a/compiler/luci/lang/src/Nodes/CircleFullyConnected.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleFullyConnected.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleFullyConnected.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,11 +25,66 @@ TEST(CircleFullyConnectedTest, constructor)
{
luci::CircleFullyConnected fc_node;
- ASSERT_EQ(fc_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(fc_node.opcode(), luci::CircleOpcode::FULLY_CONNECTED);
+ ASSERT_EQ(luci::CircleDialect::get(), fc_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::FULLY_CONNECTED, fc_node.opcode());
- ASSERT_EQ(fc_node.input(), nullptr);
- ASSERT_EQ(fc_node.weights(), nullptr);
- ASSERT_EQ(fc_node.bias(), nullptr);
- ASSERT_EQ(fc_node.fusedActivationFunction(), luci::FusedActFunc::UNDEFINED);
+ ASSERT_EQ(nullptr, fc_node.input());
+ ASSERT_EQ(nullptr, fc_node.weights());
+ ASSERT_EQ(nullptr, fc_node.bias());
+ ASSERT_EQ(luci::FusedActFunc::UNDEFINED, fc_node.fusedActivationFunction());
+}
+
+TEST(CircleFullyConnectedTest, input_NEG)
+{
+ luci::CircleFullyConnected fc_node;
+ luci::CircleFullyConnected node;
+
+ fc_node.input(&node);
+ fc_node.weights(&node);
+ fc_node.bias(&node);
+ ASSERT_NE(nullptr, fc_node.input());
+ ASSERT_NE(nullptr, fc_node.weights());
+ ASSERT_NE(nullptr, fc_node.bias());
+
+ fc_node.input(nullptr);
+ fc_node.weights(nullptr);
+ fc_node.bias(nullptr);
+ ASSERT_EQ(nullptr, fc_node.input());
+ ASSERT_EQ(nullptr, fc_node.weights());
+ ASSERT_EQ(nullptr, fc_node.bias());
+
+ fc_node.fusedActivationFunction(luci::FusedActFunc::RELU);
+ ASSERT_NE(luci::FusedActFunc::UNDEFINED, fc_node.fusedActivationFunction());
+}
+
+TEST(CircleFullyConnectedTest, arity_NEG)
+{
+ luci::CircleFullyConnected fc_node;
+
+ ASSERT_NO_THROW(fc_node.arg(2));
+ ASSERT_THROW(fc_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleFullyConnectedTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleFullyConnected fc_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(fc_node.accept(&tv), std::exception);
+}
+
+TEST(CircleFullyConnectedTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleFullyConnected fc_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(fc_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleGather.test.cpp b/compiler/luci/lang/src/Nodes/CircleGather.test.cpp
index 4eace9a02..5194d6bdd 100644
--- a/compiler/luci/lang/src/Nodes/CircleGather.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleGather.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleGather.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,10 +25,61 @@ TEST(CircleGatherTest, constructor)
{
luci::CircleGather gather_node;
- ASSERT_EQ(gather_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(gather_node.opcode(), luci::CircleOpcode::GATHER);
+ ASSERT_EQ(luci::CircleDialect::get(), gather_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::GATHER, gather_node.opcode());
- ASSERT_EQ(gather_node.input(), nullptr);
- ASSERT_EQ(gather_node.positions(), nullptr);
- ASSERT_EQ(gather_node.axis(), 0);
+ ASSERT_EQ(nullptr, gather_node.params());
+ ASSERT_EQ(nullptr, gather_node.indices());
+ ASSERT_EQ(0, gather_node.axis());
+}
+
+TEST(CircleGatherTest, input_NEG)
+{
+ luci::CircleGather gather_node;
+ luci::CircleGather node;
+
+ gather_node.params(&node);
+ gather_node.indices(&node);
+ ASSERT_NE(nullptr, gather_node.params());
+ ASSERT_NE(nullptr, gather_node.indices());
+
+ gather_node.params(nullptr);
+ gather_node.indices(nullptr);
+ ASSERT_EQ(nullptr, gather_node.params());
+ ASSERT_EQ(nullptr, gather_node.indices());
+
+ gather_node.axis(1);
+ ASSERT_NE(0, gather_node.axis());
+}
+
+TEST(CircleGatherTest, arity_NEG)
+{
+ luci::CircleGather gather_node;
+
+ ASSERT_NO_THROW(gather_node.arg(1));
+ ASSERT_THROW(gather_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleGatherTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleGather gather_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(gather_node.accept(&tv), std::exception);
+}
+
+TEST(CircleGatherTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleGather gather_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(gather_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleGatherNd.test.cpp b/compiler/luci/lang/src/Nodes/CircleGatherNd.test.cpp
new file mode 100644
index 000000000..1402e6e45
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleGatherNd.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleGatherNd.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleGatherNdTest, constructor)
+{
+ luci::CircleGatherNd gather_nd_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), gather_nd_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::GATHER_ND, gather_nd_node.opcode());
+
+ ASSERT_EQ(nullptr, gather_nd_node.params());
+ ASSERT_EQ(nullptr, gather_nd_node.indices());
+}
+
+TEST(CircleGatherNdTest, input_NEG)
+{
+ luci::CircleGatherNd gather_nd_node;
+ luci::CircleGatherNd node;
+
+ gather_nd_node.params(&node);
+ gather_nd_node.indices(&node);
+ ASSERT_NE(nullptr, gather_nd_node.params());
+ ASSERT_NE(nullptr, gather_nd_node.indices());
+
+ gather_nd_node.params(nullptr);
+ gather_nd_node.indices(nullptr);
+ ASSERT_EQ(nullptr, gather_nd_node.params());
+ ASSERT_EQ(nullptr, gather_nd_node.indices());
+}
+
+TEST(CircleGatherNdTest, arity_NEG)
+{
+ luci::CircleGatherNd gather_nd_node;
+
+ ASSERT_NO_THROW(gather_nd_node.arg(1));
+ ASSERT_THROW(gather_nd_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleGatherNdTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleGatherNd gather_nd_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(gather_nd_node.accept(&tv), std::exception);
+}
+
+TEST(CircleGatherNdTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleGatherNd gather_nd_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(gather_nd_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleGreater.test.cpp b/compiler/luci/lang/src/Nodes/CircleGreater.test.cpp
new file mode 100644
index 000000000..9a2b5f9f9
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleGreater.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleGreater.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleGreaterTest, constructor_P)
+{
+ luci::CircleGreater greater_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), greater_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::GREATER, greater_node.opcode());
+
+ ASSERT_EQ(nullptr, greater_node.x());
+ ASSERT_EQ(nullptr, greater_node.y());
+}
+
+TEST(CircleGreaterTest, input_NEG)
+{
+ luci::CircleGreater greater_node;
+ luci::CircleGreater node;
+
+ greater_node.x(&node);
+ greater_node.y(&node);
+ ASSERT_NE(nullptr, greater_node.x());
+ ASSERT_NE(nullptr, greater_node.y());
+
+ greater_node.x(nullptr);
+ greater_node.y(nullptr);
+ ASSERT_EQ(nullptr, greater_node.x());
+ ASSERT_EQ(nullptr, greater_node.y());
+}
+
+TEST(CircleGreaterTest, arity_NEG)
+{
+ luci::CircleGreater greater_node;
+
+ ASSERT_NO_THROW(greater_node.arg(1));
+ ASSERT_THROW(greater_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleGreaterTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleGreater greater_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(greater_node.accept(&tv), std::exception);
+}
+
+TEST(CircleGreaterTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleGreater greater_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(greater_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleGreaterEqual.test.cpp b/compiler/luci/lang/src/Nodes/CircleGreaterEqual.test.cpp
new file mode 100644
index 000000000..51c22b707
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleGreaterEqual.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleGreaterEqual.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleGreaterEqualTest, constructor_P)
+{
+ luci::CircleGreaterEqual greater_equal_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), greater_equal_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::GREATER_EQUAL, greater_equal_node.opcode());
+
+ ASSERT_EQ(nullptr, greater_equal_node.x());
+ ASSERT_EQ(nullptr, greater_equal_node.y());
+}
+
+TEST(CircleGreaterEqualTest, input_NEG)
+{
+ luci::CircleGreaterEqual greater_equal_node;
+ luci::CircleGreaterEqual node;
+
+ greater_equal_node.x(&node);
+ greater_equal_node.y(&node);
+ ASSERT_NE(nullptr, greater_equal_node.x());
+ ASSERT_NE(nullptr, greater_equal_node.y());
+
+ greater_equal_node.x(nullptr);
+ greater_equal_node.y(nullptr);
+ ASSERT_EQ(nullptr, greater_equal_node.x());
+ ASSERT_EQ(nullptr, greater_equal_node.y());
+}
+
+TEST(CircleGreaterEqualTest, arity_NEG)
+{
+ luci::CircleGreaterEqual greater_equal_node;
+
+ ASSERT_NO_THROW(greater_equal_node.arg(1));
+ ASSERT_THROW(greater_equal_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleGreaterEqualTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleGreaterEqual greater_equal_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(greater_equal_node.accept(&tv), std::exception);
+}
+
+TEST(CircleGreaterEqualTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleGreaterEqual greater_equal_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(greater_equal_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleIf.test.cpp b/compiler/luci/lang/src/Nodes/CircleIf.test.cpp
new file mode 100644
index 000000000..e3c8c9f60
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleIf.test.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleIf.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleIfTest, constructor)
+{
+ luci::CircleIf if_node(2, 2);
+
+ ASSERT_EQ(luci::CircleDialect::get(), if_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::IF, if_node.opcode());
+
+ ASSERT_EQ(2, if_node.input_count());
+ ASSERT_EQ(2, if_node.output_count());
+
+ ASSERT_EQ(nullptr, if_node.input(0));
+ ASSERT_EQ(nullptr, if_node.input(1));
+
+ ASSERT_EQ(-1, if_node.then_branch());
+ ASSERT_EQ(-1, if_node.else_branch());
+}
+
+TEST(CircleIfTestDeath, invalid_arity_NEG)
+{
+ ASSERT_DEBUG_DEATH(luci::CircleIf very_long_name_if_node(0, 1), "");
+}
+
+TEST(CircleIfTestDeath, invalid_output_count_NEG)
+{
+ ASSERT_DEBUG_DEATH(luci::CircleIf if_node(2, 0), "");
+}
+
+TEST(CircleIfTestDeath, invalid_input_get_index_NEG)
+{
+ luci::CircleIf if_node(2, 2);
+
+ EXPECT_ANY_THROW(if_node.input(100));
+}
+
+TEST(CircleIfTestDeath, invalid_input_set_index_NEG)
+{
+ luci::CircleIf if_node(2, 2);
+
+ EXPECT_ANY_THROW(if_node.input(100, nullptr));
+}
+
+TEST(CircleIfTestDeath, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleIf if_node(2, 2);
+
+ TestVisitor tv;
+ ASSERT_THROW(if_node.accept(&tv), std::exception);
+}
+
+TEST(CircleIfTestDeath, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleIf if_node(2, 2);
+
+ TestVisitor tv;
+ ASSERT_THROW(if_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleIfOut.test.cpp b/compiler/luci/lang/src/Nodes/CircleIfOut.test.cpp
new file mode 100644
index 000000000..5154b6b28
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleIfOut.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleIfOut.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleIfOutTest, constructor)
+{
+ luci::CircleIfOut ifout_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), ifout_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CIRCLEIFOUT, ifout_node.opcode());
+
+ ASSERT_EQ(nullptr, ifout_node.input());
+ ASSERT_EQ(-1, ifout_node.index());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleInstanceNorm.test.cpp b/compiler/luci/lang/src/Nodes/CircleInstanceNorm.test.cpp
index b87e81791..88a5b8c6c 100644
--- a/compiler/luci/lang/src/Nodes/CircleInstanceNorm.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleInstanceNorm.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleInstanceNorm.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,12 +25,67 @@ TEST(CircleInstanceNormTest, constructor)
{
luci::CircleInstanceNorm instance_norm;
- ASSERT_EQ(instance_norm.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(instance_norm.opcode(), luci::CircleOpcode::INSTANCE_NORM);
+ ASSERT_EQ(luci::CircleDialect::get(), instance_norm.dialect());
+ ASSERT_EQ(luci::CircleOpcode::INSTANCE_NORM, instance_norm.opcode());
- ASSERT_EQ(instance_norm.input(), nullptr);
- ASSERT_EQ(instance_norm.gamma(), nullptr);
- ASSERT_EQ(instance_norm.beta(), nullptr);
+ ASSERT_EQ(nullptr, instance_norm.input());
+ ASSERT_EQ(nullptr, instance_norm.gamma());
+ ASSERT_EQ(nullptr, instance_norm.beta());
ASSERT_FLOAT_EQ(instance_norm.epsilon(), 1e-05);
- ASSERT_EQ(instance_norm.fusedActivationFunction(), luci::FusedActFunc::UNDEFINED);
+ ASSERT_EQ(luci::FusedActFunc::UNDEFINED, instance_norm.fusedActivationFunction());
+}
+
+TEST(CircleInstanceNormTest, input_NEG)
+{
+ luci::CircleInstanceNorm instance_norm;
+ luci::CircleInstanceNorm node;
+
+ instance_norm.input(&node);
+ instance_norm.gamma(&node);
+ instance_norm.beta(&node);
+ ASSERT_NE(nullptr, instance_norm.input());
+ ASSERT_NE(nullptr, instance_norm.gamma());
+ ASSERT_NE(nullptr, instance_norm.beta());
+
+ instance_norm.input(nullptr);
+ instance_norm.gamma(nullptr);
+ instance_norm.beta(nullptr);
+ ASSERT_EQ(nullptr, instance_norm.input());
+ ASSERT_EQ(nullptr, instance_norm.gamma());
+ ASSERT_EQ(nullptr, instance_norm.beta());
+
+ instance_norm.fusedActivationFunction(luci::FusedActFunc::RELU);
+ ASSERT_NE(luci::FusedActFunc::UNDEFINED, instance_norm.fusedActivationFunction());
+}
+
+TEST(CircleInstanceNormTest, arity_NEG)
+{
+ luci::CircleInstanceNorm instance_norm;
+
+ ASSERT_NO_THROW(instance_norm.arg(2));
+ ASSERT_THROW(instance_norm.arg(3), std::out_of_range);
+}
+
+TEST(CircleInstanceNormTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleInstanceNorm instance_norm;
+
+ TestVisitor tv;
+ ASSERT_THROW(instance_norm.accept(&tv), std::exception);
+}
+
+TEST(CircleInstanceNormTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleInstanceNorm instance_norm;
+
+ TestVisitor tv;
+ ASSERT_THROW(instance_norm.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleL2Pool2D.test.cpp b/compiler/luci/lang/src/Nodes/CircleL2Pool2D.test.cpp
new file mode 100644
index 000000000..cb779efa5
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleL2Pool2D.test.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleL2Pool2D.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleL2Pool2DTest, constructor)
+{
+ luci::CircleL2Pool2D l2pool2d_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), l2pool2d_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::L2_POOL_2D, l2pool2d_node.opcode());
+
+ ASSERT_EQ(nullptr, l2pool2d_node.value());
+ ASSERT_EQ(1, l2pool2d_node.filter()->h());
+ ASSERT_EQ(1, l2pool2d_node.filter()->w());
+ ASSERT_EQ(1, l2pool2d_node.stride()->h());
+ ASSERT_EQ(1, l2pool2d_node.stride()->w());
+ ASSERT_EQ(luci::FusedActFunc::UNDEFINED, l2pool2d_node.fusedActivationFunction());
+}
+
+TEST(CircleL2Pool2DTest, input_NEG)
+{
+ luci::CircleL2Pool2D l2pool2d_node;
+ luci::CircleL2Pool2D node;
+
+ l2pool2d_node.value(&node);
+ ASSERT_NE(nullptr, l2pool2d_node.value());
+
+ l2pool2d_node.value(nullptr);
+ ASSERT_EQ(nullptr, l2pool2d_node.value());
+
+ l2pool2d_node.stride()->h(2);
+ l2pool2d_node.stride()->w(2);
+ ASSERT_EQ(2, l2pool2d_node.stride()->h());
+ ASSERT_EQ(2, l2pool2d_node.stride()->w());
+
+ l2pool2d_node.filter()->h(2);
+ l2pool2d_node.filter()->w(2);
+ ASSERT_EQ(2, l2pool2d_node.filter()->h());
+ ASSERT_EQ(2, l2pool2d_node.filter()->w());
+
+ l2pool2d_node.fusedActivationFunction(luci::FusedActFunc::RELU);
+ ASSERT_NE(luci::FusedActFunc::UNDEFINED, l2pool2d_node.fusedActivationFunction());
+}
+
+TEST(CircleL2Pool2DTest, arity_NEG)
+{
+ luci::CircleL2Pool2D l2pool2d_node;
+
+ ASSERT_NO_THROW(l2pool2d_node.arg(0));
+ ASSERT_THROW(l2pool2d_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleL2Pool2DTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleL2Pool2D l2pool2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(l2pool2d_node.accept(&tv), std::exception);
+}
+
+TEST(CircleL2Pool2DTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleL2Pool2D l2pool2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(l2pool2d_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLeakyRelu.test.cpp b/compiler/luci/lang/src/Nodes/CircleLeakyRelu.test.cpp
new file mode 100644
index 000000000..bacb444da
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLeakyRelu.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLeakyRelu.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLeakyReluTest, constructor)
+{
+ luci::CircleLeakyRelu relu_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), relu_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LEAKY_RELU, relu_node.opcode());
+
+ ASSERT_EQ(nullptr, relu_node.features());
+
+ ASSERT_EQ(0.2f, relu_node.alpha());
+}
+
+TEST(CircleLeakyReluTest, input_NEG)
+{
+ luci::CircleLeakyRelu relu_node;
+ luci::CircleLeakyRelu node;
+
+ relu_node.features(&node);
+ ASSERT_NE(nullptr, relu_node.features());
+
+ relu_node.features(nullptr);
+ ASSERT_EQ(nullptr, relu_node.features());
+
+ relu_node.alpha(1.2f);
+ ASSERT_NE(0.2f, relu_node.alpha());
+}
+
+TEST(CircleLeakyReluTest, arity_NEG)
+{
+ luci::CircleLeakyRelu relu_node;
+
+ ASSERT_NO_THROW(relu_node.arg(0));
+ ASSERT_THROW(relu_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleLeakyReluTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLeakyRelu relu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLeakyReluTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLeakyRelu relu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLess.test.cpp b/compiler/luci/lang/src/Nodes/CircleLess.test.cpp
new file mode 100644
index 000000000..ec454dfb5
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLess.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLess.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLessTest, constructor_P)
+{
+ luci::CircleLess less_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), less_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LESS, less_node.opcode());
+
+ ASSERT_EQ(nullptr, less_node.x());
+ ASSERT_EQ(nullptr, less_node.y());
+}
+
+TEST(CircleLessTest, input_NEG)
+{
+ luci::CircleLess less_node;
+ luci::CircleLess node;
+
+ less_node.x(&node);
+ less_node.y(&node);
+ ASSERT_NE(nullptr, less_node.x());
+ ASSERT_NE(nullptr, less_node.y());
+
+ less_node.x(nullptr);
+ less_node.y(nullptr);
+ ASSERT_EQ(nullptr, less_node.x());
+ ASSERT_EQ(nullptr, less_node.y());
+}
+
+TEST(CircleLessTest, arity_NEG)
+{
+ luci::CircleLess less_node;
+
+ ASSERT_NO_THROW(less_node.arg(1));
+ ASSERT_THROW(less_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleLessTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLess less_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(less_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLessTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLess less_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(less_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLessEqual.test.cpp b/compiler/luci/lang/src/Nodes/CircleLessEqual.test.cpp
new file mode 100644
index 000000000..baa9202ae
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLessEqual.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLessEqual.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLessEqualTest, constructor_P)
+{
+ luci::CircleLessEqual less_equal_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), less_equal_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LESS_EQUAL, less_equal_node.opcode());
+
+ ASSERT_EQ(nullptr, less_equal_node.x());
+ ASSERT_EQ(nullptr, less_equal_node.y());
+}
+
+TEST(CircleLessEqualTest, input_NEG)
+{
+ luci::CircleLessEqual less_equal_node;
+ luci::CircleLessEqual node;
+
+ less_equal_node.x(&node);
+ less_equal_node.y(&node);
+ ASSERT_NE(nullptr, less_equal_node.x());
+ ASSERT_NE(nullptr, less_equal_node.y());
+
+ less_equal_node.x(nullptr);
+ less_equal_node.y(nullptr);
+ ASSERT_EQ(nullptr, less_equal_node.x());
+ ASSERT_EQ(nullptr, less_equal_node.y());
+}
+
+TEST(CircleLessEqualTest, arity_NEG)
+{
+ luci::CircleLessEqual less_equal_node;
+
+ ASSERT_NO_THROW(less_equal_node.arg(1));
+ ASSERT_THROW(less_equal_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleLessEqualTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLessEqual less_equal_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(less_equal_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLessEqualTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLessEqual less_equal_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(less_equal_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLocalResponseNormalization.test.cpp b/compiler/luci/lang/src/Nodes/CircleLocalResponseNormalization.test.cpp
new file mode 100644
index 000000000..1b1bf67bd
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLocalResponseNormalization.test.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLocalResponseNormalization.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLocalResponseNormalizationTest, constructor_P)
+{
+ luci::CircleLocalResponseNormalization local_response_normalization_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), local_response_normalization_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LOCAL_RESPONSE_NORMALIZATION,
+ local_response_normalization_node.opcode());
+
+ ASSERT_EQ(nullptr, local_response_normalization_node.input());
+ ASSERT_EQ(5, local_response_normalization_node.radius());
+ ASSERT_EQ(1.0f, local_response_normalization_node.bias());
+ ASSERT_EQ(1.0f, local_response_normalization_node.alpha());
+ ASSERT_EQ(0.5f, local_response_normalization_node.beta());
+}
+
+TEST(CircleLocalResponseNormalizationTest, input_NEG)
+{
+ luci::CircleLocalResponseNormalization local_response_normalization_node;
+ luci::CircleLocalResponseNormalization node;
+
+ local_response_normalization_node.input(&node);
+ ASSERT_NE(nullptr, local_response_normalization_node.input());
+
+ local_response_normalization_node.input(nullptr);
+ ASSERT_EQ(nullptr, local_response_normalization_node.input());
+
+ local_response_normalization_node.radius(100);
+ local_response_normalization_node.bias(100.0f);
+ local_response_normalization_node.alpha(100.0f);
+ local_response_normalization_node.beta(100.0f);
+ ASSERT_NE(5, local_response_normalization_node.radius());
+ ASSERT_NE(1.0f, local_response_normalization_node.bias());
+ ASSERT_NE(1.0f, local_response_normalization_node.alpha());
+ ASSERT_NE(0.5f, local_response_normalization_node.beta());
+}
+
+TEST(CircleLocalResponseNormalizationTest, arity_NEG)
+{
+ luci::CircleLocalResponseNormalization local_response_normalization_node;
+
+ ASSERT_NO_THROW(local_response_normalization_node.arg(0));
+ ASSERT_THROW(local_response_normalization_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleLocalResponseNormalizationTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLocalResponseNormalization local_response_normalization_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(local_response_normalization_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLocalResponseNormalizationTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLocalResponseNormalization local_response_normalization_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(local_response_normalization_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLog.test.cpp b/compiler/luci/lang/src/Nodes/CircleLog.test.cpp
new file mode 100644
index 000000000..0bb9ee76e
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLog.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLog.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLogTest, constructor)
+{
+ luci::CircleLog log_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), log_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LOG, log_node.opcode());
+
+ ASSERT_EQ(nullptr, log_node.x());
+}
+
+TEST(CircleLogTest, input_NEG)
+{
+ luci::CircleLog log_node;
+ luci::CircleLog node;
+
+ log_node.x(&node);
+ ASSERT_NE(nullptr, log_node.x());
+
+ log_node.x(nullptr);
+ ASSERT_EQ(nullptr, log_node.x());
+}
+
+TEST(CircleLogTest, arity_NEG)
+{
+ luci::CircleLog log_node;
+
+ ASSERT_NO_THROW(log_node.arg(0));
+ ASSERT_THROW(log_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleLogTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLog log_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(log_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLogTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLog log_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(log_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLogSoftmax.test.cpp b/compiler/luci/lang/src/Nodes/CircleLogSoftmax.test.cpp
new file mode 100644
index 000000000..70977ae4f
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLogSoftmax.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLogSoftmax.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLogSoftmaxTest, constructor)
+{
+ luci::CircleLogSoftmax log_softmax_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), log_softmax_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LOG_SOFTMAX, log_softmax_node.opcode());
+
+ ASSERT_EQ(nullptr, log_softmax_node.logits());
+}
+
+TEST(CircleLogSoftmaxTest, input_NEG)
+{
+ luci::CircleLogSoftmax log_softmax_node;
+ luci::CircleLogSoftmax node;
+
+ log_softmax_node.logits(&node);
+ ASSERT_NE(nullptr, log_softmax_node.logits());
+
+ log_softmax_node.logits(nullptr);
+ ASSERT_EQ(nullptr, log_softmax_node.logits());
+}
+
+TEST(CircleLogSoftmaxTest, arity_NEG)
+{
+ luci::CircleLogSoftmax log_softmax_node;
+
+ ASSERT_NO_THROW(log_softmax_node.arg(0));
+ ASSERT_THROW(log_softmax_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleLogSoftmaxTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLogSoftmax log_softmax_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(log_softmax_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLogSoftmaxTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLogSoftmax log_softmax_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(log_softmax_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLogicalAnd.test.cpp b/compiler/luci/lang/src/Nodes/CircleLogicalAnd.test.cpp
new file mode 100644
index 000000000..db378f022
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLogicalAnd.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLogicalAnd.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLogicalAndTest, constructor_P)
+{
+ luci::CircleLogicalAnd and_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), and_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LOGICAL_AND, and_node.opcode());
+
+ ASSERT_EQ(nullptr, and_node.x());
+ ASSERT_EQ(nullptr, and_node.y());
+}
+
+TEST(CircleLogicalAndTest, input_NEG)
+{
+ luci::CircleLogicalAnd and_node;
+ luci::CircleLogicalAnd node;
+
+ and_node.x(&node);
+ and_node.y(&node);
+ ASSERT_NE(nullptr, and_node.x());
+ ASSERT_NE(nullptr, and_node.y());
+
+ and_node.x(nullptr);
+ and_node.y(nullptr);
+ ASSERT_EQ(nullptr, and_node.x());
+ ASSERT_EQ(nullptr, and_node.y());
+}
+
+TEST(CircleLogicalAndTest, arity_NEG)
+{
+ luci::CircleLogicalAnd and_node;
+
+ ASSERT_NO_THROW(and_node.arg(1));
+ ASSERT_THROW(and_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleLogicalAndTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLogicalAnd and_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(and_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLogicalAndTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLogicalAnd and_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(and_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleLogicalNot.test.cpp b/compiler/luci/lang/src/Nodes/CircleLogicalNot.test.cpp
index 360dd4711..0c2c02938 100644
--- a/compiler/luci/lang/src/Nodes/CircleLogicalNot.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleLogicalNot.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleLogicalNot.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleLogicalNotTest, constructor_P)
{
luci::CircleLogicalNot not_node;
- ASSERT_EQ(not_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(not_node.opcode(), luci::CircleOpcode::LOGICAL_NOT);
+ ASSERT_EQ(luci::CircleDialect::get(), not_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LOGICAL_NOT, not_node.opcode());
- ASSERT_EQ(not_node.x(), nullptr);
+ ASSERT_EQ(nullptr, not_node.x());
+}
+
+TEST(CircleLogicalNotTest, input_NEG)
+{
+ luci::CircleLogicalNot not_node;
+ luci::CircleLogicalNot node;
+
+ not_node.x(&node);
+ ASSERT_NE(nullptr, not_node.x());
+
+ not_node.x(nullptr);
+ ASSERT_EQ(nullptr, not_node.x());
+}
+
+TEST(CircleLogicalNotTest, arity_NEG)
+{
+ luci::CircleLogicalNot not_node;
+
+ ASSERT_NO_THROW(not_node.arg(0));
+ ASSERT_THROW(not_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleLogicalNotTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLogicalNot not_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(not_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLogicalNotTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLogicalNot not_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(not_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleLogicalOr.test.cpp b/compiler/luci/lang/src/Nodes/CircleLogicalOr.test.cpp
index 039db4afc..a08b863c7 100644
--- a/compiler/luci/lang/src/Nodes/CircleLogicalOr.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleLogicalOr.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleLogicalOr.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleLogicalOrTest, constructor_P)
{
luci::CircleLogicalOr or_node;
- ASSERT_EQ(or_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(or_node.opcode(), luci::CircleOpcode::LOGICAL_OR);
+ ASSERT_EQ(luci::CircleDialect::get(), or_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LOGICAL_OR, or_node.opcode());
- ASSERT_EQ(or_node.x(), nullptr);
- ASSERT_EQ(or_node.y(), nullptr);
+ ASSERT_EQ(nullptr, or_node.x());
+ ASSERT_EQ(nullptr, or_node.y());
+}
+
+TEST(CircleLogicalOrTest, input_NEG)
+{
+ luci::CircleLogicalOr or_node;
+ luci::CircleLogicalOr node;
+
+ or_node.x(&node);
+ or_node.y(&node);
+ ASSERT_NE(nullptr, or_node.x());
+ ASSERT_NE(nullptr, or_node.y());
+
+ or_node.x(nullptr);
+ or_node.y(nullptr);
+ ASSERT_EQ(nullptr, or_node.x());
+ ASSERT_EQ(nullptr, or_node.y());
+}
+
+TEST(CircleLogicalOrTest, arity_NEG)
+{
+ luci::CircleLogicalOr or_node;
+
+ ASSERT_NO_THROW(or_node.arg(1));
+ ASSERT_THROW(or_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleLogicalOrTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLogicalOr or_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(or_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLogicalOrTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLogicalOr or_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(or_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleLogistic.test.cpp b/compiler/luci/lang/src/Nodes/CircleLogistic.test.cpp
new file mode 100644
index 000000000..18efd869d
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleLogistic.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleLogistic.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleLogisticTest, constructor)
+{
+ luci::CircleLogistic logistic_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), logistic_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::LOGISTIC, logistic_node.opcode());
+
+ ASSERT_EQ(nullptr, logistic_node.x());
+}
+
+TEST(CircleLogisticTest, input_NEG)
+{
+ luci::CircleLogistic logistic_node;
+ luci::CircleLogistic node;
+
+ logistic_node.x(&node);
+ ASSERT_NE(nullptr, logistic_node.x());
+
+ logistic_node.x(nullptr);
+ ASSERT_EQ(nullptr, logistic_node.x());
+}
+
+TEST(CircleLogisticTest, arity_NEG)
+{
+ luci::CircleLogistic logistic_node;
+
+ ASSERT_NO_THROW(logistic_node.arg(0));
+ ASSERT_THROW(logistic_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleLogisticTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleLogistic logistic_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(logistic_node.accept(&tv), std::exception);
+}
+
+TEST(CircleLogisticTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleLogistic logistic_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(logistic_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleMatrixDiag.test.cpp b/compiler/luci/lang/src/Nodes/CircleMatrixDiag.test.cpp
new file mode 100644
index 000000000..9209cf1a4
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleMatrixDiag.test.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleMatrixDiag.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleMatrixDiagTest, constructor_P)
+{
+ luci::CircleMatrixDiag matrix_diag_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), matrix_diag_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MATRIX_DIAG, matrix_diag_node.opcode());
+
+ ASSERT_EQ(nullptr, matrix_diag_node.diagonal());
+}
+
+TEST(CircleMatrixDiagTest, input_NEG)
+{
+ luci::CircleMatrixDiag matrix_diag_node;
+ luci::CircleMatrixDiag node;
+
+ matrix_diag_node.diagonal(&node);
+
+ ASSERT_NE(nullptr, matrix_diag_node.diagonal());
+
+ matrix_diag_node.diagonal(nullptr);
+
+ ASSERT_EQ(nullptr, matrix_diag_node.diagonal());
+}
+
+TEST(CircleMatrixDiagTest, arity_NEG)
+{
+ luci::CircleMatrixDiag matrix_diag_node;
+
+ ASSERT_NO_THROW(matrix_diag_node.arg(0));
+ ASSERT_THROW(matrix_diag_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleMatrixDiagTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMatrixDiag matrix_diag_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(matrix_diag_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMatrixDiagTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMatrixDiag matrix_diag_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(matrix_diag_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleMatrixSetDiag.test.cpp b/compiler/luci/lang/src/Nodes/CircleMatrixSetDiag.test.cpp
new file mode 100644
index 000000000..9dea9852e
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleMatrixSetDiag.test.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleMatrixSetDiag.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleMatrixSetDiagTest, constructor_P)
+{
+ luci::CircleMatrixSetDiag matrix_set_diag_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), matrix_set_diag_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MATRIX_SET_DIAG, matrix_set_diag_node.opcode());
+
+ ASSERT_EQ(nullptr, matrix_set_diag_node.input());
+ ASSERT_EQ(nullptr, matrix_set_diag_node.diagonal());
+}
+
+TEST(CircleMatrixSetDiagTest, input_NEG)
+{
+ luci::CircleMatrixSetDiag matrix_set_diag_node;
+ luci::CircleMatrixSetDiag node;
+
+ matrix_set_diag_node.input(&node);
+ matrix_set_diag_node.diagonal(&node);
+
+ ASSERT_NE(nullptr, matrix_set_diag_node.input());
+ ASSERT_NE(nullptr, matrix_set_diag_node.diagonal());
+
+ matrix_set_diag_node.input(nullptr);
+ matrix_set_diag_node.diagonal(nullptr);
+
+ ASSERT_EQ(nullptr, matrix_set_diag_node.input());
+ ASSERT_EQ(nullptr, matrix_set_diag_node.diagonal());
+}
+
+TEST(CircleMatrixSetDiagTest, arity_NEG)
+{
+ luci::CircleMatrixSetDiag matrix_set_diag_node;
+
+ ASSERT_NO_THROW(matrix_set_diag_node.arg(0));
+ ASSERT_NO_THROW(matrix_set_diag_node.arg(1));
+ ASSERT_THROW(matrix_set_diag_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleMatrixSetDiagTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMatrixSetDiag matrix_set_diag_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(matrix_set_diag_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMatrixSetDiagTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMatrixSetDiag matrix_set_diag_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(matrix_set_diag_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleMaxPool2D.test.cpp b/compiler/luci/lang/src/Nodes/CircleMaxPool2D.test.cpp
index 874ecec0e..cb6c016e3 100644
--- a/compiler/luci/lang/src/Nodes/CircleMaxPool2D.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleMaxPool2D.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleMaxPool2D.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,10 +25,66 @@ TEST(CircleMaxPool2DTest, constructor_P)
{
luci::CircleMaxPool2D maxpool2d_node;
- ASSERT_EQ(maxpool2d_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(maxpool2d_node.opcode(), luci::CircleOpcode::MAX_POOL_2D);
+ ASSERT_EQ(luci::CircleDialect::get(), maxpool2d_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MAX_POOL_2D, maxpool2d_node.opcode());
- ASSERT_EQ(maxpool2d_node.value(), nullptr);
- ASSERT_NE(maxpool2d_node.filter(), nullptr);
- ASSERT_NE(maxpool2d_node.stride(), nullptr);
+ ASSERT_EQ(nullptr, maxpool2d_node.value());
+ ASSERT_EQ(luci::Padding::UNDEFINED, maxpool2d_node.padding());
+ ASSERT_EQ(1, maxpool2d_node.filter()->h());
+ ASSERT_EQ(1, maxpool2d_node.filter()->w());
+ ASSERT_EQ(1, maxpool2d_node.stride()->h());
+ ASSERT_EQ(1, maxpool2d_node.stride()->w());
+}
+
+TEST(CircleMaxPool2DTest, input_NEG)
+{
+ luci::CircleMaxPool2D maxpool2d_node;
+ luci::CircleMaxPool2D node;
+
+ maxpool2d_node.value(&node);
+ ASSERT_NE(nullptr, maxpool2d_node.value());
+
+ maxpool2d_node.value(nullptr);
+ ASSERT_EQ(nullptr, maxpool2d_node.value());
+
+ maxpool2d_node.filter()->h(2);
+ maxpool2d_node.filter()->w(2);
+ maxpool2d_node.stride()->h(2);
+ maxpool2d_node.stride()->w(2);
+ ASSERT_NE(1, maxpool2d_node.filter()->h());
+ ASSERT_NE(1, maxpool2d_node.filter()->w());
+ ASSERT_NE(1, maxpool2d_node.stride()->h());
+ ASSERT_NE(1, maxpool2d_node.stride()->w());
+}
+
+TEST(CircleMaxPool2DTest, arity_NEG)
+{
+ luci::CircleMaxPool2D maxpool2d_node;
+
+ ASSERT_NO_THROW(maxpool2d_node.arg(0));
+ ASSERT_THROW(maxpool2d_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleMaxPool2DTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMaxPool2D maxpool2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(maxpool2d_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMaxPool2DTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMaxPool2D maxpool2d_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(maxpool2d_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleMaximum.test.cpp b/compiler/luci/lang/src/Nodes/CircleMaximum.test.cpp
index efe62f11a..3fc6f1114 100644
--- a/compiler/luci/lang/src/Nodes/CircleMaximum.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleMaximum.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleMaximum.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleMaximumTest, constructor_P)
{
luci::CircleMaximum max_node;
- ASSERT_EQ(max_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(max_node.opcode(), luci::CircleOpcode::MAXIMUM);
+ ASSERT_EQ(luci::CircleDialect::get(), max_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MAXIMUM, max_node.opcode());
- ASSERT_EQ(max_node.x(), nullptr);
- ASSERT_EQ(max_node.y(), nullptr);
+ ASSERT_EQ(nullptr, max_node.x());
+ ASSERT_EQ(nullptr, max_node.y());
+}
+
+TEST(CircleMaximumTest, input_NEG)
+{
+ luci::CircleMaximum max_node;
+ luci::CircleMaximum node;
+
+ max_node.x(&node);
+ max_node.y(&node);
+ ASSERT_NE(nullptr, max_node.x());
+ ASSERT_NE(nullptr, max_node.y());
+
+ max_node.x(nullptr);
+ max_node.y(nullptr);
+ ASSERT_EQ(nullptr, max_node.x());
+ ASSERT_EQ(nullptr, max_node.y());
+}
+
+TEST(CircleMaximumTest, arity_NEG)
+{
+ luci::CircleMaximum max_node;
+
+ ASSERT_NO_THROW(max_node.arg(1));
+ ASSERT_THROW(max_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleMaximumTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMaximum max_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(max_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMaximumTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMaximum max_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(max_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleMean.test.cpp b/compiler/luci/lang/src/Nodes/CircleMean.test.cpp
new file mode 100644
index 000000000..743063968
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleMean.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleMean.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleMeanTest, constructor)
+{
+ luci::CircleMean mean_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), mean_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MEAN, mean_node.opcode());
+
+ ASSERT_EQ(nullptr, mean_node.input());
+ ASSERT_EQ(nullptr, mean_node.reduction_indices());
+
+ ASSERT_FALSE(mean_node.keep_dims());
+}
+
+TEST(CircleMeanTest, input_NEG)
+{
+ luci::CircleMean mean_node;
+ luci::CircleMean node;
+
+ mean_node.input(&node);
+ mean_node.reduction_indices(&node);
+ ASSERT_NE(nullptr, mean_node.input());
+ ASSERT_NE(nullptr, mean_node.reduction_indices());
+
+ mean_node.input(nullptr);
+ mean_node.reduction_indices(nullptr);
+ ASSERT_EQ(nullptr, mean_node.input());
+ ASSERT_EQ(nullptr, mean_node.reduction_indices());
+
+ mean_node.keep_dims(true);
+ ASSERT_TRUE(mean_node.keep_dims());
+}
+
+TEST(CircleMeanTest, arity_NEG)
+{
+ luci::CircleMean mean_node;
+
+ ASSERT_NO_THROW(mean_node.arg(1));
+ ASSERT_THROW(mean_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleMeanTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMean mean_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(mean_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMeanTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMean mean_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(mean_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleMinimum.test.cpp b/compiler/luci/lang/src/Nodes/CircleMinimum.test.cpp
new file mode 100644
index 000000000..19fe69fb7
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleMinimum.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleMinimum.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleMinimumTest, constructor_P)
+{
+ luci::CircleMinimum min_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), min_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MINIMUM, min_node.opcode());
+
+ ASSERT_EQ(nullptr, min_node.x());
+ ASSERT_EQ(nullptr, min_node.y());
+}
+
+TEST(CircleMinimumTest, input_NEG)
+{
+ luci::CircleMinimum min_node;
+ luci::CircleMinimum node;
+
+ min_node.x(&node);
+ min_node.y(&node);
+ ASSERT_NE(nullptr, min_node.x());
+ ASSERT_NE(nullptr, min_node.y());
+
+ min_node.x(nullptr);
+ min_node.y(nullptr);
+ ASSERT_EQ(nullptr, min_node.x());
+ ASSERT_EQ(nullptr, min_node.y());
+}
+
+TEST(CircleMinimumTest, arity_NEG)
+{
+ luci::CircleMinimum min_node;
+
+ ASSERT_NO_THROW(min_node.arg(1));
+ ASSERT_THROW(min_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleMinimumTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMinimum min_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(min_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMinimumTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMinimum min_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(min_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleMirrorPad.test.cpp b/compiler/luci/lang/src/Nodes/CircleMirrorPad.test.cpp
new file mode 100644
index 000000000..9ba6bf58a
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleMirrorPad.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleMirrorPad.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleMirrorPadTest, constructor_P)
+{
+ luci::CircleMirrorPad pad_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), pad_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MIRROR_PAD, pad_node.opcode());
+
+ ASSERT_EQ(nullptr, pad_node.input());
+ ASSERT_EQ(nullptr, pad_node.paddings());
+
+ ASSERT_EQ(luci::MirrorPadMode::REFLECT, pad_node.mode());
+}
+
+TEST(CircleMirrorPadTest, input_NEG)
+{
+ luci::CircleMirrorPad pad_node;
+ luci::CircleMirrorPad node;
+
+ pad_node.input(&node);
+ pad_node.paddings(&node);
+ ASSERT_NE(nullptr, pad_node.input());
+ ASSERT_NE(nullptr, pad_node.paddings());
+
+ pad_node.input(nullptr);
+ pad_node.paddings(nullptr);
+ ASSERT_EQ(nullptr, pad_node.input());
+ ASSERT_EQ(nullptr, pad_node.paddings());
+
+ pad_node.mode(luci::MirrorPadMode::SYMMETRIC);
+ ASSERT_NE(luci::MirrorPadMode::REFLECT, pad_node.mode());
+}
+
+TEST(CircleMirrorPadTest, arity_NEG)
+{
+ luci::CircleMirrorPad pad_node;
+
+ ASSERT_NO_THROW(pad_node.arg(1));
+ ASSERT_THROW(pad_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleMirrorPadTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMirrorPad pad_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(pad_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMirrorPadTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMirrorPad pad_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(pad_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleMul.test.cpp b/compiler/luci/lang/src/Nodes/CircleMul.test.cpp
index f9eca42f9..3c26d08ca 100644
--- a/compiler/luci/lang/src/Nodes/CircleMul.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleMul.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleMul.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleMulTest, constructor_P)
{
luci::CircleMul mul_node;
- ASSERT_EQ(mul_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(mul_node.opcode(), luci::CircleOpcode::MUL);
+ ASSERT_EQ(luci::CircleDialect::get(), mul_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::MUL, mul_node.opcode());
- ASSERT_EQ(mul_node.x(), nullptr);
- ASSERT_EQ(mul_node.y(), nullptr);
+ ASSERT_EQ(nullptr, mul_node.x());
+ ASSERT_EQ(nullptr, mul_node.y());
+}
+
+TEST(CircleMulTest, input_NEG)
+{
+ luci::CircleMul mul_node;
+ luci::CircleMul node;
+
+ mul_node.x(&node);
+ mul_node.y(&node);
+ ASSERT_NE(nullptr, mul_node.x());
+ ASSERT_NE(nullptr, mul_node.y());
+
+ mul_node.x(nullptr);
+ mul_node.y(nullptr);
+ ASSERT_EQ(nullptr, mul_node.x());
+ ASSERT_EQ(nullptr, mul_node.y());
+}
+
+TEST(CircleMulTest, arity_NEG)
+{
+ luci::CircleMul mul_node;
+
+ ASSERT_NO_THROW(mul_node.arg(1));
+ ASSERT_THROW(mul_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleMulTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleMul mul_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(mul_node.accept(&tv), std::exception);
+}
+
+TEST(CircleMulTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleMul mul_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(mul_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleNeg.test.cpp b/compiler/luci/lang/src/Nodes/CircleNeg.test.cpp
new file mode 100644
index 000000000..4bcfa48a6
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleNeg.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleNeg.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleNegTest, constructor)
+{
+ luci::CircleNeg neg_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), neg_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::NEG, neg_node.opcode());
+
+ ASSERT_EQ(nullptr, neg_node.x());
+}
+
+TEST(CircleNegTest, input_NEG)
+{
+ luci::CircleNeg neg_node;
+ luci::CircleNeg node;
+
+ neg_node.x(&node);
+ ASSERT_NE(nullptr, neg_node.x());
+
+ neg_node.x(nullptr);
+ ASSERT_EQ(nullptr, neg_node.x());
+}
+
+TEST(CircleNegTest, arity_NEG)
+{
+ luci::CircleNeg neg_node;
+
+ ASSERT_NO_THROW(neg_node.arg(0));
+ ASSERT_THROW(neg_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleNegTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleNeg neg_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(neg_node.accept(&tv), std::exception);
+}
+
+TEST(CircleNegTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleNeg neg_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(neg_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleNotEqual.test.cpp b/compiler/luci/lang/src/Nodes/CircleNotEqual.test.cpp
new file mode 100644
index 000000000..e464a7b96
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleNotEqual.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleNotEqual.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleNotEqualTest, constructor_P)
+{
+ luci::CircleNotEqual not_equal_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), not_equal_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::NOT_EQUAL, not_equal_node.opcode());
+
+ ASSERT_EQ(nullptr, not_equal_node.x());
+ ASSERT_EQ(nullptr, not_equal_node.y());
+}
+
+TEST(CircleNotEqualTest, input_NEG)
+{
+ luci::CircleNotEqual not_equal_node;
+ luci::CircleNotEqual node;
+
+ not_equal_node.x(&node);
+ not_equal_node.y(&node);
+ ASSERT_NE(nullptr, not_equal_node.x());
+ ASSERT_NE(nullptr, not_equal_node.y());
+
+ not_equal_node.x(nullptr);
+ not_equal_node.y(nullptr);
+ ASSERT_EQ(nullptr, not_equal_node.x());
+ ASSERT_EQ(nullptr, not_equal_node.y());
+}
+
+TEST(CircleNotEqualTest, arity_NEG)
+{
+ luci::CircleNotEqual not_equal_node;
+
+ ASSERT_NO_THROW(not_equal_node.arg(1));
+ ASSERT_THROW(not_equal_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleNotEqualTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleNotEqual not_equal_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(not_equal_node.accept(&tv), std::exception);
+}
+
+TEST(CircleNotEqualTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleNotEqual not_equal_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(not_equal_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleOneHot.test.cpp b/compiler/luci/lang/src/Nodes/CircleOneHot.test.cpp
new file mode 100644
index 000000000..18e1045cc
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleOneHot.test.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleOneHot.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleOneHotTest, constructor)
+{
+ luci::CircleOneHot one_hot_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), one_hot_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ONE_HOT, one_hot_node.opcode());
+
+ ASSERT_EQ(nullptr, one_hot_node.indices());
+ ASSERT_EQ(nullptr, one_hot_node.depth());
+ ASSERT_EQ(nullptr, one_hot_node.on_value());
+ ASSERT_EQ(nullptr, one_hot_node.off_value());
+ ASSERT_EQ(-1, one_hot_node.axis());
+}
+
+TEST(CircleOneHotTest, input_NEG)
+{
+ luci::CircleOneHot one_hot_node;
+ luci::CircleOneHot node;
+
+ one_hot_node.indices(&node);
+ one_hot_node.depth(&node);
+ one_hot_node.on_value(&node);
+ one_hot_node.off_value(&node);
+ ASSERT_NE(nullptr, one_hot_node.indices());
+ ASSERT_NE(nullptr, one_hot_node.depth());
+ ASSERT_NE(nullptr, one_hot_node.on_value());
+ ASSERT_NE(nullptr, one_hot_node.off_value());
+
+ one_hot_node.indices(nullptr);
+ one_hot_node.depth(nullptr);
+ one_hot_node.on_value(nullptr);
+ one_hot_node.off_value(nullptr);
+ ASSERT_EQ(nullptr, one_hot_node.indices());
+ ASSERT_EQ(nullptr, one_hot_node.depth());
+ ASSERT_EQ(nullptr, one_hot_node.on_value());
+ ASSERT_EQ(nullptr, one_hot_node.off_value());
+
+ one_hot_node.axis(1);
+ ASSERT_NE(-1, one_hot_node.axis());
+}
+
+TEST(CircleOneHotTest, arity_NEG)
+{
+ luci::CircleOneHot one_hot_node;
+
+ ASSERT_NO_THROW(one_hot_node.arg(3));
+ ASSERT_THROW(one_hot_node.arg(4), std::out_of_range);
+}
+
+TEST(CircleOneHotTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleOneHot one_hot_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(one_hot_node.accept(&tv), std::exception);
+}
+
+TEST(CircleOneHotTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleOneHot one_hot_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(one_hot_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CirclePRelu.test.cpp b/compiler/luci/lang/src/Nodes/CirclePRelu.test.cpp
new file mode 100644
index 000000000..8355c6d97
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CirclePRelu.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CirclePRelu.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CirclePReluTest, constructor_P)
+{
+ luci::CirclePRelu prelu_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), prelu_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::PRELU, prelu_node.opcode());
+
+ ASSERT_EQ(nullptr, prelu_node.input());
+ ASSERT_EQ(nullptr, prelu_node.alpha());
+}
+
+TEST(CirclePReluTest, input_NEG)
+{
+ luci::CirclePRelu prelu_node;
+ luci::CirclePRelu node;
+
+ prelu_node.input(&node);
+ prelu_node.alpha(&node);
+ ASSERT_NE(nullptr, prelu_node.input());
+ ASSERT_NE(nullptr, prelu_node.alpha());
+
+ prelu_node.input(nullptr);
+ prelu_node.alpha(nullptr);
+ ASSERT_EQ(nullptr, prelu_node.input());
+ ASSERT_EQ(nullptr, prelu_node.alpha());
+}
+
+TEST(CirclePReluTest, arity_NEG)
+{
+ luci::CirclePRelu prelu_node;
+
+ ASSERT_NO_THROW(prelu_node.arg(1));
+ ASSERT_THROW(prelu_node.arg(2), std::out_of_range);
+}
+
+TEST(CirclePReluTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CirclePRelu prelu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(prelu_node.accept(&tv), std::exception);
+}
+
+TEST(CirclePReluTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CirclePRelu prelu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(prelu_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CirclePack.test.cpp b/compiler/luci/lang/src/Nodes/CirclePack.test.cpp
index 5c9a96f7c..5e64f0d89 100644
--- a/compiler/luci/lang/src/Nodes/CirclePack.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CirclePack.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CirclePack.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,12 +25,60 @@ TEST(CirclePackTest, constructor)
{
luci::CirclePack pack_node(3);
- ASSERT_EQ(pack_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(pack_node.opcode(), luci::CircleOpcode::PACK);
+ ASSERT_EQ(luci::CircleDialect::get(), pack_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::PACK, pack_node.opcode());
- ASSERT_EQ(pack_node.axis(), 0);
- ASSERT_EQ(pack_node.values_count(), 3);
- ASSERT_EQ(pack_node.values(0), nullptr);
- ASSERT_EQ(pack_node.values(1), nullptr);
- ASSERT_EQ(pack_node.values(2), nullptr);
+ ASSERT_EQ(0, pack_node.axis());
+ ASSERT_EQ(3, pack_node.values_count());
+ ASSERT_EQ(nullptr, pack_node.values(0));
+ ASSERT_EQ(nullptr, pack_node.values(1));
+ ASSERT_EQ(nullptr, pack_node.values(2));
+}
+
+TEST(CirclePackTest, input_NEG)
+{
+ luci::CirclePack pack_node(2);
+ luci::CirclePack node(2);
+
+ pack_node.values(0, &node);
+ pack_node.values(1, &node);
+ ASSERT_NE(nullptr, pack_node.values(0));
+ ASSERT_NE(nullptr, pack_node.values(1));
+
+ pack_node.values(0, nullptr);
+ pack_node.values(1, nullptr);
+ ASSERT_EQ(nullptr, pack_node.values(0));
+ ASSERT_EQ(nullptr, pack_node.values(1));
+}
+
+TEST(CirclePackTest, arity_NEG)
+{
+ luci::CirclePack pack_node(5);
+
+ ASSERT_NO_THROW(pack_node.arg(4));
+ ASSERT_THROW(pack_node.arg(5), std::out_of_range);
+}
+
+TEST(CirclePackTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CirclePack pack_node(2);
+
+ TestVisitor tv;
+ ASSERT_THROW(pack_node.accept(&tv), std::exception);
+}
+
+TEST(CirclePackTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CirclePack pack_node(2);
+
+ TestVisitor tv;
+ ASSERT_THROW(pack_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CirclePad.test.cpp b/compiler/luci/lang/src/Nodes/CirclePad.test.cpp
index 3a23fa0f0..12c66b7ea 100644
--- a/compiler/luci/lang/src/Nodes/CirclePad.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CirclePad.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CirclePad.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CirclePadTest, constructor_P)
{
luci::CirclePad pad_node;
- ASSERT_EQ(pad_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(pad_node.opcode(), luci::CircleOpcode::PAD);
+ ASSERT_EQ(luci::CircleDialect::get(), pad_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::PAD, pad_node.opcode());
- ASSERT_EQ(pad_node.input(), nullptr);
- ASSERT_EQ(pad_node.paddings(), nullptr);
+ ASSERT_EQ(nullptr, pad_node.input());
+ ASSERT_EQ(nullptr, pad_node.paddings());
+}
+
+TEST(CirclePadTest, input_NEG)
+{
+ luci::CirclePad pad_node;
+ luci::CirclePad node;
+
+ pad_node.input(&node);
+ pad_node.paddings(&node);
+ ASSERT_NE(nullptr, pad_node.input());
+ ASSERT_NE(nullptr, pad_node.paddings());
+
+ pad_node.input(nullptr);
+ pad_node.paddings(nullptr);
+ ASSERT_EQ(nullptr, pad_node.input());
+ ASSERT_EQ(nullptr, pad_node.paddings());
+}
+
+TEST(CirclePadTest, arity_NEG)
+{
+ luci::CirclePad pad_node;
+
+ ASSERT_NO_THROW(pad_node.arg(1));
+ ASSERT_THROW(pad_node.arg(2), std::out_of_range);
+}
+
+TEST(CirclePadTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CirclePad pad_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(pad_node.accept(&tv), std::exception);
+}
+
+TEST(CirclePadTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CirclePad pad_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(pad_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CirclePow.test.cpp b/compiler/luci/lang/src/Nodes/CirclePow.test.cpp
new file mode 100644
index 000000000..67ba0882b
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CirclePow.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CirclePow.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CirclePowTest, constructor_P)
+{
+ luci::CirclePow pow_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), pow_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::POW, pow_node.opcode());
+
+ ASSERT_EQ(nullptr, pow_node.x());
+ ASSERT_EQ(nullptr, pow_node.y());
+}
+
+TEST(CirclePowTest, input_NEG)
+{
+ luci::CirclePow pow_node;
+ luci::CirclePow node;
+
+ pow_node.x(&node);
+ pow_node.y(&node);
+ ASSERT_NE(nullptr, pow_node.x());
+ ASSERT_NE(nullptr, pow_node.y());
+
+ pow_node.x(nullptr);
+ pow_node.y(nullptr);
+ ASSERT_EQ(nullptr, pow_node.x());
+ ASSERT_EQ(nullptr, pow_node.y());
+}
+
+TEST(CirclePowTest, arity_NEG)
+{
+ luci::CirclePow pow_node;
+
+ ASSERT_NO_THROW(pow_node.arg(1));
+ ASSERT_THROW(pow_node.arg(2), std::out_of_range);
+}
+
+TEST(CirclePowTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CirclePow pow_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(pow_node.accept(&tv), std::exception);
+}
+
+TEST(CirclePowTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CirclePow pow_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(pow_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleRange.test.cpp b/compiler/luci/lang/src/Nodes/CircleRange.test.cpp
new file mode 100644
index 000000000..dd54dfd6e
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleRange.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleRange.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleRangeTest, constructor)
+{
+ luci::CircleRange range_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), range_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RANGE, range_node.opcode());
+
+ ASSERT_EQ(nullptr, range_node.start());
+ ASSERT_EQ(nullptr, range_node.limit());
+ ASSERT_EQ(nullptr, range_node.delta());
+}
+
+TEST(CircleRangeTest, input_NEG)
+{
+ luci::CircleRange range_node;
+ luci::CircleRange node;
+
+ range_node.start(&node);
+ range_node.limit(&node);
+ range_node.delta(&node);
+ ASSERT_NE(nullptr, range_node.start());
+ ASSERT_NE(nullptr, range_node.limit());
+ ASSERT_NE(nullptr, range_node.delta());
+
+ range_node.start(nullptr);
+ range_node.limit(nullptr);
+ range_node.delta(nullptr);
+ ASSERT_EQ(nullptr, range_node.start());
+ ASSERT_EQ(nullptr, range_node.limit());
+ ASSERT_EQ(nullptr, range_node.delta());
+}
+
+TEST(CircleRangeTest, arity_NEG)
+{
+ luci::CircleRange range_node;
+
+ ASSERT_NO_THROW(range_node.arg(2));
+ ASSERT_THROW(range_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleRangeTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleRange range_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(range_node.accept(&tv), std::exception);
+}
+
+TEST(CircleRangeTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleRange range_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(range_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleRank.test.cpp b/compiler/luci/lang/src/Nodes/CircleRank.test.cpp
new file mode 100644
index 000000000..e64eae235
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleRank.test.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleRank.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleRankTest, constructor_P)
+{
+ luci::CircleRank rank_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), rank_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RANK, rank_node.opcode());
+
+ ASSERT_EQ(nullptr, rank_node.input());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleReduceAny.test.cpp b/compiler/luci/lang/src/Nodes/CircleReduceAny.test.cpp
new file mode 100644
index 000000000..cd5c6b746
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleReduceAny.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleReduceAny.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleReduceAnyTest, constructor)
+{
+ luci::CircleReduceAny reduce_any_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), reduce_any_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::REDUCE_ANY, reduce_any_node.opcode());
+
+ ASSERT_EQ(nullptr, reduce_any_node.input());
+ ASSERT_EQ(nullptr, reduce_any_node.reduction_indices());
+
+ ASSERT_FALSE(reduce_any_node.keep_dims());
+}
+
+TEST(CircleReduceAnyTest, input_NEG)
+{
+ luci::CircleReduceAny reduce_any_node;
+ luci::CircleReduceAny node;
+
+ reduce_any_node.input(&node);
+ reduce_any_node.reduction_indices(&node);
+ ASSERT_NE(nullptr, reduce_any_node.input());
+ ASSERT_NE(nullptr, reduce_any_node.reduction_indices());
+
+ reduce_any_node.input(nullptr);
+ reduce_any_node.reduction_indices(nullptr);
+ ASSERT_EQ(nullptr, reduce_any_node.input());
+ ASSERT_EQ(nullptr, reduce_any_node.reduction_indices());
+
+ reduce_any_node.keep_dims(true);
+ ASSERT_TRUE(reduce_any_node.keep_dims());
+}
+
+TEST(CircleReduceAnyTest, arity_NEG)
+{
+ luci::CircleReduceAny reduce_any_node;
+
+ ASSERT_NO_THROW(reduce_any_node.arg(1));
+ ASSERT_THROW(reduce_any_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleReduceAnyTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleReduceAny reduce_any_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_any_node.accept(&tv), std::exception);
+}
+
+TEST(CircleReduceAnyTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleReduceAny reduce_any_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_any_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleReduceMax.test.cpp b/compiler/luci/lang/src/Nodes/CircleReduceMax.test.cpp
new file mode 100644
index 000000000..bdd1818e0
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleReduceMax.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleReduceMax.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleReduceMaxTest, constructor_P)
+{
+ luci::CircleReduceMax reduce_max_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), reduce_max_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::REDUCE_MAX, reduce_max_node.opcode());
+
+ ASSERT_EQ(nullptr, reduce_max_node.input());
+ ASSERT_EQ(nullptr, reduce_max_node.reduction_indices());
+
+ ASSERT_FALSE(reduce_max_node.keep_dims());
+}
+
+TEST(CircleReduceMaxTest, input_NEG)
+{
+ luci::CircleReduceMax reduce_max_node;
+ luci::CircleReduceMax node;
+
+ reduce_max_node.input(&node);
+ reduce_max_node.reduction_indices(&node);
+ ASSERT_NE(nullptr, reduce_max_node.input());
+ ASSERT_NE(nullptr, reduce_max_node.reduction_indices());
+
+ reduce_max_node.input(nullptr);
+ reduce_max_node.reduction_indices(nullptr);
+ ASSERT_EQ(nullptr, reduce_max_node.input());
+ ASSERT_EQ(nullptr, reduce_max_node.reduction_indices());
+
+ reduce_max_node.keep_dims(true);
+ ASSERT_TRUE(reduce_max_node.keep_dims());
+}
+
+TEST(CircleReduceMaxTest, arity_NEG)
+{
+ luci::CircleReduceMax reduce_max_node;
+
+ ASSERT_NO_THROW(reduce_max_node.arg(1));
+ ASSERT_THROW(reduce_max_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleReduceMaxTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleReduceMax reduce_max_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_max_node.accept(&tv), std::exception);
+}
+
+TEST(CircleReduceMaxTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleReduceMax reduce_max_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_max_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleReduceMin.test.cpp b/compiler/luci/lang/src/Nodes/CircleReduceMin.test.cpp
new file mode 100644
index 000000000..ba99ae648
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleReduceMin.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleReduceMin.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleReduceMinTest, constructor_P)
+{
+ luci::CircleReduceMin reduce_min_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), reduce_min_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::REDUCE_MIN, reduce_min_node.opcode());
+
+ ASSERT_EQ(nullptr, reduce_min_node.input());
+ ASSERT_EQ(nullptr, reduce_min_node.reduction_indices());
+
+ ASSERT_FALSE(reduce_min_node.keep_dims());
+}
+
+TEST(CircleReduceMinTest, input_NEG)
+{
+ luci::CircleReduceMin reduce_min_node;
+ luci::CircleReduceMin node;
+
+ reduce_min_node.input(&node);
+ reduce_min_node.reduction_indices(&node);
+ ASSERT_NE(nullptr, reduce_min_node.input());
+ ASSERT_NE(nullptr, reduce_min_node.reduction_indices());
+
+ reduce_min_node.input(nullptr);
+ reduce_min_node.reduction_indices(nullptr);
+ ASSERT_EQ(nullptr, reduce_min_node.input());
+ ASSERT_EQ(nullptr, reduce_min_node.reduction_indices());
+
+ reduce_min_node.keep_dims(true);
+ ASSERT_TRUE(reduce_min_node.keep_dims());
+}
+
+TEST(CircleReduceMinTest, arity_NEG)
+{
+ luci::CircleReduceMin reduce_min_node;
+
+ ASSERT_NO_THROW(reduce_min_node.arg(1));
+ ASSERT_THROW(reduce_min_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleReduceMinTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleReduceMin reduce_min_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_min_node.accept(&tv), std::exception);
+}
+
+TEST(CircleReduceMinTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleReduceMin reduce_min_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_min_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleReduceProd.test.cpp b/compiler/luci/lang/src/Nodes/CircleReduceProd.test.cpp
new file mode 100644
index 000000000..f60b2905b
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleReduceProd.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleReduceProd.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleReduceProdTest, constructor)
+{
+ luci::CircleReduceProd reduce_prod_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), reduce_prod_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::REDUCE_PROD, reduce_prod_node.opcode());
+
+ ASSERT_EQ(nullptr, reduce_prod_node.input());
+ ASSERT_EQ(nullptr, reduce_prod_node.reduction_indices());
+
+ ASSERT_FALSE(reduce_prod_node.keep_dims());
+}
+
+TEST(CircleReduceProdTest, input_NEG)
+{
+ luci::CircleReduceProd reduce_prod_node;
+ luci::CircleReduceProd node;
+
+ reduce_prod_node.input(&node);
+ reduce_prod_node.reduction_indices(&node);
+ ASSERT_NE(nullptr, reduce_prod_node.input());
+ ASSERT_NE(nullptr, reduce_prod_node.reduction_indices());
+
+ reduce_prod_node.input(nullptr);
+ reduce_prod_node.reduction_indices(nullptr);
+ ASSERT_EQ(nullptr, reduce_prod_node.input());
+ ASSERT_EQ(nullptr, reduce_prod_node.reduction_indices());
+
+ reduce_prod_node.keep_dims(true);
+ ASSERT_TRUE(reduce_prod_node.keep_dims());
+}
+
+TEST(CircleReduceProdTest, arity_NEG)
+{
+ luci::CircleReduceProd reduce_prod_node;
+
+ ASSERT_NO_THROW(reduce_prod_node.arg(1));
+ ASSERT_THROW(reduce_prod_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleReduceProdTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleReduceProd reduce_prod_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_prod_node.accept(&tv), std::exception);
+}
+
+TEST(CircleReduceProdTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleReduceProd reduce_prod_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reduce_prod_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleRelu.test.cpp b/compiler/luci/lang/src/Nodes/CircleRelu.test.cpp
index 19ea88aa6..35796509c 100644
--- a/compiler/luci/lang/src/Nodes/CircleRelu.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleRelu.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleRelu.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleReluTest, constructor_P)
{
luci::CircleRelu relu_node;
- ASSERT_EQ(relu_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(relu_node.opcode(), luci::CircleOpcode::RELU);
+ ASSERT_EQ(luci::CircleDialect::get(), relu_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RELU, relu_node.opcode());
- ASSERT_EQ(relu_node.features(), nullptr);
+ ASSERT_EQ(nullptr, relu_node.features());
+}
+
+TEST(CircleReluTest, input_NEG)
+{
+ luci::CircleRelu relu_node;
+ luci::CircleRelu node;
+
+ relu_node.features(&node);
+ ASSERT_NE(nullptr, relu_node.features());
+
+ relu_node.features(nullptr);
+ ASSERT_EQ(nullptr, relu_node.features());
+}
+
+TEST(CircleReluTest, arity_NEG)
+{
+ luci::CircleRelu relu_node;
+
+ ASSERT_NO_THROW(relu_node.arg(0));
+ ASSERT_THROW(relu_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleReluTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleRelu relu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu_node.accept(&tv), std::exception);
+}
+
+TEST(CircleReluTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleRelu relu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleRelu6.test.cpp b/compiler/luci/lang/src/Nodes/CircleRelu6.test.cpp
index 74bf2e86a..647a5d7ba 100644
--- a/compiler/luci/lang/src/Nodes/CircleRelu6.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleRelu6.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleRelu6.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleRelu6Test, constructor_P)
{
luci::CircleRelu6 relu6_node;
- ASSERT_EQ(relu6_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(relu6_node.opcode(), luci::CircleOpcode::RELU6);
+ ASSERT_EQ(luci::CircleDialect::get(), relu6_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RELU6, relu6_node.opcode());
- ASSERT_EQ(relu6_node.features(), nullptr);
+ ASSERT_EQ(nullptr, relu6_node.features());
+}
+
+TEST(CircleRelu6Test, input_NEG)
+{
+ luci::CircleRelu6 relu6_node;
+ luci::CircleRelu6 node;
+
+ relu6_node.features(&node);
+ ASSERT_NE(nullptr, relu6_node.features());
+
+ relu6_node.features(nullptr);
+ ASSERT_EQ(nullptr, relu6_node.features());
+}
+
+TEST(CircleRelu6Test, arity_NEG)
+{
+ luci::CircleRelu6 relu6_node;
+
+ ASSERT_NO_THROW(relu6_node.arg(0));
+ ASSERT_THROW(relu6_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleRelu6Test, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleRelu6 relu6_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu6_node.accept(&tv), std::exception);
+}
+
+TEST(CircleRelu6Test, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleRelu6 relu6_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu6_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleReluN1To1.test.cpp b/compiler/luci/lang/src/Nodes/CircleReluN1To1.test.cpp
new file mode 100644
index 000000000..8de84ac42
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleReluN1To1.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleReluN1To1.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleReluN1To1Test, constructor)
+{
+ luci::CircleReluN1To1 relu_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), relu_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RELU_N1_TO_1, relu_node.opcode());
+
+ ASSERT_EQ(nullptr, relu_node.features());
+}
+
+TEST(CircleReluN1To1Test, input_NEG)
+{
+ luci::CircleReluN1To1 relu_node;
+ luci::CircleReluN1To1 node;
+
+ relu_node.features(&node);
+ ASSERT_NE(nullptr, relu_node.features());
+
+ relu_node.features(nullptr);
+ ASSERT_EQ(nullptr, relu_node.features());
+}
+
+TEST(CircleReluN1To1Test, arity_NEG)
+{
+ luci::CircleReluN1To1 relu_node;
+
+ ASSERT_NO_THROW(relu_node.arg(0));
+ ASSERT_THROW(relu_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleReluN1To1Test, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleReluN1To1 relu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu_node.accept(&tv), std::exception);
+}
+
+TEST(CircleReluN1To1Test, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleReluN1To1 relu_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(relu_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleReshape.test.cpp b/compiler/luci/lang/src/Nodes/CircleReshape.test.cpp
index 7bc2d32a4..236fde28b 100644
--- a/compiler/luci/lang/src/Nodes/CircleReshape.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleReshape.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleReshape.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,12 +25,12 @@ TEST(CircleReshapeTest, constructor_P)
{
luci::CircleReshape reshape;
- ASSERT_EQ(reshape.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(reshape.opcode(), luci::CircleOpcode::RESHAPE);
+ ASSERT_EQ(luci::CircleDialect::get(), reshape.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RESHAPE, reshape.opcode());
- ASSERT_EQ(reshape.tensor(), nullptr);
- ASSERT_EQ(reshape.shape(), nullptr);
- ASSERT_EQ(reshape.newShape()->rank(), 0);
+ ASSERT_EQ(nullptr, reshape.tensor());
+ ASSERT_EQ(nullptr, reshape.shape());
+ ASSERT_EQ(0, reshape.newShape()->rank());
}
TEST(CircleReshapeTest, alloc_new_shape_P)
@@ -37,12 +38,60 @@ TEST(CircleReshapeTest, alloc_new_shape_P)
luci::CircleReshape reshape;
reshape.newShape()->rank(2);
- ASSERT_EQ(reshape.newShape()->rank(), 2);
+ ASSERT_EQ(2, reshape.newShape()->rank());
reshape.newShape()->dim(0) = 0;
reshape.newShape()->dim(1) = 1;
auto &const_reshape = const_cast<const luci::CircleReshape &>(reshape);
- ASSERT_EQ(const_reshape.newShape()->dim(0), 0);
- ASSERT_EQ(const_reshape.newShape()->dim(1), 1);
+ ASSERT_EQ(0, const_reshape.newShape()->dim(0));
+ ASSERT_EQ(1, const_reshape.newShape()->dim(1));
+}
+
+TEST(CircleReshapeTest, input_NEG)
+{
+ luci::CircleReshape reshape_node;
+ luci::CircleReshape node;
+
+ reshape_node.tensor(&node);
+ reshape_node.shape(&node);
+ ASSERT_NE(nullptr, reshape_node.tensor());
+ ASSERT_NE(nullptr, reshape_node.shape());
+
+ reshape_node.tensor(nullptr);
+ reshape_node.shape(nullptr);
+ ASSERT_EQ(nullptr, reshape_node.tensor());
+ ASSERT_EQ(nullptr, reshape_node.shape());
+}
+
+TEST(CircleReshapeTest, arity_NEG)
+{
+ luci::CircleReshape reshape_node;
+
+ ASSERT_NO_THROW(reshape_node.arg(1));
+ ASSERT_THROW(reshape_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleReshapeTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleReshape reshape_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reshape_node.accept(&tv), std::exception);
+}
+
+TEST(CircleReshapeTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleReshape reshape_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(reshape_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleResizeBilinear.test.cpp b/compiler/luci/lang/src/Nodes/CircleResizeBilinear.test.cpp
new file mode 100644
index 000000000..a1481a640
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleResizeBilinear.test.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleResizeBilinear.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleResizeBilinearTest, constructor)
+{
+ luci::CircleResizeBilinear resize;
+
+ ASSERT_EQ(luci::CircleDialect::get(), resize.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RESIZE_BILINEAR, resize.opcode());
+
+ ASSERT_EQ(nullptr, resize.input());
+ ASSERT_EQ(nullptr, resize.size());
+ ASSERT_FALSE(resize.align_corners());
+ ASSERT_FALSE(resize.half_pixel_centers());
+}
+
+TEST(CircleResizeBilinearTest, input_NEG)
+{
+ luci::CircleResizeBilinear resize_node;
+ luci::CircleResizeBilinear node;
+
+ resize_node.input(&node);
+ resize_node.size(&node);
+ ASSERT_NE(nullptr, resize_node.input());
+ ASSERT_NE(nullptr, resize_node.size());
+
+ resize_node.input(nullptr);
+ resize_node.size(nullptr);
+ ASSERT_EQ(nullptr, resize_node.input());
+ ASSERT_EQ(nullptr, resize_node.size());
+
+ resize_node.align_corners(true);
+ ASSERT_TRUE(resize_node.align_corners());
+ resize_node.half_pixel_centers(true);
+ ASSERT_TRUE(resize_node.half_pixel_centers());
+}
+
+TEST(CircleResizeBilinearTest, arity_NEG)
+{
+ luci::CircleResizeBilinear resize_node;
+
+ ASSERT_NO_THROW(resize_node.arg(1));
+ ASSERT_THROW(resize_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleResizeBilinearTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleResizeBilinear resize_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(resize_node.accept(&tv), std::exception);
+}
+
+TEST(CircleResizeBilinearTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleResizeBilinear resize_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(resize_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleResizeNearestNeighbor.test.cpp b/compiler/luci/lang/src/Nodes/CircleResizeNearestNeighbor.test.cpp
new file mode 100644
index 000000000..00e0ae9ea
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleResizeNearestNeighbor.test.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleResizeNearestNeighbor.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleResizeNearestNeightborTest, constructor)
+{
+ luci::CircleResizeNearestNeighbor resize;
+
+ ASSERT_EQ(luci::CircleDialect::get(), resize.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RESIZE_NEAREST_NEIGHBOR, resize.opcode());
+
+ ASSERT_EQ(nullptr, resize.input());
+ ASSERT_EQ(nullptr, resize.size());
+ ASSERT_FALSE(resize.align_corners());
+}
+
+TEST(CircleResizeNearestNeightborTest, input_NEG)
+{
+ luci::CircleResizeNearestNeighbor resize_node;
+ luci::CircleResizeNearestNeighbor node;
+
+ resize_node.input(&node);
+ resize_node.size(&node);
+ ASSERT_NE(nullptr, resize_node.input());
+ ASSERT_NE(nullptr, resize_node.size());
+
+ resize_node.input(nullptr);
+ resize_node.size(nullptr);
+ ASSERT_EQ(nullptr, resize_node.input());
+ ASSERT_EQ(nullptr, resize_node.size());
+
+ resize_node.align_corners(true);
+ ASSERT_TRUE(resize_node.align_corners());
+}
+
+TEST(CircleResizeNearestNeightborTest, arity_NEG)
+{
+ luci::CircleResizeNearestNeighbor resize_node;
+
+ ASSERT_NO_THROW(resize_node.arg(1));
+ ASSERT_THROW(resize_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleResizeNearestNeightborTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleResizeNearestNeighbor resize_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(resize_node.accept(&tv), std::exception);
+}
+
+TEST(CircleResizeNearestNeightborTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleResizeNearestNeighbor resize_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(resize_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleReverseSequence.test.cpp b/compiler/luci/lang/src/Nodes/CircleReverseSequence.test.cpp
new file mode 100644
index 000000000..b1cc6d6d6
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleReverseSequence.test.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleReverseSequence.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleReverseSequenceTest, constructor_P)
+{
+ luci::CircleReverseSequence std_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), std_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::REVERSE_SEQUENCE, std_node.opcode());
+
+ ASSERT_EQ(nullptr, std_node.input());
+ ASSERT_EQ(nullptr, std_node.seq_lengths());
+
+ ASSERT_EQ(0, std_node.seq_axis());
+ ASSERT_EQ(0, std_node.batch_axis());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleReverseV2.test.cpp b/compiler/luci/lang/src/Nodes/CircleReverseV2.test.cpp
new file mode 100644
index 000000000..cc568e81a
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleReverseV2.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleReverseV2.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleReverseV2, constructor_P)
+{
+ luci::CircleReverseV2 std_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), std_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::REVERSE_V2, std_node.opcode());
+
+ ASSERT_EQ(nullptr, std_node.tensor());
+ ASSERT_EQ(nullptr, std_node.axis());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleRound.test.cpp b/compiler/luci/lang/src/Nodes/CircleRound.test.cpp
new file mode 100644
index 000000000..2f4518daf
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleRound.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleRound.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleRoundTest, constructor_P)
+{
+ luci::CircleRound round_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), round_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ROUND, round_node.opcode());
+
+ ASSERT_EQ(nullptr, round_node.x());
+}
+
+TEST(CircleRoundTest, input_NEG)
+{
+ luci::CircleRound round_node;
+ luci::CircleRound node;
+
+ round_node.x(&node);
+ ASSERT_NE(nullptr, round_node.x());
+
+ round_node.x(nullptr);
+ ASSERT_EQ(nullptr, round_node.x());
+}
+
+TEST(CircleRoundTest, arity_NEG)
+{
+ luci::CircleRound round_node;
+
+ ASSERT_NO_THROW(round_node.arg(0));
+ ASSERT_THROW(round_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleRoundTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleRound round_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(round_node.accept(&tv), std::exception);
+}
+
+TEST(CircleRoundTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleRound round_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(round_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleRsqrt.test.cpp b/compiler/luci/lang/src/Nodes/CircleRsqrt.test.cpp
index 51f6bab36..d038979c1 100644
--- a/compiler/luci/lang/src/Nodes/CircleRsqrt.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleRsqrt.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleRsqrt.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleRsqrtTest, constructor)
{
luci::CircleRsqrt rsqrt_node;
- ASSERT_EQ(rsqrt_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(rsqrt_node.opcode(), luci::CircleOpcode::RSQRT);
+ ASSERT_EQ(luci::CircleDialect::get(), rsqrt_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::RSQRT, rsqrt_node.opcode());
- ASSERT_EQ(rsqrt_node.x(), nullptr);
+ ASSERT_EQ(nullptr, rsqrt_node.x());
+}
+
+TEST(CircleRsqrtTest, input_NEG)
+{
+ luci::CircleRsqrt rsqrt_node;
+ luci::CircleRsqrt node;
+
+ rsqrt_node.x(&node);
+ ASSERT_NE(nullptr, rsqrt_node.x());
+
+ rsqrt_node.x(nullptr);
+ ASSERT_EQ(nullptr, rsqrt_node.x());
+}
+
+TEST(CircleRsqrtTest, arity_NEG)
+{
+ luci::CircleRsqrt rsqrt_node;
+
+ ASSERT_NO_THROW(rsqrt_node.arg(0));
+ ASSERT_THROW(rsqrt_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleRsqrtTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleRsqrt rsqrt_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(rsqrt_node.accept(&tv), std::exception);
+}
+
+TEST(CircleRsqrtTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleRsqrt rsqrt_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(rsqrt_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleScatterNd.test.cpp b/compiler/luci/lang/src/Nodes/CircleScatterNd.test.cpp
new file mode 100644
index 000000000..165f26b44
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleScatterNd.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleScatterNd.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleScatterNdTest, constructor_P)
+{
+ luci::CircleScatterNd scatter_nd_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), scatter_nd_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SCATTER_ND, scatter_nd_node.opcode());
+
+ ASSERT_EQ(nullptr, scatter_nd_node.indices());
+ ASSERT_EQ(nullptr, scatter_nd_node.updates());
+ ASSERT_EQ(nullptr, scatter_nd_node.shape());
+}
+
+TEST(CircleScatterNdTest, input_NEG)
+{
+ luci::CircleScatterNd scatter_nd_node;
+ luci::CircleScatterNd node;
+
+ scatter_nd_node.indices(&node);
+ scatter_nd_node.updates(&node);
+ scatter_nd_node.shape(&node);
+ ASSERT_NE(nullptr, scatter_nd_node.indices());
+ ASSERT_NE(nullptr, scatter_nd_node.updates());
+ ASSERT_NE(nullptr, scatter_nd_node.shape());
+
+ scatter_nd_node.indices(nullptr);
+ scatter_nd_node.updates(nullptr);
+ scatter_nd_node.shape(nullptr);
+ ASSERT_EQ(nullptr, scatter_nd_node.indices());
+ ASSERT_EQ(nullptr, scatter_nd_node.updates());
+ ASSERT_EQ(nullptr, scatter_nd_node.shape());
+}
+
+TEST(CircleScatterNdTest, arity_NEG)
+{
+ luci::CircleScatterNd scatter_nd_node;
+
+ ASSERT_NO_THROW(scatter_nd_node.arg(2));
+ ASSERT_THROW(scatter_nd_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleScatterNdTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleScatterNd scatter_nd_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(scatter_nd_node.accept(&tv), std::exception);
+}
+
+TEST(CircleScatterNdTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleScatterNd scatter_nd_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(scatter_nd_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSegmentSum.test.cpp b/compiler/luci/lang/src/Nodes/CircleSegmentSum.test.cpp
new file mode 100644
index 000000000..90469b7e2
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSegmentSum.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSegmentSum.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSegmentSumTest, constructor)
+{
+ luci::CircleSegmentSum segment_sum_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), segment_sum_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SEGMENT_SUM, segment_sum_node.opcode());
+
+ ASSERT_EQ(nullptr, segment_sum_node.input());
+ ASSERT_EQ(nullptr, segment_sum_node.segment_ids());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSelect.test.cpp b/compiler/luci/lang/src/Nodes/CircleSelect.test.cpp
new file mode 100644
index 000000000..7eeb538af
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSelect.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSelect.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSelectTest, constructor)
+{
+ luci::CircleSelect select_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), select_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SELECT, select_node.opcode());
+
+ ASSERT_EQ(nullptr, select_node.condition());
+ ASSERT_EQ(nullptr, select_node.t());
+ ASSERT_EQ(nullptr, select_node.e());
+}
+
+TEST(CircleSelectTest, input_NEG)
+{
+ luci::CircleSelect select_node;
+ luci::CircleSelect node;
+
+ select_node.condition(&node);
+ select_node.t(&node);
+ select_node.e(&node);
+ ASSERT_NE(nullptr, select_node.condition());
+ ASSERT_NE(nullptr, select_node.t());
+ ASSERT_NE(nullptr, select_node.e());
+
+ select_node.condition(nullptr);
+ select_node.t(nullptr);
+ select_node.e(nullptr);
+ ASSERT_EQ(nullptr, select_node.condition());
+ ASSERT_EQ(nullptr, select_node.t());
+ ASSERT_EQ(nullptr, select_node.e());
+}
+
+TEST(CircleSelectTest, arity_NEG)
+{
+ luci::CircleSelect select_node;
+
+ ASSERT_NO_THROW(select_node.arg(2));
+ ASSERT_THROW(select_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleSelectTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSelect select_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(select_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSelectTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSelect select_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(select_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSelectV2.test.cpp b/compiler/luci/lang/src/Nodes/CircleSelectV2.test.cpp
new file mode 100644
index 000000000..eea5fb83f
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSelectV2.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSelectV2.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSelectV2Test, constructor)
+{
+ luci::CircleSelectV2 select_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), select_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SELECT_V2, select_node.opcode());
+
+ ASSERT_EQ(nullptr, select_node.condition());
+ ASSERT_EQ(nullptr, select_node.t());
+ ASSERT_EQ(nullptr, select_node.e());
+}
+
+TEST(CircleSelectV2Test, input_NEG)
+{
+ luci::CircleSelectV2 select_v2_node;
+ luci::CircleSelectV2 node;
+
+ select_v2_node.condition(&node);
+ select_v2_node.t(&node);
+ select_v2_node.e(&node);
+ ASSERT_NE(nullptr, select_v2_node.condition());
+ ASSERT_NE(nullptr, select_v2_node.t());
+ ASSERT_NE(nullptr, select_v2_node.e());
+
+ select_v2_node.condition(nullptr);
+ select_v2_node.t(nullptr);
+ select_v2_node.e(nullptr);
+ ASSERT_EQ(nullptr, select_v2_node.condition());
+ ASSERT_EQ(nullptr, select_v2_node.t());
+ ASSERT_EQ(nullptr, select_v2_node.e());
+}
+
+TEST(CircleSelectV2Test, arity_NEG)
+{
+ luci::CircleSelectV2 select_v2_node;
+
+ ASSERT_NO_THROW(select_v2_node.arg(2));
+ ASSERT_THROW(select_v2_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleSelectV2Test, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSelectV2 select_v2_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(select_v2_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSelectV2Test, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSelectV2 select_v2_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(select_v2_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleShape.test.cpp b/compiler/luci/lang/src/Nodes/CircleShape.test.cpp
new file mode 100644
index 000000000..18271d2b2
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleShape.test.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleShape.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleShapeTest, constructor)
+{
+ luci::CircleShape shape_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), shape_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SHAPE, shape_node.opcode());
+
+ ASSERT_EQ(nullptr, shape_node.input());
+ ASSERT_EQ(loco::DataType::S32, shape_node.out_type());
+}
+
+TEST(CircleShapeTest, input_NEG)
+{
+ luci::CircleShape shape_node;
+ luci::CircleShape node;
+
+ shape_node.input(&node);
+ ASSERT_NE(nullptr, shape_node.input());
+
+ shape_node.input(nullptr);
+ ASSERT_EQ(nullptr, shape_node.input());
+
+ shape_node.out_type(loco::DataType::U8);
+ ASSERT_NE(loco::DataType::S32, shape_node.out_type());
+}
+
+TEST(CircleShapeTest, arity_NEG)
+{
+ luci::CircleShape shape_node;
+
+ ASSERT_NO_THROW(shape_node.arg(0));
+ ASSERT_THROW(shape_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleShapeTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleShape shape_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(shape_node.accept(&tv), std::exception);
+}
+
+TEST(CircleShapeTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleShape shape_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(shape_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSin.test.cpp b/compiler/luci/lang/src/Nodes/CircleSin.test.cpp
new file mode 100644
index 000000000..e01932d4f
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSin.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSin.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSinTest, constructor)
+{
+ luci::CircleSin sin_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), sin_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SIN, sin_node.opcode());
+
+ ASSERT_EQ(nullptr, sin_node.x());
+}
+
+TEST(CircleSinTest, input_NEG)
+{
+ luci::CircleSin sin_node;
+ luci::CircleSin node;
+
+ sin_node.x(&node);
+ ASSERT_NE(nullptr, sin_node.x());
+
+ sin_node.x(nullptr);
+ ASSERT_EQ(nullptr, sin_node.x());
+}
+
+TEST(CircleSinTest, arity_NEG)
+{
+ luci::CircleSin sin_node;
+
+ ASSERT_NO_THROW(sin_node.arg(0));
+ ASSERT_THROW(sin_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleSinTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSin sin_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sin_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSinTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSin sin_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sin_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSlice.test.cpp b/compiler/luci/lang/src/Nodes/CircleSlice.test.cpp
new file mode 100644
index 000000000..5563a34b9
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSlice.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSlice.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSliceTest, constructor)
+{
+ luci::CircleSlice s_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), s_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SLICE, s_node.opcode());
+
+ ASSERT_EQ(nullptr, s_node.input());
+ ASSERT_EQ(nullptr, s_node.begin());
+ ASSERT_EQ(nullptr, s_node.size());
+}
+
+TEST(CircleSliceTest, input_NEG)
+{
+ luci::CircleSlice s_node;
+ luci::CircleSlice node;
+
+ s_node.input(&node);
+ s_node.begin(&node);
+ s_node.size(&node);
+ ASSERT_NE(nullptr, s_node.input());
+ ASSERT_NE(nullptr, s_node.begin());
+ ASSERT_NE(nullptr, s_node.size());
+
+ s_node.input(nullptr);
+ s_node.begin(nullptr);
+ s_node.size(nullptr);
+ ASSERT_EQ(nullptr, s_node.input());
+ ASSERT_EQ(nullptr, s_node.begin());
+ ASSERT_EQ(nullptr, s_node.size());
+}
+
+TEST(CircleSliceTest, arity_NEG)
+{
+ luci::CircleSlice s_node;
+
+ ASSERT_NO_THROW(s_node.arg(2));
+ ASSERT_THROW(s_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleSliceTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSlice s_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(s_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSliceTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSlice s_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(s_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSoftmax.test.cpp b/compiler/luci/lang/src/Nodes/CircleSoftmax.test.cpp
index 7e994490c..b15c009f2 100644
--- a/compiler/luci/lang/src/Nodes/CircleSoftmax.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleSoftmax.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleSoftmax.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleSoftmaxTest, constructor_P)
{
luci::CircleSoftmax softmax_node;
- ASSERT_EQ(softmax_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(softmax_node.opcode(), luci::CircleOpcode::SOFTMAX);
+ ASSERT_EQ(luci::CircleDialect::get(), softmax_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SOFTMAX, softmax_node.opcode());
- ASSERT_EQ(softmax_node.logits(), nullptr);
+ ASSERT_EQ(nullptr, softmax_node.logits());
+}
+
+TEST(CircleSoftmaxTest, input_NEG)
+{
+ luci::CircleSoftmax softmax_node;
+ luci::CircleSoftmax node;
+
+ softmax_node.logits(&node);
+ ASSERT_NE(nullptr, softmax_node.logits());
+
+ softmax_node.logits(nullptr);
+ ASSERT_EQ(nullptr, softmax_node.logits());
+}
+
+TEST(CircleSoftmaxTest, arity_NEG)
+{
+ luci::CircleSoftmax softmax_node;
+
+ ASSERT_NO_THROW(softmax_node.arg(0));
+ ASSERT_THROW(softmax_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleSoftmaxTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSoftmax softmax_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(softmax_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSoftmaxTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSoftmax softmax_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(softmax_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleSpaceToBatchND.test.cpp b/compiler/luci/lang/src/Nodes/CircleSpaceToBatchND.test.cpp
new file mode 100644
index 000000000..8b4ac8f2b
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSpaceToBatchND.test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSpaceToBatchND.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSpaceToBatchNDTest, constructor)
+{
+ luci::CircleSpaceToBatchND stb_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), stb_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SPACE_TO_BATCH_ND, stb_node.opcode());
+
+ ASSERT_EQ(nullptr, stb_node.input());
+ ASSERT_EQ(nullptr, stb_node.block_shape());
+ ASSERT_EQ(nullptr, stb_node.paddings());
+}
+
+TEST(CircleSpaceToBatchNDTest, input_NEG)
+{
+ luci::CircleSpaceToBatchND stb_node;
+ luci::CircleSpaceToBatchND node;
+
+ stb_node.input(&node);
+ stb_node.block_shape(&node);
+ stb_node.paddings(&node);
+ ASSERT_NE(nullptr, stb_node.input());
+ ASSERT_NE(nullptr, stb_node.block_shape());
+ ASSERT_NE(nullptr, stb_node.paddings());
+
+ stb_node.input(nullptr);
+ stb_node.block_shape(nullptr);
+ stb_node.paddings(nullptr);
+ ASSERT_EQ(nullptr, stb_node.input());
+ ASSERT_EQ(nullptr, stb_node.block_shape());
+ ASSERT_EQ(nullptr, stb_node.paddings());
+}
+
+TEST(CircleSpaceToBatchNDTest, arity_NEG)
+{
+ luci::CircleSpaceToBatchND stb_node;
+
+ ASSERT_NO_THROW(stb_node.arg(2));
+ ASSERT_THROW(stb_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleSpaceToBatchNDTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSpaceToBatchND stb_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(stb_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSpaceToBatchNDTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSpaceToBatchND stb_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(stb_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSpaceToDepth.test.cpp b/compiler/luci/lang/src/Nodes/CircleSpaceToDepth.test.cpp
new file mode 100644
index 000000000..d49a2ce85
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSpaceToDepth.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSpaceToDepth.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSpaceToDepthTest, constructor)
+{
+ luci::CircleSpaceToDepth std_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), std_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SPACE_TO_DEPTH, std_node.opcode());
+
+ ASSERT_EQ(nullptr, std_node.input());
+}
+
+TEST(CircleSpaceToDepthTest, input_NEG)
+{
+ luci::CircleSpaceToDepth std_node;
+ luci::CircleSpaceToDepth node;
+
+ std_node.input(&node);
+ ASSERT_NE(nullptr, std_node.input());
+
+ std_node.input(nullptr);
+ ASSERT_EQ(nullptr, std_node.input());
+}
+
+TEST(CircleSpaceToDepthTest, arity_NEG)
+{
+ luci::CircleSpaceToDepth std_node;
+
+ ASSERT_NO_THROW(std_node.arg(0));
+ ASSERT_THROW(std_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleSpaceToDepthTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSpaceToDepth std_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(std_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSpaceToDepthTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSpaceToDepth std_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(std_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSparseToDense.test.cpp b/compiler/luci/lang/src/Nodes/CircleSparseToDense.test.cpp
new file mode 100644
index 000000000..de3cf6e9a
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSparseToDense.test.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSparseToDense.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSparseToDenseTest, constructor)
+{
+ luci::CircleSparseToDense stb_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), stb_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SPARSE_TO_DENSE, stb_node.opcode());
+
+ ASSERT_EQ(nullptr, stb_node.indices());
+ ASSERT_EQ(nullptr, stb_node.output_shape());
+ ASSERT_EQ(nullptr, stb_node.values());
+ ASSERT_EQ(nullptr, stb_node.default_value());
+
+ ASSERT_EQ(true, stb_node.validate_indices());
+}
+
+TEST(CircleSparseToDenseTest, input_NEG)
+{
+ luci::CircleSparseToDense stb_node;
+ luci::CircleSparseToDense node;
+
+ stb_node.indices(&node);
+ stb_node.output_shape(&node);
+ stb_node.values(&node);
+ stb_node.default_value(&node);
+ ASSERT_NE(nullptr, stb_node.indices());
+ ASSERT_NE(nullptr, stb_node.output_shape());
+ ASSERT_NE(nullptr, stb_node.values());
+ ASSERT_NE(nullptr, stb_node.default_value());
+
+ stb_node.indices(nullptr);
+ stb_node.output_shape(nullptr);
+ stb_node.values(nullptr);
+ stb_node.default_value(nullptr);
+ ASSERT_EQ(nullptr, stb_node.indices());
+ ASSERT_EQ(nullptr, stb_node.output_shape());
+ ASSERT_EQ(nullptr, stb_node.values());
+ ASSERT_EQ(nullptr, stb_node.default_value());
+}
+
+TEST(CircleSparseToDenseTest, arity_NEG)
+{
+ luci::CircleSparseToDense stb_node;
+
+ ASSERT_NO_THROW(stb_node.arg(3));
+ ASSERT_THROW(stb_node.arg(4), std::out_of_range);
+}
+
+TEST(CircleSparseToDenseTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSparseToDense stb_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(stb_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSparseToDenseTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSparseToDense stb_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(stb_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSplit.test.cpp b/compiler/luci/lang/src/Nodes/CircleSplit.test.cpp
new file mode 100644
index 000000000..acf8c4410
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSplit.test.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSplit.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSplitTest, constructor)
+{
+ luci::CircleSplit split_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), split_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SPLIT, split_node.opcode());
+
+ ASSERT_EQ(nullptr, split_node.input());
+ ASSERT_EQ(nullptr, split_node.split_dim());
+ ASSERT_EQ(0, split_node.num_split());
+}
+
+TEST(CircleSplitTest, input_NEG)
+{
+ luci::CircleSplit split_node;
+ luci::CircleSplit node;
+
+ split_node.input(&node);
+ split_node.split_dim(&node);
+ ASSERT_NE(nullptr, split_node.input());
+ ASSERT_NE(nullptr, split_node.split_dim());
+
+ split_node.input(nullptr);
+ split_node.split_dim(nullptr);
+ ASSERT_EQ(nullptr, split_node.input());
+ ASSERT_EQ(nullptr, split_node.split_dim());
+
+ split_node.num_split(100);
+ ASSERT_NE(0, split_node.num_split());
+}
+
+TEST(CircleSplitTest, arity_NEG)
+{
+ luci::CircleSplit split_node;
+
+ ASSERT_NO_THROW(split_node.arg(1));
+ ASSERT_THROW(split_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleSplitTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSplit split_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(split_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSplitTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSplit split_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(split_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSplitOut.test.cpp b/compiler/luci/lang/src/Nodes/CircleSplitOut.test.cpp
new file mode 100644
index 000000000..e93715825
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSplitOut.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSplitOut.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSplitOutTest, constructor)
+{
+ luci::CircleSplitOut vout_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), vout_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CIRCLESPLITOUT, vout_node.opcode());
+
+ ASSERT_EQ(nullptr, vout_node.input());
+ ASSERT_EQ(-1, vout_node.index());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSplitV.test.cpp b/compiler/luci/lang/src/Nodes/CircleSplitV.test.cpp
new file mode 100644
index 000000000..1f01608a3
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSplitV.test.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSplitV.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSplitVTest, constructor)
+{
+ luci::CircleSplitV splitv_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), splitv_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SPLIT_V, splitv_node.opcode());
+
+ ASSERT_EQ(nullptr, splitv_node.input());
+ ASSERT_EQ(nullptr, splitv_node.size_splits());
+ ASSERT_EQ(nullptr, splitv_node.split_dim());
+ ASSERT_EQ(0, splitv_node.num_split());
+}
+
+TEST(CircleSplitVTest, input_NEG)
+{
+ luci::CircleSplitV splitv_node;
+ luci::CircleSplitV node;
+
+ splitv_node.input(&node);
+ splitv_node.size_splits(&node);
+ splitv_node.split_dim(&node);
+ ASSERT_NE(nullptr, splitv_node.input());
+ ASSERT_NE(nullptr, splitv_node.size_splits());
+ ASSERT_NE(nullptr, splitv_node.split_dim());
+
+ splitv_node.input(nullptr);
+ splitv_node.size_splits(nullptr);
+ splitv_node.split_dim(nullptr);
+ ASSERT_EQ(nullptr, splitv_node.input());
+ ASSERT_EQ(nullptr, splitv_node.size_splits());
+ ASSERT_EQ(nullptr, splitv_node.split_dim());
+
+ splitv_node.num_split(100);
+ ASSERT_NE(0, splitv_node.num_split());
+}
+
+TEST(CircleSplitVTest, arity_NEG)
+{
+ luci::CircleSplitV splitv_node;
+
+ ASSERT_NO_THROW(splitv_node.arg(2));
+ ASSERT_THROW(splitv_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleSplitVTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSplitV splitv_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(splitv_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSplitVTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSplitV splitv_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(splitv_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSplitVOut.test.cpp b/compiler/luci/lang/src/Nodes/CircleSplitVOut.test.cpp
new file mode 100644
index 000000000..2a4fe3267
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSplitVOut.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSplitVOut.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSplitVOutTest, constructor)
+{
+ luci::CircleSplitVOut vout_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), vout_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CIRCLESPLITVOUT, vout_node.opcode());
+
+ ASSERT_EQ(nullptr, vout_node.input());
+ ASSERT_EQ(-1, vout_node.index());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSqrt.test.cpp b/compiler/luci/lang/src/Nodes/CircleSqrt.test.cpp
index 6cfb3bc94..f4222fd67 100644
--- a/compiler/luci/lang/src/Nodes/CircleSqrt.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleSqrt.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleSqrt.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,8 +25,52 @@ TEST(CircleSqrtTest, constructor_P)
{
luci::CircleSqrt sqrt_node;
- ASSERT_EQ(sqrt_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(sqrt_node.opcode(), luci::CircleOpcode::SQRT);
+ ASSERT_EQ(luci::CircleDialect::get(), sqrt_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SQRT, sqrt_node.opcode());
- ASSERT_EQ(sqrt_node.x(), nullptr);
+ ASSERT_EQ(nullptr, sqrt_node.x());
+}
+
+TEST(CircleSqrtTest, input_NEG)
+{
+ luci::CircleSqrt sqrt_node;
+ luci::CircleSqrt node;
+
+ sqrt_node.x(&node);
+ ASSERT_NE(nullptr, sqrt_node.x());
+
+ sqrt_node.x(nullptr);
+ ASSERT_EQ(nullptr, sqrt_node.x());
+}
+
+TEST(CircleSqrtTest, arity_NEG)
+{
+ luci::CircleSqrt sqrt_node;
+
+ ASSERT_NO_THROW(sqrt_node.arg(0));
+ ASSERT_THROW(sqrt_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleSqrtTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSqrt sqrt_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sqrt_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSqrtTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSqrt sqrt_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sqrt_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleSquare.test.cpp b/compiler/luci/lang/src/Nodes/CircleSquare.test.cpp
new file mode 100644
index 000000000..3b0a86eed
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSquare.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSquare.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSquareTest, constructor_P)
+{
+ luci::CircleSquare square_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), square_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SQUARE, square_node.opcode());
+
+ ASSERT_EQ(nullptr, square_node.x());
+}
+
+TEST(CircleSquareTest, input_NEG)
+{
+ luci::CircleSquare square_node;
+ luci::CircleSquare node;
+
+ square_node.x(&node);
+ ASSERT_NE(nullptr, square_node.x());
+
+ square_node.x(nullptr);
+ ASSERT_EQ(nullptr, square_node.x());
+}
+
+TEST(CircleSquareTest, arity_NEG)
+{
+ luci::CircleSquare square_node;
+
+ ASSERT_NO_THROW(square_node.arg(0));
+ ASSERT_THROW(square_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleSquareTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSquare square_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(square_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSquareTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSquare square_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(square_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSquaredDifference.test.cpp b/compiler/luci/lang/src/Nodes/CircleSquaredDifference.test.cpp
index 71df189b9..ea632218b 100644
--- a/compiler/luci/lang/src/Nodes/CircleSquaredDifference.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleSquaredDifference.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleSquaredDifference.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleSquaredDifferenceTest, constructor_P)
{
luci::CircleSquaredDifference sd_node;
- ASSERT_EQ(sd_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(sd_node.opcode(), luci::CircleOpcode::SQUARED_DIFFERENCE);
+ ASSERT_EQ(luci::CircleDialect::get(), sd_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SQUARED_DIFFERENCE, sd_node.opcode());
- ASSERT_EQ(sd_node.x(), nullptr);
- ASSERT_EQ(sd_node.y(), nullptr);
+ ASSERT_EQ(nullptr, sd_node.x());
+ ASSERT_EQ(nullptr, sd_node.y());
+}
+
+TEST(CircleSquaredDifferenceTest, input_NEG)
+{
+ luci::CircleSquaredDifference sd_node;
+ luci::CircleSquaredDifference node;
+
+ sd_node.x(&node);
+ sd_node.y(&node);
+ ASSERT_NE(nullptr, sd_node.x());
+ ASSERT_NE(nullptr, sd_node.y());
+
+ sd_node.x(nullptr);
+ sd_node.y(nullptr);
+ ASSERT_EQ(nullptr, sd_node.x());
+ ASSERT_EQ(nullptr, sd_node.y());
+}
+
+TEST(CircleSquaredDifferenceTest, arity_NEG)
+{
+ luci::CircleSquaredDifference sd_node;
+
+ ASSERT_NO_THROW(sd_node.arg(1));
+ ASSERT_THROW(sd_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleSquaredDifferenceTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSquaredDifference sd_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sd_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSquaredDifferenceTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSquaredDifference sd_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sd_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleSqueeze.test.cpp b/compiler/luci/lang/src/Nodes/CircleSqueeze.test.cpp
new file mode 100644
index 000000000..6dc3d03cd
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSqueeze.test.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSqueeze.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSqueezeTest, constructor_P)
+{
+ luci::CircleSqueeze squeeze;
+
+ ASSERT_EQ(luci::CircleDialect::get(), squeeze.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SQUEEZE, squeeze.opcode());
+
+ ASSERT_EQ(nullptr, squeeze.input());
+ ASSERT_EQ(0, squeeze.squeeze_dims().size());
+}
+
+TEST(CircleSqueezeTest, squeeze_dims)
+{
+ luci::CircleSqueeze squeeze;
+
+ squeeze.squeeze_dims({1, 2});
+
+ ASSERT_EQ(1, squeeze.squeeze_dims().at(0));
+ ASSERT_EQ(2, squeeze.squeeze_dims().at(1));
+}
+
+TEST(CircleSqueezeTest, input_NEG)
+{
+ luci::CircleSqueeze squeeze_node;
+ luci::CircleSqueeze node;
+
+ squeeze_node.input(&node);
+ ASSERT_NE(nullptr, squeeze_node.input());
+
+ squeeze_node.input(nullptr);
+ ASSERT_EQ(nullptr, squeeze_node.input());
+}
+
+TEST(CircleSqueezeTest, arity_NEG)
+{
+ luci::CircleSqueeze squeeze_node;
+
+ ASSERT_NO_THROW(squeeze_node.arg(0));
+ ASSERT_THROW(squeeze_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleSqueezeTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSqueeze squeeze_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(squeeze_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSqueezeTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSqueeze squeeze_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(squeeze_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleStridedSlice.test.cpp b/compiler/luci/lang/src/Nodes/CircleStridedSlice.test.cpp
new file mode 100644
index 000000000..1982e7b38
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleStridedSlice.test.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleStridedSlice.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleStridedSliceTest, constructor)
+{
+ luci::CircleStridedSlice ss_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), ss_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::STRIDED_SLICE, ss_node.opcode());
+
+ ASSERT_EQ(nullptr, ss_node.input());
+ ASSERT_EQ(nullptr, ss_node.begin());
+ ASSERT_EQ(nullptr, ss_node.end());
+ ASSERT_EQ(nullptr, ss_node.strides());
+
+ ASSERT_EQ(0, ss_node.begin_mask());
+ ASSERT_EQ(0, ss_node.end_mask());
+ ASSERT_EQ(0, ss_node.ellipsis_mask());
+ ASSERT_EQ(0, ss_node.new_axis_mask());
+ ASSERT_EQ(0, ss_node.shrink_axis_mask());
+}
+
+TEST(CircleStridedSliceTest, input_NEG)
+{
+ luci::CircleStridedSlice ss_node;
+ luci::CircleStridedSlice node;
+
+ ss_node.input(&node);
+ ss_node.begin(&node);
+ ss_node.end(&node);
+ ss_node.strides(&node);
+ ASSERT_NE(nullptr, ss_node.input());
+ ASSERT_NE(nullptr, ss_node.begin());
+ ASSERT_NE(nullptr, ss_node.end());
+ ASSERT_NE(nullptr, ss_node.strides());
+
+ ss_node.input(nullptr);
+ ss_node.begin(nullptr);
+ ss_node.end(nullptr);
+ ss_node.strides(nullptr);
+ ASSERT_EQ(nullptr, ss_node.input());
+ ASSERT_EQ(nullptr, ss_node.begin());
+ ASSERT_EQ(nullptr, ss_node.end());
+ ASSERT_EQ(nullptr, ss_node.strides());
+
+ ss_node.begin_mask(1);
+ ss_node.end_mask(1);
+ ss_node.ellipsis_mask(1);
+ ss_node.new_axis_mask(1);
+ ss_node.shrink_axis_mask(1);
+ ASSERT_NE(0, ss_node.begin_mask());
+ ASSERT_NE(0, ss_node.end_mask());
+ ASSERT_NE(0, ss_node.ellipsis_mask());
+ ASSERT_NE(0, ss_node.new_axis_mask());
+ ASSERT_NE(0, ss_node.shrink_axis_mask());
+}
+
+TEST(CircleStridedSliceTest, arity_NEG)
+{
+ luci::CircleStridedSlice ss_node;
+
+ ASSERT_NO_THROW(ss_node.arg(3));
+ ASSERT_THROW(ss_node.arg(4), std::out_of_range);
+}
+
+TEST(CircleStridedSliceTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleStridedSlice ss_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(ss_node.accept(&tv), std::exception);
+}
+
+TEST(CircleStridedSliceTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleStridedSlice ss_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(ss_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleSub.test.cpp b/compiler/luci/lang/src/Nodes/CircleSub.test.cpp
index ebb29446a..92c674bd0 100644
--- a/compiler/luci/lang/src/Nodes/CircleSub.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleSub.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleSub.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleSubTest, constructor_P)
{
luci::CircleSub sub_node;
- ASSERT_EQ(sub_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(sub_node.opcode(), luci::CircleOpcode::SUB);
+ ASSERT_EQ(luci::CircleDialect::get(), sub_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SUB, sub_node.opcode());
- ASSERT_EQ(sub_node.x(), nullptr);
- ASSERT_EQ(sub_node.y(), nullptr);
+ ASSERT_EQ(nullptr, sub_node.x());
+ ASSERT_EQ(nullptr, sub_node.y());
+}
+
+TEST(CircleSubTest, input_NEG)
+{
+ luci::CircleSub sub_node;
+ luci::CircleSub node;
+
+ sub_node.x(&node);
+ sub_node.y(&node);
+ ASSERT_NE(nullptr, sub_node.x());
+ ASSERT_NE(nullptr, sub_node.y());
+
+ sub_node.x(nullptr);
+ sub_node.y(nullptr);
+ ASSERT_EQ(nullptr, sub_node.x());
+ ASSERT_EQ(nullptr, sub_node.y());
+}
+
+TEST(CircleSubTest, arity_NEG)
+{
+ luci::CircleSub sub_node;
+
+ ASSERT_NO_THROW(sub_node.arg(1));
+ ASSERT_THROW(sub_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleSubTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSub sub_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sub_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSubTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSub sub_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sub_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleSum.test.cpp b/compiler/luci/lang/src/Nodes/CircleSum.test.cpp
new file mode 100644
index 000000000..84b51d671
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleSum.test.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleSum.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleSumTest, constructor_P)
+{
+ luci::CircleSum sum_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), sum_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::SUM, sum_node.opcode());
+
+ ASSERT_EQ(nullptr, sum_node.input());
+ ASSERT_EQ(nullptr, sum_node.reduction_indices());
+ ASSERT_EQ(false, sum_node.keep_dims());
+}
+
+TEST(CircleSumTest, input_NEG)
+{
+ luci::CircleSum sum_node;
+ luci::CircleSum node;
+
+ sum_node.input(&node);
+ sum_node.reduction_indices(&node);
+ ASSERT_NE(nullptr, sum_node.input());
+ ASSERT_NE(nullptr, sum_node.reduction_indices());
+
+ sum_node.input(nullptr);
+ sum_node.reduction_indices(nullptr);
+ ASSERT_EQ(nullptr, sum_node.input());
+ ASSERT_EQ(nullptr, sum_node.reduction_indices());
+
+ sum_node.keep_dims(true);
+ ASSERT_TRUE(sum_node.keep_dims());
+}
+
+TEST(CircleSumTest, arity_NEG)
+{
+ luci::CircleSum sum_node;
+
+ ASSERT_NO_THROW(sum_node.arg(1));
+ ASSERT_THROW(sum_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleSumTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleSum sum_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sum_node.accept(&tv), std::exception);
+}
+
+TEST(CircleSumTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleSum sum_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(sum_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleTanh.test.cpp b/compiler/luci/lang/src/Nodes/CircleTanh.test.cpp
new file mode 100644
index 000000000..257ecb24d
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleTanh.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleTanh.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleTanhTest, constructor)
+{
+ luci::CircleTanh tanh_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), tanh_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::TANH, tanh_node.opcode());
+
+ ASSERT_EQ(nullptr, tanh_node.x());
+}
+
+TEST(CircleTanhTest, input_NEG)
+{
+ luci::CircleTanh neg_node;
+ luci::CircleTanh node;
+
+ neg_node.x(&node);
+ ASSERT_NE(nullptr, neg_node.x());
+
+ neg_node.x(nullptr);
+ ASSERT_EQ(nullptr, neg_node.x());
+}
+
+TEST(CircleTanhTest, arity_NEG)
+{
+ luci::CircleTanh neg_node;
+
+ ASSERT_NO_THROW(neg_node.arg(0));
+ ASSERT_THROW(neg_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleTanhTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleTanh neg_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(neg_node.accept(&tv), std::exception);
+}
+
+TEST(CircleTanhTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleTanh neg_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(neg_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleTile.test.cpp b/compiler/luci/lang/src/Nodes/CircleTile.test.cpp
new file mode 100644
index 000000000..1695165cc
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleTile.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleTile.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleTileTest, constructor)
+{
+ luci::CircleTile tile_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), tile_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::TILE, tile_node.opcode());
+
+ ASSERT_EQ(nullptr, tile_node.input());
+ ASSERT_EQ(nullptr, tile_node.multiples());
+}
+
+TEST(CircleTileTest, input_NEG)
+{
+ luci::CircleTile tile_node;
+ luci::CircleTile node;
+
+ tile_node.input(&node);
+ tile_node.multiples(&node);
+ ASSERT_NE(nullptr, tile_node.input());
+ ASSERT_NE(nullptr, tile_node.multiples());
+
+ tile_node.input(nullptr);
+ tile_node.multiples(nullptr);
+ ASSERT_EQ(nullptr, tile_node.input());
+ ASSERT_EQ(nullptr, tile_node.multiples());
+}
+
+TEST(CircleTileTest, arity_NEG)
+{
+ luci::CircleTile tile_node;
+
+ ASSERT_NO_THROW(tile_node.arg(1));
+ ASSERT_THROW(tile_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleTileTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleTile tile_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(tile_node.accept(&tv), std::exception);
+}
+
+TEST(CircleTileTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleTile tile_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(tile_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleTopKV2.test.cpp b/compiler/luci/lang/src/Nodes/CircleTopKV2.test.cpp
new file mode 100644
index 000000000..31478d3af
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleTopKV2.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleTopKV2.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleTopKV2Test, constructor)
+{
+ luci::CircleTopKV2 topkv2_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), topkv2_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::TOPK_V2, topkv2_node.opcode());
+
+ ASSERT_EQ(nullptr, topkv2_node.input());
+ ASSERT_EQ(nullptr, topkv2_node.k());
+}
+
+TEST(CircleTopKV2Test, input_NEG)
+{
+ luci::CircleTopKV2 topkv2_node;
+ luci::CircleTopKV2 node;
+
+ topkv2_node.input(&node);
+ topkv2_node.k(&node);
+ ASSERT_NE(nullptr, topkv2_node.input());
+ ASSERT_NE(nullptr, topkv2_node.k());
+
+ topkv2_node.input(nullptr);
+ topkv2_node.k(nullptr);
+ ASSERT_EQ(nullptr, topkv2_node.input());
+ ASSERT_EQ(nullptr, topkv2_node.k());
+}
+
+TEST(CircleTopKV2Test, arity_NEG)
+{
+ luci::CircleTopKV2 topkv2_node;
+
+ ASSERT_NO_THROW(topkv2_node.arg(1));
+ ASSERT_THROW(topkv2_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleTopKV2Test, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleTopKV2 topkv2_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(topkv2_node.accept(&tv), std::exception);
+}
+
+TEST(CircleTopKV2Test, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleTopKV2 topkv2_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(topkv2_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleTopKV2Out.test.cpp b/compiler/luci/lang/src/Nodes/CircleTopKV2Out.test.cpp
new file mode 100644
index 000000000..d0835a27d
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleTopKV2Out.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleTopKV2Out.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleTopKV2OutTest, constructor)
+{
+ luci::CircleTopKV2Out topout_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), topout_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CIRCLETOPKV2OUT, topout_node.opcode());
+
+ ASSERT_EQ(nullptr, topout_node.input());
+ ASSERT_EQ(-1, topout_node.index());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleTranspose.test.cpp b/compiler/luci/lang/src/Nodes/CircleTranspose.test.cpp
index 7233869e6..f4db3f37b 100644
--- a/compiler/luci/lang/src/Nodes/CircleTranspose.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleTranspose.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleTranspose.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,9 +25,57 @@ TEST(CircleTransposeTest, constructor_P)
{
luci::CircleTranspose tr_node;
- ASSERT_EQ(tr_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(tr_node.opcode(), luci::CircleOpcode::TRANSPOSE);
+ ASSERT_EQ(luci::CircleDialect::get(), tr_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::TRANSPOSE, tr_node.opcode());
- ASSERT_EQ(tr_node.a(), nullptr);
- ASSERT_EQ(tr_node.perm(), nullptr);
+ ASSERT_EQ(nullptr, tr_node.a());
+ ASSERT_EQ(nullptr, tr_node.perm());
+}
+
+TEST(CircleTransposeTest, input_NEG)
+{
+ luci::CircleTranspose tr_node;
+ luci::CircleTranspose node;
+
+ tr_node.a(&node);
+ tr_node.perm(&node);
+ ASSERT_NE(nullptr, tr_node.a());
+ ASSERT_NE(nullptr, tr_node.perm());
+
+ tr_node.a(nullptr);
+ tr_node.perm(nullptr);
+ ASSERT_EQ(nullptr, tr_node.a());
+ ASSERT_EQ(nullptr, tr_node.perm());
+}
+
+TEST(CircleTransposeTest, arity_NEG)
+{
+ luci::CircleTranspose tr_node;
+
+ ASSERT_NO_THROW(tr_node.arg(1));
+ ASSERT_THROW(tr_node.arg(2), std::out_of_range);
+}
+
+TEST(CircleTransposeTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleTranspose tr_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(tr_node.accept(&tv), std::exception);
+}
+
+TEST(CircleTransposeTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleTranspose tr_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(tr_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleTransposeConv.test.cpp b/compiler/luci/lang/src/Nodes/CircleTransposeConv.test.cpp
index 9615082d9..429169744 100644
--- a/compiler/luci/lang/src/Nodes/CircleTransposeConv.test.cpp
+++ b/compiler/luci/lang/src/Nodes/CircleTransposeConv.test.cpp
@@ -17,6 +17,7 @@
#include "luci/IR/Nodes/CircleTransposeConv.h"
#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
#include <gtest/gtest.h>
@@ -24,10 +25,74 @@ TEST(CircleTransposeConvTest, constructor_P)
{
luci::CircleTransposeConv trc_node;
- ASSERT_EQ(trc_node.dialect(), luci::CircleDialect::get());
- ASSERT_EQ(trc_node.opcode(), luci::CircleOpcode::TRANSPOSE_CONV);
+ ASSERT_EQ(luci::CircleDialect::get(), trc_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::TRANSPOSE_CONV, trc_node.opcode());
- ASSERT_EQ(trc_node.inputSizes(), nullptr);
- ASSERT_EQ(trc_node.filter(), nullptr);
- ASSERT_EQ(trc_node.outBackprop(), nullptr);
+ ASSERT_EQ(nullptr, trc_node.inputSizes());
+ ASSERT_EQ(nullptr, trc_node.filter());
+ ASSERT_EQ(nullptr, trc_node.outBackprop());
+
+ ASSERT_EQ(luci::Padding::UNDEFINED, trc_node.padding());
+ ASSERT_EQ(1, trc_node.stride()->h());
+ ASSERT_EQ(1, trc_node.stride()->w());
+}
+
+TEST(CircleTransposeConvTest, input_NEG)
+{
+ luci::CircleTransposeConv trc_node;
+ luci::CircleTransposeConv node;
+
+ trc_node.inputSizes(&node);
+ trc_node.filter(&node);
+ trc_node.outBackprop(&node);
+ ASSERT_NE(nullptr, trc_node.inputSizes());
+ ASSERT_NE(nullptr, trc_node.filter());
+ ASSERT_NE(nullptr, trc_node.outBackprop());
+
+ trc_node.inputSizes(nullptr);
+ trc_node.filter(nullptr);
+ trc_node.outBackprop(nullptr);
+ ASSERT_EQ(nullptr, trc_node.inputSizes());
+ ASSERT_EQ(nullptr, trc_node.filter());
+ ASSERT_EQ(nullptr, trc_node.outBackprop());
+
+ trc_node.padding(luci::Padding::SAME);
+ ASSERT_NE(luci::Padding::UNDEFINED, trc_node.padding());
+
+ trc_node.stride()->h(2);
+ trc_node.stride()->w(2);
+ ASSERT_EQ(2, trc_node.stride()->h());
+ ASSERT_EQ(2, trc_node.stride()->w());
+}
+
+TEST(CircleTransposeConvTest, arity_NEG)
+{
+ luci::CircleTransposeConv trc_node;
+
+ ASSERT_NO_THROW(trc_node.arg(2));
+ ASSERT_THROW(trc_node.arg(3), std::out_of_range);
+}
+
+TEST(CircleTransposeConvTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleTransposeConv trc_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(trc_node.accept(&tv), std::exception);
+}
+
+TEST(CircleTransposeConvTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleTransposeConv trc_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(trc_node.accept(&tv), std::exception);
}
diff --git a/compiler/luci/lang/src/Nodes/CircleUnpack.test.cpp b/compiler/luci/lang/src/Nodes/CircleUnpack.test.cpp
new file mode 100644
index 000000000..4323028e4
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleUnpack.test.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleUnpack.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleUnpackTest, constructor)
+{
+ luci::CircleUnpack unpack_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), unpack_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::UNPACK, unpack_node.opcode());
+
+ ASSERT_EQ(nullptr, unpack_node.value());
+ ASSERT_EQ(0, unpack_node.num());
+ ASSERT_EQ(0, unpack_node.axis());
+}
+
+TEST(CircleUnpackTest, input_NEG)
+{
+ luci::CircleUnpack unpack_node;
+ luci::CircleUnpack node;
+
+ unpack_node.value(&node);
+ ASSERT_NE(nullptr, unpack_node.value());
+
+ unpack_node.value(nullptr);
+ ASSERT_EQ(nullptr, unpack_node.value());
+
+ unpack_node.num(1);
+ unpack_node.axis(1);
+ ASSERT_NE(0, unpack_node.num());
+ ASSERT_NE(0, unpack_node.axis());
+}
+
+TEST(CircleUnpackTest, arity_NEG)
+{
+ luci::CircleUnpack unpack_node;
+
+ ASSERT_NO_THROW(unpack_node.arg(0));
+ ASSERT_THROW(unpack_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleUnpackTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleUnpack unpack_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(unpack_node.accept(&tv), std::exception);
+}
+
+TEST(CircleUnpackTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleUnpack unpack_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(unpack_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleUnpackOut.test.cpp b/compiler/luci/lang/src/Nodes/CircleUnpackOut.test.cpp
new file mode 100644
index 000000000..7b8a41bf7
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleUnpackOut.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleUnpackOut.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleUnpackOutTest, constructor)
+{
+ luci::CircleUnpackOut unpackout_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), unpackout_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CIRCLEUNPACKOUT, unpackout_node.opcode());
+
+ ASSERT_EQ(nullptr, unpackout_node.input());
+ ASSERT_EQ(0, unpackout_node.index());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleWhere.test.cpp b/compiler/luci/lang/src/Nodes/CircleWhere.test.cpp
new file mode 100644
index 000000000..287eda460
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleWhere.test.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleWhere.h"
+#include "luci/IR/Nodes/CircleInput.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleWhereTest, constructor_P)
+{
+ luci::CircleWhere where_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), where_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::WHERE, where_node.opcode());
+
+ ASSERT_EQ(1, where_node.arity());
+ ASSERT_EQ(nullptr, where_node.condition());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleWhile.test.cpp b/compiler/luci/lang/src/Nodes/CircleWhile.test.cpp
new file mode 100644
index 000000000..19290c0a2
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleWhile.test.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleWhile.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleWhileTest, constructor)
+{
+ luci::CircleWhile while_node(2, 2);
+
+ ASSERT_EQ(luci::CircleDialect::get(), while_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::WHILE, while_node.opcode());
+
+ ASSERT_EQ(2, while_node.input_count());
+ ASSERT_EQ(2, while_node.output_count());
+
+ ASSERT_EQ(nullptr, while_node.input(0));
+ ASSERT_EQ(nullptr, while_node.input(1));
+
+ ASSERT_EQ(-1, while_node.cond_branch());
+ ASSERT_EQ(-1, while_node.body_branch());
+}
+
+TEST(CircleWhileTestDeath, invalid_arity_NEG)
+{
+ ASSERT_DEBUG_DEATH(luci::CircleWhile very_long_name_while_node(0, 1), "");
+}
+
+TEST(CircleWhileTestDeath, invalid_output_count_NEG)
+{
+ ASSERT_DEBUG_DEATH(luci::CircleWhile while_node(2, 0), "");
+}
+
+TEST(CircleWhileTestDeath, invalid_input_get_index_NEG)
+{
+ luci::CircleWhile while_node(2, 2);
+
+ EXPECT_ANY_THROW(while_node.input(100));
+}
+
+TEST(CircleWhileTestDeath, invalid_input_set_index_NEG)
+{
+ luci::CircleWhile while_node(2, 2);
+
+ EXPECT_ANY_THROW(while_node.input(100, nullptr));
+}
+
+TEST(CircleWhileTestDeath, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleWhile while_node(2, 2);
+
+ TestVisitor tv;
+ ASSERT_THROW(while_node.accept(&tv), std::exception);
+}
+
+TEST(CircleWhileTestDeath, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleWhile while_node(2, 2);
+
+ TestVisitor tv;
+ ASSERT_THROW(while_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleWhileOut.test.cpp b/compiler/luci/lang/src/Nodes/CircleWhileOut.test.cpp
new file mode 100644
index 000000000..1800e4098
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleWhileOut.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleWhileOut.h"
+
+#include "luci/IR/CircleDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleWhileOutTest, constructor)
+{
+ luci::CircleWhileOut whileout_node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), whileout_node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::CIRCLEWHILEOUT, whileout_node.opcode());
+
+ ASSERT_EQ(nullptr, whileout_node.input());
+ ASSERT_EQ(-1, whileout_node.index());
+}
diff --git a/compiler/luci/lang/src/Nodes/CircleZerosLike.test.cpp b/compiler/luci/lang/src/Nodes/CircleZerosLike.test.cpp
new file mode 100644
index 000000000..3368c8e3f
--- /dev/null
+++ b/compiler/luci/lang/src/Nodes/CircleZerosLike.test.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/IR/Nodes/CircleZerosLike.h"
+
+#include "luci/IR/CircleDialect.h"
+#include "luci/IR/CircleNodeVisitor.h"
+
+#include <gtest/gtest.h>
+
+TEST(CircleZerosLikeTest, constructor_P)
+{
+ luci::CircleZerosLike node;
+
+ ASSERT_EQ(luci::CircleDialect::get(), node.dialect());
+ ASSERT_EQ(luci::CircleOpcode::ZEROS_LIKE, node.opcode());
+
+ ASSERT_EQ(nullptr, node.input());
+}
+
+TEST(CircleZerosLikeTest, input_NEG)
+{
+ luci::CircleZerosLike zeros_node;
+ luci::CircleZerosLike node;
+
+ zeros_node.input(&node);
+ ASSERT_NE(nullptr, zeros_node.input());
+
+ zeros_node.input(nullptr);
+ ASSERT_EQ(nullptr, zeros_node.input());
+}
+
+TEST(CircleZerosLikeTest, arity_NEG)
+{
+ luci::CircleZerosLike zeros_node;
+
+ ASSERT_NO_THROW(zeros_node.arg(0));
+ ASSERT_THROW(zeros_node.arg(1), std::out_of_range);
+}
+
+TEST(CircleZerosLikeTest, visit_mutable_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeMutableVisitor<void>
+ {
+ };
+
+ luci::CircleZerosLike zeros_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(zeros_node.accept(&tv), std::exception);
+}
+
+TEST(CircleZerosLikeTest, visit_NEG)
+{
+ struct TestVisitor final : public luci::CircleNodeVisitor<void>
+ {
+ };
+
+ luci::CircleZerosLike zeros_node;
+
+ TestVisitor tv;
+ ASSERT_THROW(zeros_node.accept(&tv), std::exception);
+}
diff --git a/compiler/luci/log/CMakeLists.txt b/compiler/luci/log/CMakeLists.txt
index af2e7a768..5e822871b 100644
--- a/compiler/luci/log/CMakeLists.txt
+++ b/compiler/luci/log/CMakeLists.txt
@@ -6,4 +6,5 @@ target_include_directories(luci_log PUBLIC include)
target_link_libraries(luci_log PUBLIC hermes)
target_link_libraries(luci_log PRIVATE hermes_std)
target_link_libraries(luci_log PRIVATE nncc_common)
+target_link_libraries(luci_log PRIVATE luci_env)
install(TARGETS luci_log DESTINATION lib)
diff --git a/compiler/luci/log/include/luci/Log.h b/compiler/luci/log/include/luci/Log.h
index 51299a082..e148810d8 100644
--- a/compiler/luci/log/include/luci/Log.h
+++ b/compiler/luci/log/include/luci/Log.h
@@ -35,7 +35,7 @@ public:
/**
* @brief Logger Configuration
*
- * Users are able to turn logging on/off via MOCO_LOG environment variable.
+ * Users are able to turn logging on/off via LUCI_LOG environment variable.
*/
class LoggerConfig final : public hermes::Config
{
@@ -47,7 +47,9 @@ public:
void configure(const Logger *, hermes::Source::Setting &) const;
private:
- bool _enabled;
+ bool _show_warn = true;
+ bool _show_info = false;
+ int _show_verbose = 0;
};
} // namespace luci
@@ -64,8 +66,10 @@ private:
*/
#define LOGGER(name) ::luci::Logger name{::luci::LoggingContext::get()};
-// TODO Support FATAL, ERROR, WARN, and VERBOSE
+// TODO Support FATAL, ERROR
#define INFO(name) HERMES_INFO(name)
+#define WARN(name) HERMES_WARN(name)
+#define VERBOSE(name, lv) HERMES_VERBOSE(name, lv)
// WARNING!
//
diff --git a/compiler/luci/log/src/Log.cpp b/compiler/luci/log/src/Log.cpp
index 7e1634009..c26bf307b 100644
--- a/compiler/luci/log/src/Log.cpp
+++ b/compiler/luci/log/src/Log.cpp
@@ -16,6 +16,8 @@
#include "luci/Log.h"
+#include <luci/UserSettings.h>
+
#include <cassert>
#include <cstdlib>
#include <iostream>
@@ -36,6 +38,11 @@ template <> bool safecast<bool>(const char *s, const bool &value)
return (s == nullptr) ? value : (std::stoi(s) != 0);
}
+template <> int safecast<int>(const char *s, const int &value)
+{
+ return (s == nullptr) ? value : std::stoi(s);
+}
+
} // namespace
//
@@ -57,8 +64,16 @@ namespace luci
LoggerConfig::LoggerConfig()
{
- // Turn on logging if LUCI_LOG is set as non-zero value
- _enabled = safecast<bool>(std::getenv("LUCI_LOG"), false);
+ auto settings = luci::UserSettings::settings();
+
+ _show_warn = !settings->get(luci::UserSettings::Key::MuteWarnings);
+
+ // Turn on info logging if LUCI_LOG is set as non-zero value
+ _show_info = safecast<bool>(std::getenv("LUCI_LOG"), false);
+
+ // Turn on verbose logging if LUCI_LOG is set to some level
+ // VERBOSE(l, 1) will be visible with LUCI_LOG=2 and VERBOSE(l, 2) with LUCI_LOG=3 and so on
+ _show_verbose = safecast<int>(std::getenv("LUCI_LOG"), 0);
}
void LoggerConfig::configure(const hermes::Source *source, hermes::Source::Setting &setting) const
@@ -72,15 +87,24 @@ void LoggerConfig::configure(const hermes::Source *source, hermes::Source::Setti
void LoggerConfig::configure(const Logger *, hermes::Source::Setting &setting) const
{
- if (_enabled)
+ setting.filter(hermes::SeverityCategory::FATAL).reject_all();
+ setting.filter(hermes::SeverityCategory::ERROR).reject_all();
+ setting.filter(hermes::SeverityCategory::WARN).reject_all();
+ setting.filter(hermes::SeverityCategory::INFO).reject_all();
+ setting.filter(hermes::SeverityCategory::VERBOSE).reject_all();
+
+ // TODO enable FATAL and ERROR
+ if (_show_warn)
+ {
+ setting.filter(hermes::SeverityCategory::WARN).accept_all();
+ }
+ if (_show_info)
{
- // Enable all catagories
- setting.accept_all();
+ setting.filter(hermes::SeverityCategory::INFO).accept_all();
}
- else
+ if (_show_verbose)
{
- // Disable all catagories
- setting.reject_all();
+ setting.filter(hermes::SeverityCategory::VERBOSE).accept_upto(_show_verbose);
}
}
diff --git a/compiler/luci/logex/src/FormattedGraph.cpp b/compiler/luci/logex/src/FormattedGraph.cpp
index 894ebc151..4725ee3df 100644
--- a/compiler/luci/logex/src/FormattedGraph.cpp
+++ b/compiler/luci/logex/src/FormattedGraph.cpp
@@ -78,6 +78,8 @@ const char *to_str(loco::DataType type)
}
}
+const char *to_str(bool value) { return value ? "true" : "false"; }
+
const char *to_str(luci::FusedActFunc fused)
{
switch (fused)
@@ -108,6 +110,19 @@ const char *to_str(luci::Padding padding)
}
}
+const char *to_str(luci::MirrorPadMode mode)
+{
+ switch (mode)
+ {
+ case luci::MirrorPadMode::REFLECT:
+ return "REFLECT";
+ case luci::MirrorPadMode::SYMMETRIC:
+ return "SYMMETRIC";
+ default:
+ return "Error";
+ }
+}
+
std::string to_str(const luci::Stride *stride)
{
return pepper::str(stride->h(), ",", stride->w());
@@ -180,43 +195,187 @@ private:
#define IMPLEMENT(CLASS) bool summary(const CLASS *, locop::NodeSummary &) const final;
IMPLEMENT(luci::CircleAbs)
IMPLEMENT(luci::CircleAdd)
+ IMPLEMENT(luci::CircleAddN)
IMPLEMENT(luci::CircleArgMax)
+ IMPLEMENT(luci::CircleArgMin)
IMPLEMENT(luci::CircleAveragePool2D)
+ IMPLEMENT(luci::CircleBatchMatMul)
IMPLEMENT(luci::CircleBatchToSpaceND)
+ IMPLEMENT(luci::CircleCast)
+ IMPLEMENT(luci::CircleCeil)
IMPLEMENT(luci::CircleConcatenation)
IMPLEMENT(luci::CircleConst)
IMPLEMENT(luci::CircleConv2D)
IMPLEMENT(luci::CircleCos)
+ IMPLEMENT(luci::CircleCustom)
+ IMPLEMENT(luci::CircleDepthToSpace)
IMPLEMENT(luci::CircleDepthwiseConv2D)
IMPLEMENT(luci::CircleDiv)
+ IMPLEMENT(luci::CircleElu)
IMPLEMENT(luci::CircleExp)
+ IMPLEMENT(luci::CircleExpandDims)
+ IMPLEMENT(luci::CircleFill)
+ IMPLEMENT(luci::CircleFloor)
+ IMPLEMENT(luci::CircleFloorDiv)
+ IMPLEMENT(luci::CircleFloorMod)
IMPLEMENT(luci::CircleFullyConnected)
+ IMPLEMENT(luci::CircleGather)
+ IMPLEMENT(luci::CircleGatherNd)
+ IMPLEMENT(luci::CircleGreater)
+ IMPLEMENT(luci::CircleGreaterEqual)
+ IMPLEMENT(luci::CircleIf)
+ IMPLEMENT(luci::CircleL2Normalize)
+ IMPLEMENT(luci::CircleLeakyRelu)
+ IMPLEMENT(luci::CircleLess)
+ IMPLEMENT(luci::CircleLessEqual)
+ IMPLEMENT(luci::CircleLocalResponseNormalization)
+ IMPLEMENT(luci::CircleLog)
+ IMPLEMENT(luci::CircleLogicalAnd)
IMPLEMENT(luci::CircleLogicalNot)
IMPLEMENT(luci::CircleLogicalOr)
+ IMPLEMENT(luci::CircleLogistic)
+ IMPLEMENT(luci::CircleLogSoftmax)
+ IMPLEMENT(luci::CircleMatrixDiag)
+ IMPLEMENT(luci::CircleMatrixSetDiag)
IMPLEMENT(luci::CircleMaximum)
IMPLEMENT(luci::CircleMaxPool2D)
IMPLEMENT(luci::CircleMean)
+ IMPLEMENT(luci::CircleMinimum)
+ IMPLEMENT(luci::CircleMirrorPad)
IMPLEMENT(luci::CircleMul)
+ IMPLEMENT(luci::CircleNeg)
+ IMPLEMENT(luci::CircleNotEqual)
+ IMPLEMENT(luci::CircleOneHot)
IMPLEMENT(luci::CirclePack)
IMPLEMENT(luci::CirclePad)
+ IMPLEMENT(luci::CirclePow)
+ IMPLEMENT(luci::CirclePRelu)
+ IMPLEMENT(luci::CircleRange)
+ IMPLEMENT(luci::CircleRank)
+ IMPLEMENT(luci::CircleReduceAny)
+ IMPLEMENT(luci::CircleReduceMax)
+ IMPLEMENT(luci::CircleReduceMin)
+ IMPLEMENT(luci::CircleReduceProd)
IMPLEMENT(luci::CircleRelu)
IMPLEMENT(luci::CircleRelu6)
+ IMPLEMENT(luci::CircleReluN1To1)
IMPLEMENT(luci::CircleReshape)
+ IMPLEMENT(luci::CircleResizeBilinear)
+ IMPLEMENT(luci::CircleResizeNearestNeighbor)
+ IMPLEMENT(luci::CircleReverseSequence)
+ IMPLEMENT(luci::CircleReverseV2)
+ IMPLEMENT(luci::CircleRound)
IMPLEMENT(luci::CircleRsqrt)
+ IMPLEMENT(luci::CircleScatterNd)
+ IMPLEMENT(luci::CircleSegmentSum)
+ IMPLEMENT(luci::CircleSelect)
+ IMPLEMENT(luci::CircleSelectV2)
+ IMPLEMENT(luci::CircleShape)
+ IMPLEMENT(luci::CircleSin)
+ IMPLEMENT(luci::CircleSlice)
IMPLEMENT(luci::CircleSoftmax)
+ IMPLEMENT(luci::CircleSpaceToBatchND)
+ IMPLEMENT(luci::CircleSpaceToDepth)
+ IMPLEMENT(luci::CircleSparseToDense)
+ IMPLEMENT(luci::CircleSplit)
+ IMPLEMENT(luci::CircleSplitV)
IMPLEMENT(luci::CircleSqrt)
+ IMPLEMENT(luci::CircleSquare)
IMPLEMENT(luci::CircleSquaredDifference)
+ IMPLEMENT(luci::CircleSqueeze)
+ IMPLEMENT(luci::CircleStridedSlice)
IMPLEMENT(luci::CircleSub)
+ IMPLEMENT(luci::CircleSum)
+ IMPLEMENT(luci::CircleTanh)
+ IMPLEMENT(luci::CircleTile)
+ IMPLEMENT(luci::CircleTopKV2)
IMPLEMENT(luci::CircleTranspose)
IMPLEMENT(luci::CircleTransposeConv)
+ IMPLEMENT(luci::CircleUnpack)
+ IMPLEMENT(luci::CircleWhere)
+ IMPLEMENT(luci::CircleWhile)
+ IMPLEMENT(luci::CircleZerosLike)
// Circle Only
+ IMPLEMENT(luci::CircleBCQFullyConnected)
+ IMPLEMENT(luci::CircleBCQGather)
IMPLEMENT(luci::CircleInstanceNorm)
// Virtual nodes
IMPLEMENT(luci::CircleInput)
IMPLEMENT(luci::CircleOutput)
+ IMPLEMENT(luci::CircleIfOut)
+ IMPLEMENT(luci::CircleSplitOut)
+ IMPLEMENT(luci::CircleSplitVOut)
+ IMPLEMENT(luci::CircleTopKV2Out)
+ IMPLEMENT(luci::CircleUnpackOut)
+ IMPLEMENT(luci::CircleWhileOut)
#undef IMPLEMENT
};
+template <class CIRCLENODE>
+bool use_x(const locop::SymbolTable *tbl, const CIRCLENODE *node, locop::NodeSummary &s)
+{
+ s.args().append("x", tbl->lookup(node->x()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+template <class CIRCLENODE>
+bool use_input(const locop::SymbolTable *tbl, const CIRCLENODE *node, locop::NodeSummary &s)
+{
+ s.args().append("input", tbl->lookup(node->input()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+template <class CIRCLENODE>
+bool use_features(const locop::SymbolTable *tbl, const CIRCLENODE *node, locop::NodeSummary &s)
+{
+ s.args().append("features", tbl->lookup(node->features()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+template <class CIRCLENODE>
+bool use_xy(const locop::SymbolTable *tbl, const CIRCLENODE *node, locop::NodeSummary &s)
+{
+ s.args().append("x", tbl->lookup(node->x()));
+ s.args().append("y", tbl->lookup(node->y()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+template <class CIRCLENODE>
+bool use_xy_act(const locop::SymbolTable *tbl, const CIRCLENODE *node, locop::NodeSummary &s)
+{
+ assert(node->fusedActivationFunction() != luci::FusedActFunc::UNDEFINED);
+
+ s.args().append("x", tbl->lookup(node->x()));
+ s.args().append("y", tbl->lookup(node->y()));
+ s.args().append("fused_activation_function", to_str(node->fusedActivationFunction()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+template <class CIRCLENODE>
+bool use_reducer(const locop::SymbolTable *tbl, const CIRCLENODE *node, locop::NodeSummary &s)
+{
+ s.args().append("input", tbl->lookup(node->input()));
+ s.args().append("reduction_indices", tbl->lookup(node->reduction_indices()));
+ s.args().append("keep_dims", node->keep_dims() ? "true" : "false");
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+template <class CIRCLENODE>
+bool use_ido(const locop::SymbolTable *tbl, const CIRCLENODE *node, locop::NodeSummary &s)
+{
+ s.args().append("input", tbl->lookup(node->input()));
+ s.args().append("dimension", tbl->lookup(node->dimension()));
+ s.args().append("output_type", to_str(node->output_type()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
bool CircleNodeSummaryBuilderBase::build(const loco::Node *node, locop::NodeSummary &s) const
{
if (node->dialect() != luci::CircleDialect::get())
@@ -236,29 +395,31 @@ bool CircleNodeSummaryBuilderBase::build(const loco::Node *node, locop::NodeSumm
bool CircleNodeSummaryBuilder::summary(const luci::CircleAbs *node, locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
- s.state(locop::NodeSummary::State::Complete);
- return true;
+ return use_x(tbl(), node, s);
}
bool CircleNodeSummaryBuilder::summary(const luci::CircleAdd *node, locop::NodeSummary &s) const
{
- assert(node->fusedActivationFunction() != luci::FusedActFunc::UNDEFINED);
+ return use_xy_act(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleAddN *node, locop::NodeSummary &s) const
+{
+ for (uint32_t i = 0; i < node->arity(); ++i)
+ s.args().append("inputs", tbl()->lookup(node->inputs(i)));
- s.args().append("x", tbl()->lookup(node->x()));
- s.args().append("y", tbl()->lookup(node->y()));
- s.args().append("fused_activation_function", to_str(node->fusedActivationFunction()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
bool CircleNodeSummaryBuilder::summary(const luci::CircleArgMax *node, locop::NodeSummary &s) const
{
- s.args().append("input", tbl()->lookup(node->input()));
- s.args().append("dimension", tbl()->lookup(node->dimension()));
- s.args().append("output_type", to_str(node->output_type()));
- s.state(locop::NodeSummary::State::Complete);
- return true;
+ return use_ido(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleArgMin *node, locop::NodeSummary &s) const
+{
+ return use_ido(tbl(), node, s);
}
bool CircleNodeSummaryBuilder::summary(const luci::CircleAveragePool2D *node,
@@ -277,6 +438,17 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleAveragePool2D *node,
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleBatchMatMul *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("x", tbl()->lookup(node->x()));
+ s.args().append("y", tbl()->lookup(node->y()));
+ s.args().append("adj_x", to_str(node->adj_x()));
+ s.args().append("adj_y", to_str(node->adj_y()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleBatchToSpaceND *node,
locop::NodeSummary &s) const
{
@@ -289,6 +461,20 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleBatchToSpaceND *node,
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleCast *node, locop::NodeSummary &s) const
+{
+ s.args().append("x", tbl()->lookup(node->x()));
+ s.args().append("in_data_type", to_str(node->in_data_type()));
+ s.args().append("out_data_type", to_str(node->out_data_type()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleCeil *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleConcatenation *node,
locop::NodeSummary &s) const
{
@@ -318,6 +504,8 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleConv2D *node, locop::No
s.args().append("bias", tbl()->lookup(node->bias()));
s.args().append("stride(h,w)", to_str(node->stride()));
+ s.args().append("dilation(h,w)", to_str(node->dilation()));
+
s.args().append("padding", to_str(node->padding()));
s.args().append("fused", to_str(node->fusedActivationFunction()));
@@ -328,8 +516,28 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleConv2D *node, locop::No
bool CircleNodeSummaryBuilder::summary(const luci::CircleCos *node, locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleCustom *node, locop::NodeSummary &s) const
+{
+ for (uint32_t i = 0; i < node->numInputs(); i++)
+ {
+ s.args().append("input" + std::to_string(i), tbl()->lookup(node->inputs(i)));
+ }
+ s.args().append("custom_code", node->custom_code());
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleDepthToSpace *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("block_size", std::to_string(node->block_size()));
+
s.state(locop::NodeSummary::State::Complete);
+
return true;
}
@@ -344,6 +552,7 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleDepthwiseConv2D *node,
s.args().append("bias", tbl()->lookup(node->bias()));
s.args().append("stride(h,w)", to_str(node->stride()));
+ s.args().append("dilation(h,w)", to_str(node->dilation()));
s.args().append("padding", to_str(node->padding()));
s.args().append("depthMultiplier", std::to_string(node->depthMultiplier()));
s.args().append("fused", to_str(node->fusedActivationFunction()));
@@ -355,15 +564,49 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleDepthwiseConv2D *node,
bool CircleNodeSummaryBuilder::summary(const luci::CircleDiv *node, locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
- s.args().append("y", tbl()->lookup(node->y()));
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleElu *node, locop::NodeSummary &s) const
+{
+ return use_features(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleExp *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleExpandDims *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("axis", tbl()->lookup(node->axis()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
-bool CircleNodeSummaryBuilder::summary(const luci::CircleExp *node, locop::NodeSummary &s) const
+bool CircleNodeSummaryBuilder::summary(const luci::CircleFloor *node, locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleFloorDiv *node,
+ locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleFloorMod *node,
+ locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleFill *node, locop::NodeSummary &s) const
+{
+ s.args().append("dims", tbl()->lookup(node->dims()));
+ s.args().append("value", tbl()->lookup(node->value()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
@@ -383,31 +626,157 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleFullyConnected *node,
return true;
}
-bool CircleNodeSummaryBuilder::summary(const luci::CircleLogicalNot *node,
+bool CircleNodeSummaryBuilder::summary(const luci::CircleGather *node, locop::NodeSummary &s) const
+{
+ s.args().append("params", tbl()->lookup(node->params()));
+ s.args().append("indices", tbl()->lookup(node->indices()));
+ s.args().append("axis", pepper::str(node->axis()));
+
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleGatherNd *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("params", tbl()->lookup(node->params()));
+ s.args().append("indices", tbl()->lookup(node->indices()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleGreater *node, locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleGreaterEqual *node,
+ locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleIf *node, locop::NodeSummary &s) const
+{
+ s.args().append("cond", tbl()->lookup(node->cond()));
+ for (uint32_t i = 0; i < node->input_count(); ++i)
+ s.args().append("input", tbl()->lookup(node->input(i)));
+
+ if (node->then_graph() != nullptr)
+ s.args().append("then_graph", node->then_graph()->name());
+ else
+ s.args().append("then_branch", pepper::str(node->then_branch()));
+
+ if (node->else_graph() != nullptr)
+ s.args().append("else_graph", node->else_graph()->name());
+ else
+ s.args().append("else_branch", pepper::str(node->else_branch()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleL2Normalize *node,
locop::NodeSummary &s) const
{
s.args().append("x", tbl()->lookup(node->x()));
+ s.args().append("fused_activation_function", to_str(node->fusedActivationFunction()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLess *node, locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLessEqual *node,
+ locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLeakyRelu *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("features", tbl()->lookup(node->features()));
+ s.args().append("alpha", std::to_string(node->alpha()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLocalResponseNormalization *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("radius", pepper::str(node->radius()));
+ s.args().append("bias", pepper::str(node->bias()));
+ s.args().append("alpha", pepper::str(node->alpha()));
+ s.args().append("beta", pepper::str(node->beta()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLog *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLogicalAnd *node,
+ locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLogicalNot *node,
+ locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleLogicalOr *node,
locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
- s.args().append("y", tbl()->lookup(node->y()));
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLogistic *node,
+ locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleLogSoftmax *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("logits", tbl()->lookup(node->logits()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
-bool CircleNodeSummaryBuilder::summary(const luci::CircleMaximum *node, locop::NodeSummary &s) const
+bool CircleNodeSummaryBuilder::summary(const luci::CircleMatrixDiag *node,
+ locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
- s.args().append("y", tbl()->lookup(node->y()));
+ s.args().append("diagonal", tbl()->lookup(node->diagonal()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleMatrixSetDiag *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("diagonal", tbl()->lookup(node->diagonal()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleMaximum *node, locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleMaxPool2D *node,
locop::NodeSummary &s) const
{
@@ -426,20 +795,48 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleMaxPool2D *node,
bool CircleNodeSummaryBuilder::summary(const luci::CircleMean *node, locop::NodeSummary &s) const
{
+ return use_reducer(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleMinimum *node, locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleMirrorPad *node,
+ locop::NodeSummary &s) const
+{
s.args().append("input", tbl()->lookup(node->input()));
- s.args().append("reduction_indices", tbl()->lookup(node->reduction_indices()));
- s.args().append("keep_dims", node->keep_dims() ? "true" : "false");
+ s.args().append("paddings", tbl()->lookup(node->paddings()));
+ s.args().append("mode", to_str(node->mode()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
bool CircleNodeSummaryBuilder::summary(const luci::CircleMul *node, locop::NodeSummary &s) const
{
- assert(node->fusedActivationFunction() != luci::FusedActFunc::UNDEFINED);
+ return use_xy_act(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleNeg *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleNotEqual *node,
+ locop::NodeSummary &s) const
+{
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleOneHot *node, locop::NodeSummary &s) const
+{
+ s.args().append("indices", tbl()->lookup(node->indices()));
+ s.args().append("depth", tbl()->lookup(node->depth()));
+ s.args().append("on_value", tbl()->lookup(node->on_value()));
+ s.args().append("off_value", tbl()->lookup(node->off_value()));
+ s.args().append("axis", pepper::str(node->axis()));
- s.args().append("x", tbl()->lookup(node->x()));
- s.args().append("y", tbl()->lookup(node->y()));
- s.args().append("fused_activation_function", to_str(node->fusedActivationFunction()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
@@ -462,20 +859,74 @@ bool CircleNodeSummaryBuilder::summary(const luci::CirclePad *node, locop::NodeS
return true;
}
-bool CircleNodeSummaryBuilder::summary(const luci::CircleRelu *node, locop::NodeSummary &s) const
+bool CircleNodeSummaryBuilder::summary(const luci::CirclePow *node, locop::NodeSummary &s) const
{
- s.args().append("features", tbl()->lookup(node->features()));
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CirclePRelu *node, locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("alpha", tbl()->lookup(node->alpha()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
-bool CircleNodeSummaryBuilder::summary(const luci::CircleRelu6 *node, locop::NodeSummary &s) const
+bool CircleNodeSummaryBuilder::summary(const luci::CircleRange *node, locop::NodeSummary &s) const
{
- s.args().append("features", tbl()->lookup(node->features()));
+ s.args().append("start", tbl()->lookup(node->start()));
+ s.args().append("limit", tbl()->lookup(node->limit()));
+ s.args().append("delta", tbl()->lookup(node->delta()));
+
s.state(locop::NodeSummary::State::Complete);
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleRank *node, locop::NodeSummary &s) const
+{
+ return use_input(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleReduceAny *node,
+ locop::NodeSummary &s) const
+{
+ return use_reducer(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleReduceMax *node,
+ locop::NodeSummary &s) const
+{
+ return use_reducer(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleReduceMin *node,
+ locop::NodeSummary &s) const
+{
+ return use_reducer(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleReduceProd *node,
+ locop::NodeSummary &s) const
+{
+ return use_reducer(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleRelu *node, locop::NodeSummary &s) const
+{
+ return use_features(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleRelu6 *node, locop::NodeSummary &s) const
+{
+ return use_features(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleReluN1To1 *node,
+ locop::NodeSummary &s) const
+{
+ return use_features(tbl(), node, s);
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleReshape *node, locop::NodeSummary &s) const
{
s.args().append("tensor", tbl()->lookup(node->tensor()));
@@ -485,9 +936,113 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleReshape *node, locop::N
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleResizeBilinear *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("size", tbl()->lookup(node->size()));
+ s.args().append("align_corners", node->align_corners() ? "true" : "false");
+ s.args().append("half_pixel_centers", node->half_pixel_centers() ? "true" : "false");
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleResizeNearestNeighbor *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("size", tbl()->lookup(node->size()));
+ s.args().append("align_corners", node->align_corners() ? "true" : "false");
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleReverseSequence *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("seq_lengths", tbl()->lookup(node->seq_lengths()));
+ s.args().append("seq_axis", std::to_string(node->seq_axis()));
+ s.args().append("batch_axis", std::to_string(node->batch_axis()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleReverseV2 *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("tensor", tbl()->lookup(node->tensor()));
+ s.args().append("axis", tbl()->lookup(node->axis()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleRound *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleRsqrt *node, locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleScatterNd *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("indices", tbl()->lookup(node->indices()));
+ s.args().append("updates", tbl()->lookup(node->updates()));
+ s.args().append("shape", tbl()->lookup(node->shape()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSegmentSum *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("segment_ids", tbl()->lookup(node->segment_ids()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSelect *node, locop::NodeSummary &s) const
+{
+ s.args().append("condition", tbl()->lookup(node->condition()));
+ s.args().append("t", tbl()->lookup(node->t()));
+ s.args().append("e", tbl()->lookup(node->e()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSelectV2 *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("condition", tbl()->lookup(node->condition()));
+ s.args().append("t", tbl()->lookup(node->t()));
+ s.args().append("e", tbl()->lookup(node->e()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleShape *node, locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("out_type", to_str(node->out_type()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSin *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSlice *node, locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("begin", tbl()->lookup(node->begin()));
+ s.args().append("size", tbl()->lookup(node->size()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
@@ -500,31 +1055,151 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleSoftmax *node, locop::N
return true;
}
-bool CircleNodeSummaryBuilder::summary(const luci::CircleSqrt *node, locop::NodeSummary &s) const
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSpaceToBatchND *node,
+ locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("block_shape", tbl()->lookup(node->block_shape()));
+ s.args().append("paddings", tbl()->lookup(node->paddings()));
+
s.state(locop::NodeSummary::State::Complete);
+
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSpaceToDepth *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("block_size", pepper::str(node->block_size()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSparseToDense *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("indices", tbl()->lookup(node->indices()));
+ s.args().append("output_shape", tbl()->lookup(node->output_shape()));
+ s.args().append("values", tbl()->lookup(node->values()));
+ s.args().append("default_value", tbl()->lookup(node->default_value()));
+
+ s.args().append("Validate_indices", pepper::str(node->validate_indices()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSplit *node, locop::NodeSummary &s) const
+{
+ s.args().append("split_dim", tbl()->lookup(node->split_dim()));
+ s.args().append("input", tbl()->lookup(node->input()));
+
+ s.args().append("num_split", pepper::str(node->num_split()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSplitV *node, locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("size_splits", tbl()->lookup(node->size_splits()));
+ s.args().append("split_dim", tbl()->lookup(node->split_dim()));
+
+ s.args().append("num_split", pepper::str(node->num_split()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSqrt *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSquare *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleSquaredDifference *node,
locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
- s.args().append("y", tbl()->lookup(node->y()));
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSqueeze *node, locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+
+ std::stringstream ss{"("};
+ for (size_t i = 0; i < node->squeeze_dims().size(); ++i)
+ {
+ if (i != 0)
+ ss << ", ";
+ ss << node->squeeze_dims()[i];
+ }
+ ss << ")";
+
+ s.args().append("squeeze_dims", ss.str());
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleStridedSlice *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("begin", tbl()->lookup(node->begin()));
+ s.args().append("end", tbl()->lookup(node->end()));
+ s.args().append("strides", tbl()->lookup(node->strides()));
+
+ s.args().append("begin_mask", pepper::str(node->begin_mask()));
+ s.args().append("end_mask", pepper::str(node->end_mask()));
+ s.args().append("ellipsis_mask", pepper::str(node->ellipsis_mask()));
+ s.args().append("new_axis_mask", pepper::str(node->new_axis_mask()));
+ s.args().append("shrink_axis_mask", pepper::str(node->shrink_axis_mask()));
+
s.state(locop::NodeSummary::State::Complete);
return true;
}
bool CircleNodeSummaryBuilder::summary(const luci::CircleSub *node, locop::NodeSummary &s) const
{
- s.args().append("x", tbl()->lookup(node->x()));
- s.args().append("y", tbl()->lookup(node->y()));
+ return use_xy(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSum *node, locop::NodeSummary &s) const
+{
+ return use_reducer(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleTanh *node, locop::NodeSummary &s) const
+{
+ return use_x(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleTile *node, locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("multiples", tbl()->lookup(node->multiples()));
s.state(locop::NodeSummary::State::Complete);
return true;
}
-// TODO TFLTanh
+bool CircleNodeSummaryBuilder::summary(const luci::CircleTopKV2 *node, locop::NodeSummary &s) const
+{
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("k", tbl()->lookup(node->k()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
bool CircleNodeSummaryBuilder::summary(const luci::CircleTranspose *node,
locop::NodeSummary &s) const
@@ -552,6 +1227,97 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleTransposeConv *node,
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleUnpack *node, locop::NodeSummary &s) const
+{
+ s.args().append("value", tbl()->lookup(node->value()));
+
+ s.args().append("num", pepper::str(node->num()));
+ s.args().append("axis", pepper::str(node->axis()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleWhere *node, locop::NodeSummary &s) const
+{
+ s.args().append("condition", tbl()->lookup(node->condition()));
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleWhile *node, locop::NodeSummary &s) const
+{
+ for (uint32_t i = 0; i < node->input_count(); ++i)
+ s.args().append("input", tbl()->lookup(node->input(i)));
+
+ if (node->cond_graph() != nullptr)
+ s.args().append("cond_graph", node->cond_graph()->name());
+ else
+ s.args().append("cond_branch", pepper::str(node->cond_branch()));
+
+ if (node->body_graph() != nullptr)
+ s.args().append("body_graph", node->body_graph()->name());
+ else
+ s.args().append("body_branch", pepper::str(node->body_branch()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleZerosLike *node,
+ locop::NodeSummary &s) const
+{
+ return use_input(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSplitOut *node,
+ locop::NodeSummary &s) const
+{
+ return use_input(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleSplitVOut *node,
+ locop::NodeSummary &s) const
+{
+ return use_input(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleTopKV2Out *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("topkv2", tbl()->lookup(node->input()));
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleUnpackOut *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("unpack", tbl()->lookup(node->input()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleIfOut *node, locop::NodeSummary &s) const
+{
+ return use_input(tbl(), node, s);
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleWhileOut *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("while", tbl()->lookup(node->input()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleInput *, locop::NodeSummary &s) const
{
s.state(locop::NodeSummary::State::Complete);
@@ -566,6 +1332,40 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleOutput *node, locop::No
return true;
}
+bool CircleNodeSummaryBuilder::summary(const luci::CircleBCQFullyConnected *node,
+ locop::NodeSummary &s) const
+{
+ assert(node->fusedActivationFunction() != luci::FusedActFunc::UNDEFINED);
+
+ s.args().append("input", tbl()->lookup(node->input()));
+ s.args().append("weights_scales", tbl()->lookup(node->weights_scales()));
+ s.args().append("weights_binary", tbl()->lookup(node->weights_binary()));
+ s.args().append("bias", tbl()->lookup(node->bias()));
+ s.args().append("weights_clusters", tbl()->lookup(node->weights_clusters()));
+
+ s.args().append("fused", to_str(node->fusedActivationFunction()));
+ s.args().append("weights_hidden_size", pepper::str(node->weights_hidden_size()));
+
+ s.state(locop::NodeSummary::State::Complete);
+
+ return true;
+}
+
+bool CircleNodeSummaryBuilder::summary(const luci::CircleBCQGather *node,
+ locop::NodeSummary &s) const
+{
+ s.args().append("input_scales", tbl()->lookup(node->input_scales()));
+ s.args().append("input_binary", tbl()->lookup(node->input_binary()));
+ s.args().append("indices", tbl()->lookup(node->indices()));
+ s.args().append("input_clusters", tbl()->lookup(node->input_clusters()));
+
+ s.args().append("axis", pepper::str(node->axis()));
+ s.args().append("input_hidden_size", pepper::str(node->input_hidden_size()));
+
+ s.state(locop::NodeSummary::State::Complete);
+ return true;
+}
+
bool CircleNodeSummaryBuilder::summary(const luci::CircleInstanceNorm *node,
locop::NodeSummary &s) const
{
@@ -603,4 +1403,4 @@ bool NodeSummaryBuilder::build(const loco::Node *node, locop::NodeSummary &s) co
return false;
}
-} // namespace exo
+} // namespace luci
diff --git a/compiler/luci/pass/CMakeLists.txt b/compiler/luci/pass/CMakeLists.txt
index 93130ce60..2c5fb3407 100644
--- a/compiler/luci/pass/CMakeLists.txt
+++ b/compiler/luci/pass/CMakeLists.txt
@@ -1,6 +1,6 @@
file(GLOB_RECURSE SOURCES "src/*.cpp")
-#file(GLOB_RECURSE TESTS "src/*.test.cpp")
-#list(REMOVE_ITEM SOURCES ${TESTS})
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
add_library(luci_pass SHARED ${SOURCES})
target_include_directories(luci_pass PRIVATE src)
@@ -16,14 +16,14 @@ target_link_libraries(luci_pass PRIVATE nncc_common)
target_link_libraries(luci_pass PRIVATE oops)
install(TARGETS luci_pass DESTINATION lib)
-# TODO enable for tests
-#if(NOT ENABLE_TEST)
-# return()
-#endif(NOT ENABLE_TEST)
-#
-#nnas_find_package(GTest REQUIRED)
-#
-#GTest_AddTest(luci_pass_test ${TESTS})
-#target_include_directories(luci_pass_test PRIVATE src)
-#target_link_libraries(luci_pass_test luci_pass)
+if(NOT ENABLE_TEST)
+ return()
+endif(NOT ENABLE_TEST)
+
+nnas_find_package(GTest REQUIRED)
+
+GTest_AddTest(luci_pass_test ${TESTS})
+target_include_directories(luci_pass_test PRIVATE src)
+target_link_libraries(luci_pass_test luci_pass)
+target_link_libraries(luci_pass_test luci_lang)
#target_link_libraries(luci_pass_test oops)
diff --git a/compiler/luci/pass/include/luci/CircleOptimizer.h b/compiler/luci/pass/include/luci/CircleOptimizer.h
index a969cca85..312749f83 100644
--- a/compiler/luci/pass/include/luci/CircleOptimizer.h
+++ b/compiler/luci/pass/include/luci/CircleOptimizer.h
@@ -32,11 +32,28 @@ public:
{
enum Algorithm
{
+ FuseBCQ,
FuseInstanceNorm,
+ ResolveCustomOpAdd,
+ ResolveCustomOpBatchMatMul,
+ ResolveCustomOpMatMul,
+ QuantizeDequantizeWeights,
+ QuantizeWithMinMax,
};
+ enum AlgorithmParameters
+ {
+ Quantize_input_dtype,
+ Quantize_output_dtype,
+ Quantize_granularity // layer-wise or channel-wise
+ };
+
+ virtual ~Options() = default;
+
virtual void enable(Algorithm) = 0;
virtual bool query(Algorithm) = 0;
+ virtual void param(AlgorithmParameters, const std::string &) = 0;
+ virtual const std::string param(AlgorithmParameters) const = 0;
};
public:
@@ -46,6 +63,8 @@ public:
public:
void optimize(loco::Graph *) const;
+ void quantize(loco::Graph *) const;
+
private:
std::unique_ptr<Options> _options;
};
diff --git a/compiler/luci/pass/include/luci/Pass/FuseBCQPass.h b/compiler/luci/pass/include/luci/Pass/FuseBCQPass.h
new file mode 100644
index 000000000..4404a9fc9
--- /dev/null
+++ b/compiler/luci/pass/include/luci/Pass/FuseBCQPass.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_FUSE_BCQ_PASS_H__
+#define __LUCI_FUSE_BCQ_PASS_H__
+
+#include <logo/Pass.h>
+
+namespace luci
+{
+
+/**
+ * @brief Class to fuse certain pattern of subgraph into CircleBCQFullyConnected or CircleBCQGather
+ *
+ */
+struct FuseBCQPass final : public logo::Pass
+{
+ const char *name(void) const final { return "luci::FuseBCQPass"; }
+
+ bool run(loco::Graph *g) final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_FUSE_BCQ_PASS_H__
diff --git a/compiler/luci/pass/include/luci/Pass/QuantizationParameters.h b/compiler/luci/pass/include/luci/Pass/QuantizationParameters.h
new file mode 100644
index 000000000..5c9cd427f
--- /dev/null
+++ b/compiler/luci/pass/include/luci/Pass/QuantizationParameters.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_QUANTIZATION_PARAMETERS_H__
+#define __LUCI_QUANTIZATION_PARAMETERS_H__
+
+namespace luci
+{
+
+enum QuantizationGranularity
+{
+ LayerWise = 0,
+ ChannelWise = 1,
+};
+
+} // namespace luci
+
+#endif // __LUCI_QUANTIZATION_PARAMETERS_H__
diff --git a/compiler/luci/pass/include/luci/Pass/QuantizeDequantizeWeightsPass.h b/compiler/luci/pass/include/luci/Pass/QuantizeDequantizeWeightsPass.h
new file mode 100644
index 000000000..713b88f9d
--- /dev/null
+++ b/compiler/luci/pass/include/luci/Pass/QuantizeDequantizeWeightsPass.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_QUANTIZE_DEQUANTIZE_WEIGHTS_PASS_H__
+#define __LUCI_QUANTIZE_DEQUANTIZE_WEIGHTS_PASS_H__
+
+#include <loco.h>
+
+#include <logo/Pass.h>
+
+#include <luci/Pass/QuantizationParameters.h>
+
+namespace luci
+{
+
+/**
+ * @brief Pass to quantize weights
+ */
+class QuantizeDequantizeWeightsPass : public logo::Pass
+{
+public:
+ QuantizeDequantizeWeightsPass(loco::DataType input_dtype, loco::DataType output_dtype,
+ QuantizationGranularity granularity)
+ : _input_dtype{input_dtype}, _output_dtype{output_dtype}, _granularity{granularity}
+ {
+ // DO NOTHING
+ }
+ virtual const char *name(void) const { return "luci::QuantizeDequantizeWeightsPass"; }
+
+public:
+ bool run(loco::Graph *graph);
+
+private:
+ loco::DataType _input_dtype;
+ loco::DataType _output_dtype;
+ QuantizationGranularity _granularity;
+};
+
+} // namespace luci
+
+#endif //__LUCI_QUANTIZE_DEQUANTIZE_WEIGHTS_PASS_H__
diff --git a/compiler/luci/pass/include/luci/Pass/QuantizeWithMinMaxPass.h b/compiler/luci/pass/include/luci/Pass/QuantizeWithMinMaxPass.h
new file mode 100644
index 000000000..bb0d0ff40
--- /dev/null
+++ b/compiler/luci/pass/include/luci/Pass/QuantizeWithMinMaxPass.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_QUANTIZE_WITH_MINMAX_PASS_H__
+#define __LUCI_QUANTIZE_WITH_MINMAX_PASS_H__
+
+#include <loco.h>
+
+#include <logo/Pass.h>
+
+#include <luci/Pass/QuantizationParameters.h>
+
+namespace luci
+{
+
+/**
+ * @brief Pass to quantize activation, weights, and bias
+ */
+class QuantizeWithMinMaxPass : public logo::Pass
+{
+public:
+ QuantizeWithMinMaxPass(loco::DataType input_dtype, loco::DataType output_dtype,
+ QuantizationGranularity granularity)
+ : _input_dtype{input_dtype}, _output_dtype{output_dtype}, _granularity{granularity}
+ {
+ // DO NOTHING
+ }
+ virtual const char *name(void) const { return "luci::QuantizeWithMinMaxPass"; }
+
+public:
+ bool run(loco::Graph *graph);
+
+private:
+ loco::DataType _input_dtype;
+ loco::DataType _output_dtype;
+ QuantizationGranularity _granularity;
+};
+
+} // namespace luci
+
+#endif //__LUCI_QUANTIZE_WITH_MINMAX_PASS_H__
diff --git a/compiler/luci/pass/include/luci/Pass/ResolveCustomOpAddPass.h b/compiler/luci/pass/include/luci/Pass/ResolveCustomOpAddPass.h
new file mode 100644
index 000000000..35a335028
--- /dev/null
+++ b/compiler/luci/pass/include/luci/Pass/ResolveCustomOpAddPass.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_RESOLVE_CUSTOM_OP_ADD_PASS_H__
+#define __LUCI_RESOLVE_CUSTOM_OP_ADD_PASS_H__
+
+#include <logo/Pass.h>
+
+namespace luci
+{
+
+/**
+ * @brief Class to resolve certain custom op of subgraph into add op in circle schema.
+ */
+struct ResolveCustomOpAddPass final : public logo::Pass
+{
+ const char *name(void) const final { return "luci::ResolveCustomOpAddPass"; }
+
+ bool run(loco::Graph *g) final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_RESOLVE_CUSTOM_OP_ADD_PASS_H__
diff --git a/compiler/luci/pass/include/luci/Pass/ResolveCustomOpBatchMatMulPass.h b/compiler/luci/pass/include/luci/Pass/ResolveCustomOpBatchMatMulPass.h
new file mode 100644
index 000000000..7c48c8d16
--- /dev/null
+++ b/compiler/luci/pass/include/luci/Pass/ResolveCustomOpBatchMatMulPass.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_RESOLVE_CUSTOM_OP_BATCHMATMUL_PASS_H__
+#define __LUCI_RESOLVE_CUSTOM_OP_BATCHMATMUL_PASS_H__
+
+#include <logo/Pass.h>
+
+namespace luci
+{
+
+/**
+ * @brief Class to resolve certain custom op of subgraph into batchmatmul op in circle schema.
+ */
+struct ResolveCustomOpBatchMatMulPass final : public logo::Pass
+{
+ const char *name(void) const final { return "luci::ResolveCustomOpBatchMatMulPass"; }
+
+ bool run(loco::Graph *g) final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_RESOLVE_CUSTOM_OP_BATCHMATMUL_PASS_H__
diff --git a/compiler/luci/pass/include/luci/Pass/ResolveCustomOpMatMulPass.h b/compiler/luci/pass/include/luci/Pass/ResolveCustomOpMatMulPass.h
new file mode 100644
index 000000000..701deba91
--- /dev/null
+++ b/compiler/luci/pass/include/luci/Pass/ResolveCustomOpMatMulPass.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_RESOLVE_CUSTOM_OP_MATMUL_PASS_H__
+#define __LUCI_RESOLVE_CUSTOM_OP_MATMUL_PASS_H__
+
+#include <logo/Pass.h>
+
+namespace luci
+{
+
+/**
+ * @brief Class to resolve certain custom op of subgraph into matmul op in circle schema.
+ */
+struct ResolveCustomOpMatMulPass final : public logo::Pass
+{
+ const char *name(void) const final { return "luci::ResolveCustomOpMatMulPass"; }
+
+ bool run(loco::Graph *g) final;
+};
+
+} // namespace luci
+
+#endif // __LUCI_RESOLVE_CUSTOM_OP_MATMUL_PASS_H__
diff --git a/compiler/luci/pass/src/CircleOptimizer.cpp b/compiler/luci/pass/src/CircleOptimizer.cpp
index dcb05a0b5..90fbe9009 100644
--- a/compiler/luci/pass/src/CircleOptimizer.cpp
+++ b/compiler/luci/pass/src/CircleOptimizer.cpp
@@ -16,16 +16,23 @@
#include "luci/CircleOptimizer.h"
+#include "luci/Pass/FuseBCQPass.h"
#include "luci/Pass/FuseInstanceNormPass.h"
+#include "luci/Pass/ResolveCustomOpAddPass.h"
+#include "luci/Pass/ResolveCustomOpBatchMatMulPass.h"
+#include "luci/Pass/ResolveCustomOpMatMulPass.h"
+#include "luci/Pass/QuantizeWithMinMaxPass.h"
+#include "luci/Pass/QuantizeDequantizeWeightsPass.h"
// TODO add more passes
#include "luci/Pass/ShapeInferencePass.h"
#include "luci/Pass/TypeInferencePass.h"
// logo passes
-#include <logo/RemoveDeadNodePass.h>
+#include <logo/RemoveDeadNodeWithQueryPass.h>
#include "ProgressReporter.h"
+#include "CircleOptimizerUtils.h"
#include <logo/Phase.h>
@@ -36,18 +43,39 @@ namespace
using namespace luci;
-class OptimizeOptionsImpl : public luci::CircleOptimizer::Options
+class OptimizeOptionsImpl final : public luci::CircleOptimizer::Options
{
public:
void enable(Algorithm) final;
+ void param(AlgorithmParameters, const std::string &) final;
+ const std::string param(AlgorithmParameters) const final;
bool query(Algorithm) final;
private:
std::vector<Algorithm> _algorithms;
+ std::map<AlgorithmParameters, const std::string> _algorithm_params;
};
void OptimizeOptionsImpl::enable(Algorithm algo) { _algorithms.push_back(algo); }
+void OptimizeOptionsImpl::param(AlgorithmParameters param, const std::string &str)
+{
+ _algorithm_params.insert(std::pair<AlgorithmParameters, const std::string>(param, str));
+}
+
+const std::string OptimizeOptionsImpl::param(AlgorithmParameters param) const
+{
+ auto param_str = _algorithm_params.find(param);
+ if (param_str != _algorithm_params.end())
+ {
+ return param_str->second;
+ }
+ else
+ {
+ return std::string();
+ }
+}
+
bool OptimizeOptionsImpl::query(Algorithm algo)
{
std::vector<Algorithm>::iterator it = std::find(_algorithms.begin(), _algorithms.end(), algo);
@@ -77,14 +105,31 @@ void CircleOptimizer::optimize(loco::Graph *g) const
logo::Phase phase;
/* TRANSFORM DECLARATION BEGIN */
+ if (_options->query(Options::Algorithm::ResolveCustomOpAdd))
+ {
+ phase.emplace_back(std::make_unique<luci::ResolveCustomOpAddPass>());
+ }
+ if (_options->query(Options::Algorithm::ResolveCustomOpBatchMatMul))
+ {
+ phase.emplace_back(std::make_unique<luci::ResolveCustomOpBatchMatMulPass>());
+ }
+ if (_options->query(Options::Algorithm::ResolveCustomOpMatMul))
+ {
+ phase.emplace_back(std::make_unique<luci::ResolveCustomOpMatMulPass>());
+ }
if (_options->query(Options::Algorithm::FuseInstanceNorm))
{
phase.emplace_back(std::make_unique<FuseInstanceNormPass>());
}
+ if (_options->query(Options::Algorithm::FuseBCQ))
+ {
+ phase.emplace_back(std::make_unique<FuseBCQPass>());
+ }
+
// Shape inference is needed for added nodes doing above transformations
phase.emplace_back(std::make_unique<luci::ShapeInferencePass>());
phase.emplace_back(std::make_unique<luci::TypeInferencePass>());
- phase.emplace_back(std::make_unique<logo::RemoveDeadNodePass>());
+ phase.emplace_back(std::make_unique<logo::RemoveDeadNodeWithQueryPass>());
/* TRANSFORM DECLARATION END */
ProgressReporter prog(g, logo::PhaseStrategy::Saturate);
@@ -93,4 +138,74 @@ void CircleOptimizer::optimize(loco::Graph *g) const
phase_runner.run(phase);
}
+void CircleOptimizer::quantize(loco::Graph *g) const
+{
+ // Fake quantization of weights
+ if (_options->query(Options::Algorithm::QuantizeDequantizeWeights))
+ {
+ static const std::vector<std::string> fakeq_supported_input_dtype{"float32"};
+ static const std::vector<std::string> fakeq_supported_output_dtype{"uint8"};
+ static const std::vector<std::string> fakeq_supported_granularity{"layer"};
+
+ auto input_dtype = _options->param(Options::AlgorithmParameters::Quantize_input_dtype);
+ auto output_dtype = _options->param(Options::AlgorithmParameters::Quantize_output_dtype);
+ auto granularity = _options->param(Options::AlgorithmParameters::Quantize_granularity);
+
+ if (!in_array(to_lower_case(input_dtype), fakeq_supported_input_dtype))
+ throw std::runtime_error("Unsupported input type. List of supported input type: " +
+ to_string(fakeq_supported_input_dtype));
+
+ if (!in_array(to_lower_case(output_dtype), fakeq_supported_output_dtype))
+ throw std::runtime_error("Unsupported output type. List of supported output type: " +
+ to_string(fakeq_supported_output_dtype));
+
+ if (!in_array(to_lower_case(granularity), fakeq_supported_granularity))
+ throw std::runtime_error("Unsupported granularity. List of supported granularity: " +
+ to_string(fakeq_supported_granularity));
+
+ luci::QuantizeDequantizeWeightsPass fake_quantizer(
+ str_to_dtype(input_dtype), str_to_dtype(output_dtype), str_to_granularity(granularity));
+ fake_quantizer.run(g);
+ }
+
+ // Actual quantization of weights, bias, and activation
+ if (_options->query(Options::Algorithm::QuantizeWithMinMax))
+ {
+ static const std::vector<std::string> qwmm_supported_input_dtype{"float32"};
+ static const std::vector<std::string> qwmm_supported_output_dtype{"uint8"};
+ static const std::vector<std::string> qwmm_supported_granularity{"layer"};
+
+ auto input_dtype = _options->param(Options::AlgorithmParameters::Quantize_input_dtype);
+ auto output_dtype = _options->param(Options::AlgorithmParameters::Quantize_output_dtype);
+ auto granularity = _options->param(Options::AlgorithmParameters::Quantize_granularity);
+
+ if (!in_array(to_lower_case(input_dtype), qwmm_supported_input_dtype))
+ throw std::runtime_error("Unsupported input type. List of supported input types: " +
+ to_string(qwmm_supported_input_dtype));
+
+ if (!in_array(to_lower_case(output_dtype), qwmm_supported_output_dtype))
+ throw std::runtime_error("Unsupported output type. List of supported output types: " +
+ to_string(qwmm_supported_output_dtype));
+
+ if (!in_array(to_lower_case(granularity), qwmm_supported_granularity))
+ throw std::runtime_error("Unsupported granularity. List of supported granularity: " +
+ to_string(qwmm_supported_granularity));
+
+ luci::QuantizeWithMinMaxPass quantizer(str_to_dtype(input_dtype), str_to_dtype(output_dtype),
+ str_to_granularity(granularity));
+ quantizer.run(g);
+ }
+
+ logo::Phase phase;
+
+ // Do Shape/Type inference
+ phase.emplace_back(std::make_unique<luci::ShapeInferencePass>());
+ phase.emplace_back(std::make_unique<luci::TypeInferencePass>());
+
+ ProgressReporter prog(g, logo::PhaseStrategy::Saturate);
+ logo::PhaseRunner<logo::PhaseStrategy::Saturate> phase_runner{g};
+ phase_runner.attach(&prog);
+ phase_runner.run(phase);
+}
+
} // namespace luci
diff --git a/compiler/luci/pass/src/CircleOptimizerUtils.cpp b/compiler/luci/pass/src/CircleOptimizerUtils.cpp
new file mode 100644
index 000000000..ffc372392
--- /dev/null
+++ b/compiler/luci/pass/src/CircleOptimizerUtils.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CircleOptimizerUtils.h"
+
+namespace luci
+{
+
+bool in_array(const std::string &str, const std::vector<std::string> &array)
+{
+ return std::find(array.begin(), array.end(), str) != array.end();
+}
+
+std::string to_string(const std::vector<std::string> &strings)
+{
+ assert(!strings.empty());
+
+ std::string res;
+ for (unsigned int i = 0; i < strings.size() - 1; i++)
+ res += strings[i] + ", ";
+
+ res += strings[strings.size() - 1];
+ return res;
+}
+
+std::string to_lower_case(std::string s)
+{
+ std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::tolower(c); });
+ return s;
+}
+
+loco::DataType str_to_dtype(const std::string &str)
+{
+ if (to_lower_case(str).compare("uint8") == 0)
+ return loco::DataType::U8;
+ if (to_lower_case(str).compare("uint16") == 0)
+ return loco::DataType::U16;
+ if (to_lower_case(str).compare("uint32") == 0)
+ return loco::DataType::U32;
+ if (to_lower_case(str).compare("uint64") == 0)
+ return loco::DataType::U64;
+
+ if (to_lower_case(str).compare("int8") == 0)
+ return loco::DataType::S8;
+ if (to_lower_case(str).compare("int16") == 0)
+ return loco::DataType::S16;
+ if (to_lower_case(str).compare("int32") == 0)
+ return loco::DataType::S32;
+ if (to_lower_case(str).compare("int64") == 0)
+ return loco::DataType::S64;
+
+ if (to_lower_case(str).compare("float16") == 0)
+ return loco::DataType::FLOAT16;
+ if (to_lower_case(str).compare("float32") == 0)
+ return loco::DataType::FLOAT32;
+ if (to_lower_case(str).compare("float64") == 0)
+ return loco::DataType::FLOAT64;
+
+ if (to_lower_case(str).compare("bool") == 0)
+ return loco::DataType::BOOL;
+
+ return loco::DataType::Unknown;
+}
+
+QuantizationGranularity str_to_granularity(const std::string &str)
+{
+ if (to_lower_case(str).compare("layer") == 0)
+ return QuantizationGranularity::LayerWise;
+
+ if (to_lower_case(str).compare("channel") == 0)
+ return QuantizationGranularity::ChannelWise;
+
+ throw std::runtime_error("Quantization granularity must be either 'layer' or 'channel'");
+}
+
+} // namespace luci
diff --git a/compiler/luci/pass/src/CircleOptimizerUtils.h b/compiler/luci/pass/src/CircleOptimizerUtils.h
new file mode 100644
index 000000000..7e577a05f
--- /dev/null
+++ b/compiler/luci/pass/src/CircleOptimizerUtils.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_CIRCLE_OPTIMIZER_UTILS_H__
+#define __LUCI_CIRCLE_OPTIMIZER_UTILS_H__
+
+#include "luci/Pass/QuantizeDequantizeWeightsPass.h"
+#include "luci/Pass/QuantizeWithMinMaxPass.h"
+
+#include <loco.h>
+
+#include <algorithm>
+
+namespace luci
+{
+
+bool in_array(const std::string &, const std::vector<std::string> &);
+
+std::string to_string(const std::vector<std::string> &);
+
+std::string to_lower_case(std::string);
+
+loco::DataType str_to_dtype(const std::string &);
+
+QuantizationGranularity str_to_granularity(const std::string &);
+
+} // namespace luci
+
+#endif // __LUCI_CIRCLE_OPTIMIZER_UTILS_H__
diff --git a/compiler/luci/pass/src/FuseBCQPass.cpp b/compiler/luci/pass/src/FuseBCQPass.cpp
new file mode 100644
index 000000000..b81db8827
--- /dev/null
+++ b/compiler/luci/pass/src/FuseBCQPass.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Pass/FuseBCQPass.h"
+
+#include <luci/IR/CircleNodes.h>
+
+#include <cassert>
+#include <string>
+#include <set>
+
+namespace
+{
+
+/**
+ * @brief Circle nodes including BCQ information and a circle node to which BCQ will be applied
+ * are connected with their name. And their names include common prefix.
+ *        However, after the pb file is converted to a tflite file, some nodes' names are changed.
+ * Thus this function will return original common prefix.
+ *
+ * @note  Not all of the TFLite converter's re-naming rules have been figured out.
+ *        Therefore, if a new naming rule is detected, this function should be updated.
+ */
+const std::string node_name_prefix(luci::NodeName node_name)
+{
+ std::string prefix = node_name;
+
+ if (prefix.find("ReadVariableOp/resource/") != std::string::npos)
+ {
+ const auto start_index = prefix.find("ReadVariableOp/resource/");
+
+ const auto left_prefix = prefix.substr(0, start_index);
+ const auto right_prefix = prefix.substr(start_index + 24);
+
+ prefix = left_prefix + right_prefix;
+ }
+
+ if (prefix.find("Tensordot/") != std::string::npos)
+ {
+ const auto index = prefix.find("Tensordot/");
+ prefix = prefix.substr(0, index - 1);
+ }
+ else if (prefix.find("kernel/") != std::string::npos)
+ {
+ const auto index = prefix.find("kernel/");
+ prefix = prefix.substr(0, index - 1);
+ }
+ else if (prefix.find("/bcqinfo_") != std::string::npos)
+ {
+ const auto index = prefix.find("/bcqinfo_");
+ prefix = prefix.substr(0, index);
+ }
+
+ return prefix;
+}
+
+} // namespace
+
+namespace
+{
+
+class BCQConverter final
+{
+public:
+ void add_BCQ_info_node(luci::CircleConst *node)
+ {
+ const auto node_name = node->name();
+ const auto prefix = node_name_prefix(node_name);
+
+ // If bcqinfo_* nodes are held by Reshape operation,
+ // shape of bcqinfo_* nodes are copied to `shape` input of Reshape operation.
+ // Then the name becomes bcqinfo_*_copy_shape.
+    // We should prevent this node from being added to the BCQ information.
+ if (node_name.find("_copy_shape") != std::string::npos)
+ return;
+
+ if (node_name.find("bcqinfo_do_w_x") != std::string::npos)
+ _do_w_x[prefix] = node;
+ else if (node_name.find("bcqinfo_alpha") != std::string::npos)
+ _alpha[prefix] = node;
+ else if (node_name.find("bcqinfo_packed_binary_code") != std::string::npos)
+ _packed_binary_code[prefix] = node;
+ else if (node_name.find("bcqinfo_number_of_clusters") != std::string::npos)
+ _number_of_clusters[prefix] = node;
+ else if (node_name.find("bcqinfo_size_of_clusters") != std::string::npos)
+ _size_of_clusters[prefix] = node;
+ else if (node_name.find("bcqinfo_qbits_of_clusters") != std::string::npos)
+ _qbits_of_clusters[prefix] = node;
+ else if (node_name.find("bcqinfo_dequant_weight") != std::string::npos)
+ _dequant_weight[prefix] = node;
+ }
+
+ bool has_BCQ_info(luci::CircleConst *node)
+ {
+ const auto prefix = node_name_prefix(node->name());
+ bool has_info = true;
+
+ has_info &= (_do_w_x.find(prefix) != _do_w_x.end());
+ has_info &= (_alpha.find(prefix) != _alpha.end());
+ has_info &= (_packed_binary_code.find(prefix) != _packed_binary_code.end());
+ has_info &= (_number_of_clusters.find(prefix) != _number_of_clusters.end());
+ has_info &= (_size_of_clusters.find(prefix) != _size_of_clusters.end());
+ has_info &= (_qbits_of_clusters.find(prefix) != _qbits_of_clusters.end());
+ // bcqinfo_dequant_weight is just for validation, so not always exists.
+
+ return has_info;
+ }
+
+ bool do_w_x(luci::CircleConst *node)
+ {
+ const auto prefix = node_name_prefix(node->name());
+
+ if (_do_w_x[prefix]->dtype() == loco::DataType::S32)
+ return _do_w_x[prefix]->at<loco::DataType::S32>(0) == 1;
+ else if (_do_w_x[prefix]->dtype() == loco::DataType::BOOL)
+ return _do_w_x[prefix]->at<loco::DataType::BOOL>(0);
+ else
+ throw std::runtime_error("do_w_x should be int or bool");
+ }
+
+ luci::CircleConst *get_alpha(luci::CircleConst *node)
+ {
+ const auto prefix = node_name_prefix(node->name());
+ return _alpha[prefix];
+ }
+
+ luci::CircleConst *get_packed_binary_code(luci::CircleConst *node)
+ {
+ const auto prefix = node_name_prefix(node->name());
+ return _packed_binary_code[prefix];
+ }
+
+ luci::CircleConst *get_number_of_clusters(luci::CircleConst *node)
+ {
+ const auto prefix = node_name_prefix(node->name());
+ return _number_of_clusters[prefix];
+ }
+
+ luci::CircleConst *get_size_of_clusters(luci::CircleConst *node)
+ {
+ const auto prefix = node_name_prefix(node->name());
+ return _size_of_clusters[prefix];
+ }
+
+ luci::CircleConst *get_qbits_of_clusters(luci::CircleConst *node)
+ {
+ const auto prefix = node_name_prefix(node->name());
+ return _qbits_of_clusters[prefix];
+ }
+
+ luci::CircleConst *packed_clusters(luci::CircleConst *node)
+ {
+ auto graph = node->graph();
+ auto qbits_of_clusters = get_qbits_of_clusters(node);
+ auto size_of_clusters = get_size_of_clusters(node);
+ const auto number_of_clusters = get_number_of_clusters(node)->at<loco::DataType::S32>(0);
+
+ auto packed_clusters = graph->nodes()->create<luci::CircleConst>();
+ packed_clusters->dtype(loco::DataType::S32);
+ packed_clusters->size<loco::DataType::S32>(number_of_clusters * 2);
+ packed_clusters->rank(2);
+ packed_clusters->dim(0) = number_of_clusters;
+ packed_clusters->dim(1) = 2;
+ packed_clusters->shape_status(luci::ShapeStatus::VALID);
+
+ for (int i = 0; i < number_of_clusters; ++i)
+ {
+ packed_clusters->at<loco::DataType::S32>(i * 2) =
+ qbits_of_clusters->at<loco::DataType::S32>(i);
+ packed_clusters->at<loco::DataType::S32>(i * 2 + 1) =
+ size_of_clusters->at<loco::DataType::S32>(i);
+ }
+
+ return packed_clusters;
+ }
+
+ /**
+ * @brief Exclude BCQ information nodes which are used for fusing BCQ operations
+ * from graph output by using CircleOutputExclude
+ */
+ void clear_BCQ_nodes()
+ {
+ auto createNoOp = [](luci::CircleNode *circle_node) {
+ auto graph = circle_node->graph();
+ auto noOp = graph->nodes()->create<luci::CircleOutputExclude>();
+
+ if (circle_node->shape_status() == luci::ShapeStatus::VALID)
+ {
+ noOp->dtype(circle_node->dtype());
+ noOp->rank(circle_node->rank());
+ for (uint32_t i = 0; i < circle_node->rank(); ++i)
+ noOp->dim(i) = circle_node->dim(i);
+ }
+ else
+ {
+ // For type inference
+ noOp->dtype(loco::DataType::FLOAT32);
+ }
+
+ return noOp;
+ };
+
+ auto clear_nodes = [createNoOp](std::map<std::string, luci::CircleConst *> &nodes) {
+ for (auto &n : nodes)
+ {
+ auto node = n.second;
+
+ for (auto s : loco::succs(node))
+ {
+ if (auto outnode = dynamic_cast<luci::CircleOutput *>(s))
+ {
+ outnode->from(createNoOp(node));
+ }
+ else if (auto reshape_node = dynamic_cast<luci::CircleReshape *>(s))
+ {
+ for (auto o : loco::succs(reshape_node))
+ {
+ auto circle_output = loco::must_cast<luci::CircleOutput *>(o);
+ circle_output->from(createNoOp(reshape_node));
+ }
+ }
+ }
+ }
+ };
+
+ clear_nodes(_do_w_x);
+ clear_nodes(_alpha);
+ clear_nodes(_packed_binary_code);
+ clear_nodes(_number_of_clusters);
+ clear_nodes(_size_of_clusters);
+ clear_nodes(_qbits_of_clusters);
+ clear_nodes(_dequant_weight);
+ }
+
+private:
+ std::map<std::string, luci::CircleConst *> _do_w_x;
+ std::map<std::string, luci::CircleConst *> _alpha;
+ std::map<std::string, luci::CircleConst *> _packed_binary_code;
+ std::map<std::string, luci::CircleConst *> _number_of_clusters;
+ std::map<std::string, luci::CircleConst *> _size_of_clusters;
+ std::map<std::string, luci::CircleConst *> _qbits_of_clusters;
+ std::map<std::string, luci::CircleConst *> _dequant_weight;
+};
+
+} // namespace
+
+namespace luci
+{
+
+bool FuseBCQPass::run(loco::Graph *g)
+{
+ BCQConverter converter;
+
+ bool changed = false;
+
+ for (auto node : loco::all_nodes(g))
+ {
+ if (auto circle_const = dynamic_cast<luci::CircleConst *>(node))
+ {
+ converter.add_BCQ_info_node(circle_const);
+ }
+ }
+
+ for (auto node : loco::active_nodes(loco::output_nodes(g)))
+ {
+ if (auto gather = dynamic_cast<luci::CircleGather *>(node))
+ {
+ auto params = dynamic_cast<luci::CircleConst *>(gather->params());
+ if (params != nullptr && converter.has_BCQ_info(params))
+ {
+ auto bcq_gather = g->nodes()->create<luci::CircleBCQGather>();
+
+ bcq_gather->input_scales(converter.get_alpha(params));
+ bcq_gather->input_binary(converter.get_packed_binary_code(params));
+ bcq_gather->indices(gather->indices());
+ bcq_gather->input_clusters(converter.packed_clusters(params));
+
+ const auto binary_hidden_size =
+ loco::must_cast<luci::CircleConst *>(bcq_gather->input_binary())->dim(1).value() * 32;
+ bcq_gather->input_hidden_size(binary_hidden_size);
+
+ if (converter.do_w_x(params))
+ {
+ bcq_gather->axis(gather->axis());
+ }
+ else
+ {
+ const auto axis_transpose = (gather->axis() == 0) ? 1 : 0;
+ bcq_gather->axis(axis_transpose);
+ }
+
+ loco::replace(gather).with(bcq_gather);
+
+ changed = true;
+ }
+ }
+ else if (auto fully_connected = dynamic_cast<luci::CircleFullyConnected *>(node))
+ {
+ auto weights = dynamic_cast<luci::CircleConst *>(fully_connected->weights());
+ if (weights != nullptr && converter.has_BCQ_info(weights))
+ {
+ auto bcq_fc = g->nodes()->create<luci::CircleBCQFullyConnected>();
+
+ bcq_fc->weights_scales(converter.get_alpha(weights));
+ bcq_fc->weights_binary(converter.get_packed_binary_code(weights));
+ bcq_fc->bias(fully_connected->bias());
+ bcq_fc->weights_clusters(converter.packed_clusters(weights));
+ bcq_fc->fusedActivationFunction(fully_connected->fusedActivationFunction());
+
+ loco::Node *bcq_input = fully_connected->input();
+ int32_t batch_rank = 0;
+
+ // If input of BCQFullyConnected has more than rank 2, we should reshape it as rank 2
+ const auto original_input = loco::must_cast<luci::CircleNode *>(fully_connected->input());
+ if (original_input->shape_status() == ShapeStatus::VALID && original_input->rank() > 2)
+ {
+ auto new_shape = g->nodes()->create<luci::CircleConst>();
+ new_shape->dtype(loco::DataType::S32);
+ new_shape->size<loco::DataType::S32>(2);
+ new_shape->rank(1);
+ new_shape->dim(0) = 2;
+
+ auto batch_size = 1;
+ for (uint32_t i = 0; i < original_input->rank() - 1; ++i)
+ batch_size *= original_input->dim(i).value();
+
+ new_shape->at<loco::DataType::S32>(0) = batch_size;
+ new_shape->at<loco::DataType::S32>(1) =
+ original_input->dim(original_input->rank() - 1).value();
+ new_shape->shape_status(ShapeStatus::VALID);
+
+ auto reshape = g->nodes()->create<luci::CircleReshape>();
+ reshape->tensor(original_input);
+ reshape->shape(new_shape);
+
+ bcq_input = reshape;
+ batch_rank = original_input->rank() - 2;
+ }
+
+ // If x_w formation, we should insert Transpose in front and back of BCQFullyConnected
+ if (converter.do_w_x(weights))
+ {
+ const auto binary_hidden_size =
+ loco::must_cast<luci::CircleNode *>(fully_connected->input())
+ ->dim(batch_rank)
+ .value();
+ bcq_fc->weights_hidden_size(binary_hidden_size);
+ bcq_fc->input(bcq_input);
+ loco::replace(fully_connected).with(bcq_fc);
+ }
+ else
+ {
+ const auto binary_hidden_size =
+ loco::must_cast<luci::CircleNode *>(fully_connected->input())
+ ->dim(1 + batch_rank)
+ .value();
+ bcq_fc->weights_hidden_size(binary_hidden_size);
+
+ auto perm = g->nodes()->create<luci::CircleConst>();
+ perm->dtype(loco::DataType::S32);
+ perm->size<loco::DataType::S32>(2);
+ perm->rank(1);
+ perm->dim(0) = 2;
+ perm->at<loco::DataType::S32>(0) = 1;
+ perm->at<loco::DataType::S32>(1) = 0;
+ perm->shape_status(ShapeStatus::VALID);
+
+ auto input_transpose = g->nodes()->create<luci::CircleTranspose>();
+ input_transpose->a(bcq_input);
+ input_transpose->perm(perm);
+
+ bcq_fc->input(input_transpose);
+
+ auto output_transpose = g->nodes()->create<luci::CircleTranspose>();
+ output_transpose->a(bcq_fc);
+ output_transpose->perm(perm);
+
+ loco::replace(fully_connected).with(output_transpose);
+ }
+
+ changed = true;
+ }
+ }
+ }
+
+ if (changed)
+ converter.clear_BCQ_nodes();
+
+ return changed;
+}
+
+} // namespace luci
diff --git a/compiler/luci/pass/src/FuseInstanceNormPass.cpp b/compiler/luci/pass/src/FuseInstanceNormPass.cpp
index 180b5bbef..ad8765c41 100644
--- a/compiler/luci/pass/src/FuseInstanceNormPass.cpp
+++ b/compiler/luci/pass/src/FuseInstanceNormPass.cpp
@@ -15,6 +15,7 @@
*/
#include "luci/Pass/FuseInstanceNormPass.h"
+#include "FuseInstanceNormPassInternal.h"
#include <luci/IR/CircleNodes.h>
@@ -114,8 +115,6 @@ bool NodeFiller<ARG_TYPE_1, ARG_TYPE_2>::with_commutative_args_of(const COMM_NOD
} // namespace
// Helper to check detail
-namespace
-{
/// @return true When node has shape of '1 x .. x 1 x depth'
bool is_1D_with_dummy_dim(luci::CircleConst *node, uint32_t depth)
@@ -130,7 +129,23 @@ bool is_1D_with_dummy_dim(luci::CircleConst *node, uint32_t depth)
return node->dim(axis).value() == depth;
}
-bool is_instance_mean(luci::CircleMean *mean)
+/// @return true if node shape consists of ones, except the one before the last dim: 1,...1,depth,1
+bool is_quasi_1D_with_dummy_dim(luci::CircleConst *node, uint32_t depth)
+{
+ auto rank = node->rank();
+ // minimal accepted shape is [1 x depth x 1]
+ if (rank < 3)
+ return false;
+ const auto depth_axis = rank - 2;
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ if (axis != depth_axis && node->dim(axis).value() != 1)
+ return false;
+ }
+ return node->dim(depth_axis).value() == depth;
+}
+
+bool is_instance_mean_v0(luci::CircleMean *mean)
{
//
// CHECK 1) input is rank 4
@@ -175,7 +190,53 @@ bool is_instance_mean(luci::CircleMean *mean)
return mean->keep_dims();
}
-} // namespace
+bool is_instance_mean_v1(luci::CircleMean *mean)
+{
+ //
+ // CHECK 1) input is rank 5 (NHWCX)
+ //
+ auto input = mean->input();
+ if (not loco::shape_known(input))
+ return false;
+ auto input_shape = loco::shape_get(input).as<loco::TensorShape>();
+ if (input_shape.rank() != 5)
+ return false;
+
+ //
+ // CHECK 2) 'reduction indices' is CircleConst of value [1,2,4], that is HWX of NHWCX input shape
+ //
+ // TODO Support equivalent case, like [-3,-2]
+ // TODO Support non-Const case?
+ // TODO What if input is NCHW format in Circle?
+ auto red_indices = dynamic_cast<luci::CircleConst *>(mean->reduction_indices());
+ if (not red_indices)
+ return false;
+ if (red_indices->rank() != 1)
+ return false;
+ std::set<int32_t> red_indices_set;
+
+ // TODO Currently only support S32, support other types
+ if (red_indices->dtype() != loco::DataType::S32)
+ return false;
+ for (uint32_t i = 0; i < red_indices->dim(0).value(); ++i)
+ red_indices_set.insert(red_indices->at<loco::DataType::S32>(i));
+
+ if (red_indices_set.size() != 3)
+ return false;
+ if (red_indices_set.find(1) == red_indices_set.end())
+ return false;
+ if (red_indices_set.find(2) == red_indices_set.end())
+ return false;
+ if (red_indices_set.find(4) == red_indices_set.end())
+ return false;
+
+ //
+ // CHECK 3) keep_dims == true (?)
+ //
+ // We only have case of 'keep_dims == true' so far, but it might be okay with 'keep_dims == false'
+ // TODO Check this fact, and if true, return true regardless of keep_dims
+ return mean->keep_dims();
+}
// Helper to fuse Instance Norm
namespace
@@ -227,14 +288,61 @@ namespace
* |
* V
* [Out]
+ *-------------------------------------------------------------------
+ * [In]
+ * |
+ * V
+ * ifm
+ * |
+ * V
+ *        +---------reshape_of_ifm ----+   (reduction indices)
+ * | | | |
+ * | | V V
+ * | | mean_of_reshape -------------+
+ * | V | |
+ *        |       sqdiff <--+   (reduction indices)       |
+ * | | | |
+ * | V | |
+ * | mean_as_variance <---+ const_as_epsilon |
+ * | | | |
+ * | V | |
+ * | add_as_variance <--------+ |
+ * | | |
+ * | V |
+ * | rsqrt const_as_gamma |
+ * | | | |
+ * | V | |
+ * | mul_gamma <--+ |
+ * | | | |
+ * V V V |
+ * mul_as_scaled_reshape mul_as_scaled_mean <-----------+
+ * | |
+ * | const_as_beta |
+ * | | V
+ * | +------> sub
+ * V |
+ * add_as_terminal <----------+
+ * |
+ * V
+ * reshape_as_terminal
+ * |
+ * V
+ * [Out]
*/
class InstanceNormPattern final
{
public:
- InstanceNormPattern(luci::CircleAdd *candidate)
+ enum PatternVersion
+ {
+ Version_0,
+ Version_1
+ };
+
+ InstanceNormPattern(luci::CircleAdd *candidate, PatternVersion pv)
{
assert(candidate);
add_as_terminal = candidate;
+ _pv = pv;
}
public:
@@ -244,7 +352,9 @@ public:
public:
// Context
loco::Node *ifm = nullptr;
+ luci::CircleReshape *reshape_of_ifm = nullptr;
luci::CircleMean *mean_of_ifm = nullptr;
+ luci::CircleMean *mean_of_reshape = nullptr;
luci::CircleSquaredDifference *sqdiff = nullptr;
luci::CircleMean *mean_as_variance = nullptr;
luci::CircleConst *const_as_epsilon = nullptr;
@@ -254,12 +364,14 @@ public:
luci::CircleMul *mul_gamma = nullptr;
luci::CircleMul *mul_as_scaled_ifm = nullptr;
luci::CircleMul *mul_as_scaled_mean = nullptr;
+ luci::CircleMul *mul_as_scaled_reshape = nullptr;
luci::CircleConst *const_as_beta = nullptr;
luci::CircleSub *sub = nullptr;
luci::CircleAdd *add_as_terminal = nullptr;
private:
bool _matched = false;
+ PatternVersion _pv;
};
bool InstanceNormPattern::matched()
@@ -273,8 +385,18 @@ bool InstanceNormPattern::matched()
// Check order is DFS
- CHECK_OR_FALSE(fill(&mul_as_scaled_ifm, &sub).with_commutative_args_of(add_as_terminal));
- CHECK_OR_FALSE(fill(&ifm, &mul_gamma).with_commutative_args_of(mul_as_scaled_ifm));
+ if (_pv == PatternVersion::Version_0)
+ {
+ CHECK_OR_FALSE(fill(&mul_as_scaled_ifm, &sub).with_commutative_args_of(add_as_terminal));
+ CHECK_OR_FALSE(fill(&ifm, &mul_gamma).with_commutative_args_of(mul_as_scaled_ifm));
+ }
+ if (_pv == PatternVersion::Version_1)
+ {
+ CHECK_OR_FALSE(fill(&mul_as_scaled_reshape, &sub).with_commutative_args_of(add_as_terminal));
+ CHECK_OR_FALSE(
+ fill(&reshape_of_ifm, &mul_gamma).with_commutative_args_of(mul_as_scaled_reshape));
+ ifm = reshape_of_ifm->tensor();
+ }
CHECK_OR_FALSE(loco::shape_known(ifm));
auto ifm_shape = loco::shape_get(ifm);
@@ -284,7 +406,15 @@ bool InstanceNormPattern::matched()
uint32_t ifm_channel_depth = ifm_tensor_shape.dim(3).value();
CHECK_OR_FALSE(fill(&rsqrt, &const_as_gamma).with_commutative_args_of(mul_gamma));
- CHECK_OR_FALSE(is_1D_with_dummy_dim(const_as_gamma, ifm_channel_depth));
+
+ if (_pv == PatternVersion::Version_0)
+ {
+ CHECK_OR_FALSE(is_1D_with_dummy_dim(const_as_gamma, ifm_channel_depth));
+ }
+ if (_pv == PatternVersion::Version_1)
+ {
+ CHECK_OR_FALSE(is_quasi_1D_with_dummy_dim(const_as_gamma, ifm_channel_depth));
+ }
add_as_variance = dynamic_cast<luci::CircleAdd *>(rsqrt->x());
CHECK_OR_FALSE(add_as_variance);
@@ -296,29 +426,69 @@ bool InstanceNormPattern::matched()
// TODO Support regarding broadcast
CHECK_OR_FALSE(const_as_epsilon->size<loco::DataType::FLOAT32>() == 1);
- CHECK_OR_FALSE(is_instance_mean(mean_as_variance));
+ if (_pv == PatternVersion::Version_0)
+ {
+ CHECK_OR_FALSE(is_instance_mean_v0(mean_as_variance));
+ }
+ if (_pv == PatternVersion::Version_1)
+ {
+ CHECK_OR_FALSE(is_instance_mean_v1(mean_as_variance));
+ }
+
sqdiff = dynamic_cast<luci::CircleSquaredDifference *>(mean_as_variance->input());
CHECK_OR_FALSE(sqdiff);
- loco::Node *ifm_should_be = nullptr;
- CHECK_OR_FALSE(fill(&ifm_should_be, &mean_of_ifm).with_commutative_args_of(sqdiff));
- CHECK_OR_FALSE(ifm == ifm_should_be);
- CHECK_OR_FALSE(is_instance_mean(mean_of_ifm));
- CHECK_OR_FALSE(ifm == mean_of_ifm->input());
+ if (_pv == PatternVersion::Version_0)
+ {
+ loco::Node *ifm_should_be = nullptr;
+ CHECK_OR_FALSE(fill(&ifm_should_be, &mean_of_ifm).with_commutative_args_of(sqdiff));
+ CHECK_OR_FALSE(ifm == ifm_should_be);
+ CHECK_OR_FALSE(is_instance_mean_v0(mean_of_ifm));
+ CHECK_OR_FALSE(ifm == mean_of_ifm->input());
+ }
+ if (_pv == PatternVersion::Version_1)
+ {
+ loco::Node *reshape_should_be = nullptr;
+ CHECK_OR_FALSE(fill(&reshape_should_be, &mean_of_reshape).with_commutative_args_of(sqdiff));
+ CHECK_OR_FALSE(reshape_of_ifm == reshape_should_be);
+ CHECK_OR_FALSE(is_instance_mean_v1(mean_of_reshape));
+ CHECK_OR_FALSE(reshape_of_ifm == mean_of_reshape->input());
+ }
const_as_beta = dynamic_cast<luci::CircleConst *>(sub->x());
CHECK_OR_FALSE(const_as_beta);
- CHECK_OR_FALSE(is_1D_with_dummy_dim(const_as_beta, ifm_channel_depth));
+
+ if (_pv == PatternVersion::Version_0)
+ {
+ CHECK_OR_FALSE(is_1D_with_dummy_dim(const_as_beta, ifm_channel_depth));
+ }
+ if (_pv == PatternVersion::Version_1)
+ {
+ CHECK_OR_FALSE(is_quasi_1D_with_dummy_dim(const_as_beta, ifm_channel_depth));
+ }
mul_as_scaled_mean = dynamic_cast<luci::CircleMul *>(sub->y());
CHECK_OR_FALSE(mul_as_scaled_mean);
luci::CircleMul *mul_gamma_should_be = nullptr;
luci::CircleMean *mean_of_ifm_should_be = nullptr;
- CHECK_OR_FALSE(fill(&mul_gamma_should_be, &mean_of_ifm_should_be)
- .with_commutative_args_of(mul_as_scaled_mean));
- CHECK_OR_FALSE(mul_gamma == mul_gamma_should_be);
- CHECK_OR_FALSE(mean_of_ifm == mean_of_ifm_should_be);
+ luci::CircleMean *mean_of_reshape_should_be = nullptr;
+
+ if (_pv == PatternVersion::Version_0)
+ {
+ CHECK_OR_FALSE(fill(&mul_gamma_should_be, &mean_of_ifm_should_be)
+ .with_commutative_args_of(mul_as_scaled_mean));
+ CHECK_OR_FALSE(mul_gamma == mul_gamma_should_be);
+ CHECK_OR_FALSE(mean_of_ifm == mean_of_ifm_should_be);
+ }
+ if (_pv == PatternVersion::Version_1)
+ {
+ CHECK_OR_FALSE(fill(&mul_gamma_should_be, &mean_of_reshape_should_be)
+ .with_commutative_args_of(mul_as_scaled_mean));
+ CHECK_OR_FALSE(mul_gamma == mul_gamma_should_be);
+ CHECK_OR_FALSE(mean_of_reshape == mean_of_reshape_should_be);
+ }
+
#undef CHECK_OR_FALSE
_matched = true;
return true;
@@ -381,13 +551,28 @@ namespace luci
bool FuseInstanceNormPass::run(loco::Graph *g)
{
bool changed = false;
+ luci::CircleAdd *add;
+ InstanceNormPattern::PatternVersion pv;
+
for (auto node : loco::active_nodes(loco::output_nodes(g)))
{
- auto add = dynamic_cast<luci::CircleAdd *>(node);
- if (not add)
- continue;
+ auto reshape = dynamic_cast<luci::CircleReshape *>(node);
+ if (not reshape)
+ {
+ add = dynamic_cast<luci::CircleAdd *>(node);
+ if (not add)
+ continue;
+ pv = InstanceNormPattern::PatternVersion::Version_0;
+ }
+ else
+ {
+ add = dynamic_cast<luci::CircleAdd *>(reshape->tensor());
+ if (not add)
+ continue;
+ pv = InstanceNormPattern::PatternVersion::Version_1;
+ }
- InstanceNormPattern pattern(add);
+ InstanceNormPattern pattern(add, pv);
if (not pattern.matched())
continue;
diff --git a/compiler/luci/pass/src/FuseInstanceNormPass.test.cpp b/compiler/luci/pass/src/FuseInstanceNormPass.test.cpp
new file mode 100644
index 000000000..3037f3def
--- /dev/null
+++ b/compiler/luci/pass/src/FuseInstanceNormPass.test.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FuseInstanceNormPassInternal.h"
+
+#include <vector>
+
+#include <gtest/gtest.h>
+
+namespace
+{
+
+void setShape(luci::CircleNode &node, const std::vector<int> &v)
+{
+ node.rank(v.size());
+ for (int i = 0; i < v.size(); ++i)
+ {
+ node.dim(i) = v[i];
+ }
+}
+
+} // namespace
+
+TEST(FuseInstanceNormPass, is_quasi_1D_with_dummy_dim)
+{
+ luci::CircleConst const_node;
+
+ setShape(const_node, {});
+ EXPECT_FALSE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+
+ setShape(const_node, {1});
+ EXPECT_FALSE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+
+ setShape(const_node, {8});
+ EXPECT_FALSE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+
+ setShape(const_node, {1, 2, 1, 8, 1});
+ EXPECT_FALSE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+
+ setShape(const_node, {8, 3});
+ EXPECT_FALSE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+
+ setShape(const_node, {8, 1});
+ EXPECT_FALSE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+
+ setShape(const_node, {1, 8, 1});
+ EXPECT_TRUE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+
+ setShape(const_node, {1, 1, 1, 8, 1});
+ EXPECT_TRUE(is_quasi_1D_with_dummy_dim(&const_node, 8));
+}
diff --git a/compiler/luci/pass/src/FuseInstanceNormPassInternal.h b/compiler/luci/pass/src/FuseInstanceNormPassInternal.h
new file mode 100644
index 000000000..32b638ba5
--- /dev/null
+++ b/compiler/luci/pass/src/FuseInstanceNormPassInternal.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_CIRCLE_FUSE_INSTANCE_NORM_PASS_INTERNAL_H__
+#define __LUCI_CIRCLE_FUSE_INSTANCE_NORM_PASS_INTERNAL_H__
+
+#include <luci/IR/CircleNodes.h>
+
+/// @return true When node has shape of '1 x .. x 1 x depth'
+bool is_1D_with_dummy_dim(luci::CircleConst *node, uint32_t depth);
+
+/// @return true When node has shape of '1 x .. x depth x 1'
+bool is_quasi_1D_with_dummy_dim(luci::CircleConst *node, uint32_t depth);
+
+#endif // __LUCI_CIRCLE_FUSE_INSTANCE_NORM_PASS_INTERNAL_H__
diff --git a/compiler/luci/pass/src/QuantizationUtils.cpp b/compiler/luci/pass/src/QuantizationUtils.cpp
new file mode 100644
index 000000000..6726ce746
--- /dev/null
+++ b/compiler/luci/pass/src/QuantizationUtils.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "QuantizationUtils.h"
+
+#include <luci/Log.h>
+
+#include <iostream>
+#include <cmath>
+
+namespace luci
+{
+
+void compute_sym_scale_zp(float min, float max, float &scaling_factor, int64_t &zp,
+ float &nudged_min, float &nudged_max)
+{
+ assert(min != max);
+
+ const int32_t kMaxScale = std::numeric_limits<int16_t>::max();
+ const int32_t kMinScale = -kMaxScale;
+ const double qmin_double = kMinScale;
+ const double qmax_double = kMaxScale;
+ const double rmin = std::fmin(0, min);
+ const double rmax = std::fmax(0, max);
+ double scale_factor_from_min_side{0};
+ double scale_factor_from_max_side{0};
+
+ if ((qmin_double * rmin) > 0)
+ scale_factor_from_min_side = rmin / qmin_double;
+
+ if ((qmax_double * rmax) > 0)
+ scale_factor_from_max_side = rmax / qmax_double;
+
+ scaling_factor = scale_factor_from_min_side > scale_factor_from_max_side
+ ? scale_factor_from_min_side
+ : scale_factor_from_max_side;
+ zp = 0;
+ nudged_min = static_cast<float>(qmin_double * scaling_factor);
+ nudged_max = static_cast<float>(qmax_double * scaling_factor);
+}
+
+void compute_asym_scale_zp(float min, float max, float &scaling_factor, int64_t &zp,
+ float &nudged_min, float &nudged_max)
+{
+ LOGGER(l);
+
+ assert(min <= max);
+ const int32_t kMinScale = 0;
+ const int32_t kMaxScale = 255;
+ const double qmin_double = kMinScale;
+ const double qmax_double = kMaxScale;
+ const double rmin = std::fmin(0, min);
+ const double rmax = std::fmax(0, max);
+
+ double scale = (rmax - rmin) / (qmax_double - qmin_double);
+ double zero_point_double = 0;
+ uint8_t nudged_zero_point = 0;
+ if (scale == 0)
+ {
+ WARN(l) << "The minimum and maximum values are the same." << std::endl;
+ if (min >= 0 && max >= 0)
+ zero_point_double = kMinScale;
+ else
+ zero_point_double = kMaxScale;
+ }
+ else
+ zero_point_double = qmin_double - rmin / scale;
+ if (zero_point_double <= qmin_double)
+ {
+ assert(min >= 0 && max >= 0);
+ nudged_zero_point = kMinScale;
+ scale = max / (qmax_double - qmin_double);
+ if (min > 0 && max > 0)
+ WARN(l) << "The minimum and maximum values are all positive." << std::endl;
+ }
+ else if (zero_point_double >= qmax_double)
+ {
+ assert(min < 0 && max < 0);
+ nudged_zero_point = kMaxScale;
+ scale = -min / (qmax_double - qmin_double);
+ WARN(l) << "The minimum and maximum values are all negative." << std::endl;
+ }
+ else
+ {
+ assert(min < 0 && max >= 0);
+ nudged_zero_point = static_cast<uint8_t>(std::round(zero_point_double));
+ }
+
+ nudged_min = static_cast<float>((qmin_double - nudged_zero_point) * scale);
+ nudged_max = static_cast<float>((qmax_double - nudged_zero_point) * scale);
+
+ scaling_factor = scale;
+ zp = nudged_zero_point;
+}
+
+bool get_channel_dim_index(CircleConst *node, loco::TensorShape &dimension, int &channel_dim_index)
+{
+ auto succs = loco::succs(node);
+ if (succs.size() != 1) // assume weights is used by only one node
+ return false;
+
+ for (auto out : succs)
+ {
+ auto conv = dynamic_cast<CircleConv2D *>(out);
+ auto dw_conv = dynamic_cast<CircleDepthwiseConv2D *>(out);
+ auto tw_conv = dynamic_cast<CircleTransposeConv *>(out);
+ auto fc = dynamic_cast<CircleFullyConnected *>(out);
+
+ // Refer to https://github.com/Samsung/ONE/pull/2448.
+ if ((conv != nullptr && conv->filter() == node) ||
+ (tw_conv != nullptr && tw_conv->filter() == node)) // OHWI
+ {
+ assert(node->rank() == 4);
+ dimension.dim(0).set(node->dim(0).value());
+ dimension.dim(1).set(node->dim(1).value());
+ dimension.dim(2).set(node->dim(2).value());
+ dimension.dim(3).set(node->dim(3).value());
+ channel_dim_index = 0; // Set channel_dim_index based on "O"
+ return true;
+ }
+ else if (dw_conv != nullptr && dw_conv->filter() == node) // IHWC
+ {
+ assert(node->rank() == 4);
+ dimension.dim(0).set(node->dim(0).value());
+ dimension.dim(1).set(node->dim(1).value());
+ dimension.dim(2).set(node->dim(2).value());
+ dimension.dim(3).set(node->dim(3).value());
+ channel_dim_index = 3; // Set channel_dim_index based on "C"
+ return true;
+ }
+ else if (fc != nullptr && fc->weights() == node) // OI
+ {
+ assert(node->rank() == 2);
+ dimension.dim(0).set(node->dim(0).value());
+ dimension.dim(1).set(1); // Set FC layer like CONV
+ dimension.dim(2).set(1);
+ dimension.dim(3).set(node->dim(1).value());
+ channel_dim_index = 0; // Set channel_dim_index based on "O"
+ return true;
+ }
+ else
+ {
+      // node does not support channel-wise quantization
+ assert(false);
+ }
+ }
+
+ return false;
+}
+
+uint32_t cal_offset(loco::TensorShape &dimension, uint32_t *indices)
+{
+ return indices[0] * dimension.dim(1).value() * dimension.dim(2).value() *
+ dimension.dim(3).value() +
+ indices[1] * dimension.dim(2).value() * dimension.dim(3).value() +
+ indices[2] * dimension.dim(3).value() + indices[3];
+}
+
+} // namespace luci
diff --git a/compiler/luci/pass/src/QuantizationUtils.h b/compiler/luci/pass/src/QuantizationUtils.h
new file mode 100644
index 000000000..ec0e86df8
--- /dev/null
+++ b/compiler/luci/pass/src/QuantizationUtils.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LUCI_QUANTIZATION_UTILS_H__
+#define __LUCI_QUANTIZATION_UTILS_H__
+
+#include <luci/IR/CircleNodes.h>
+#include <loco/IR/TensorShape.h>
+
+namespace luci
+{
+
+// Compute a symmetric (zero-point == 0) scale for the range [min, max];
+// the actually-representable range is returned via nudged_min/nudged_max.
+void compute_sym_scale_zp(float min, float max, float &scaling_factor, int64_t &zp,
+                          float &nudged_min, float &nudged_max);
+
+// Compute an asymmetric scale/zero-point for the range [min, max];
+// the actually-representable range is returned via nudged_min/nudged_max.
+void compute_asym_scale_zp(float min, float max, float &scaling_factor, int64_t &zp,
+                           float &nudged_min, float &nudged_max);
+
+// Fill `dimension` with node's shape (rank 4; FC weights mapped to O11I) and
+// report which axis is the channel axis. Returns false if node is not a
+// supported weight tensor.
+bool get_channel_dim_index(CircleConst *node, loco::TensorShape &dimension, int &channel_dim_index);
+
+// Flat row-major offset of a rank-4 index within `dimension`.
+uint32_t cal_offset(loco::TensorShape &dimension, uint32_t *indices);
+
+} // namespace luci
+
+#endif // __LUCI_QUANTIZATION_UTILS_H__
diff --git a/compiler/luci/pass/src/QuantizeDequantizeWeightsPass.cpp b/compiler/luci/pass/src/QuantizeDequantizeWeightsPass.cpp
new file mode 100644
index 000000000..c492234c7
--- /dev/null
+++ b/compiler/luci/pass/src/QuantizeDequantizeWeightsPass.cpp
@@ -0,0 +1,495 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Pass/QuantizeDequantizeWeightsPass.h"
+#include "QuantizationUtils.h"
+
+#include <luci/IR/CircleNodes.h>
+#include <luci/IR/CircleNodeVisitor.h>
+#include <luci/Log.h>
+#include <loco/IR/TensorShape.h>
+
+#include <iostream>
+#include <cmath>
+
+namespace luci
+{
+
+namespace
+{
+
+// Compute per-channel min/max of a weight tensor.
+// `min`/`max` are resized to the channel count reported by
+// get_channel_dim_index and filled with the smallest/largest FLOAT32 value
+// observed in each channel slice.
+void cal_minmax_per_channel(CircleConst *node, std::vector<float> &min, std::vector<float> &max)
+{
+  loco::TensorShape dimension;
+  dimension.rank(4);
+  uint32_t indices[4] = {
+      0,
+  };
+  int channel_dim_index{0};
+  int size{0};
+
+  // get_channel_dim_index also fills `dimension` with the node's 4-D shape.
+  if (!get_channel_dim_index(node, dimension, channel_dim_index))
+  {
+    assert(false);
+    return;
+  }
+  size = dimension.dim(channel_dim_index).value();
+
+  // Track whether a channel has seen any value yet, so the first value
+  // initializes min/max instead of being compared against uninitialized data.
+  std::vector<bool> has_min_max_value(size, false);
+  min.resize(size);
+  max.resize(size);
+  for (indices[0] = 0; indices[0] < dimension.dim(0).value(); indices[0]++)
+  {
+    for (indices[1] = 0; indices[1] < dimension.dim(1).value(); indices[1]++)
+    {
+      for (indices[2] = 0; indices[2] < dimension.dim(2).value(); indices[2]++)
+      {
+        for (indices[3] = 0; indices[3] < dimension.dim(3).value(); indices[3]++)
+        {
+          int channel_idx = indices[channel_dim_index];
+          auto data = node->at<loco::DataType::FLOAT32>(cal_offset(dimension, indices));
+          if (has_min_max_value[channel_idx])
+          {
+            min[channel_idx] = data < min[channel_idx] ? data : min[channel_idx];
+            max[channel_idx] = data > max[channel_idx] ? data : max[channel_idx];
+          }
+          else
+          {
+            min[channel_idx] = data;
+            max[channel_idx] = data;
+            has_min_max_value[channel_idx] = true;
+          }
+        }
+      }
+    }
+  }
+}
+
+// Symmetric per-channel weight quantization to S16.
+// Computes a symmetric scale per channel from min/max (outputs written to
+// scaling_factor/zp/nudged_min/nudged_max, which must be pre-sized to the
+// channel count), clamps data to the nudged range, rounds to integers, then
+// retypes the tensor FLOAT32 -> S16 in place.
+void sym_wquant_per_channel(CircleConst *node, std::vector<float> &min, std::vector<float> &max,
+                            std::vector<float> &scaling_factor, std::vector<int64_t> &zp,
+                            std::vector<float> &nudged_min, std::vector<float> &nudged_max)
+{
+  assert(node->dtype() == loco::DataType::FLOAT32);
+  // Symmetric range: [-32767, 32767] (note: -max, not int16 lowest -32768).
+  const int32_t kMaxScale = std::numeric_limits<int16_t>::max();
+  const int32_t kMinScale = -kMaxScale;
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+  std::vector<int32_t> quantized_values(size);
+
+  for (size_t i = 0; i < min.size(); ++i)
+  {
+    compute_sym_scale_zp(min[i], max[i], scaling_factor[i], zp[i], nudged_min[i], nudged_max[i]);
+  }
+
+  loco::TensorShape dimension;
+  dimension.rank(4);
+  uint32_t indices[4] = {
+      0,
+  };
+  int channel_dim_index{0};
+
+  if (!get_channel_dim_index(node, dimension, channel_dim_index))
+  {
+    assert(false);
+    return;
+  }
+
+  for (indices[0] = 0; indices[0] < dimension.dim(0).value(); indices[0]++)
+  {
+    for (indices[1] = 0; indices[1] < dimension.dim(1).value(); indices[1]++)
+    {
+      for (indices[2] = 0; indices[2] < dimension.dim(2).value(); indices[2]++)
+      {
+        for (indices[3] = 0; indices[3] < dimension.dim(3).value(); indices[3]++)
+        {
+          int channel_idx = indices[channel_dim_index];
+          const float scaling_factor_inv = 1.0 / scaling_factor[channel_idx];
+          auto data = node->at<loco::DataType::FLOAT32>(cal_offset(dimension, indices));
+          // Clamp to the representable (nudged) range before rounding.
+          data = data < nudged_min[channel_idx] ? nudged_min[channel_idx] : data;
+          data = data > nudged_max[channel_idx] ? nudged_max[channel_idx] : data;
+          quantized_values[cal_offset(dimension, indices)] =
+              static_cast<int32_t>(std::round(data * scaling_factor_inv));
+        }
+      }
+    }
+  }
+
+  node->dtype(loco::DataType::S16);      // change the type of tensor
+  node->size<loco::DataType::S16>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::S16>(i) =
+        std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+}
+
+// Symmetric per-channel dequantization: maps each S16 element back to float
+// as (value * scaling_factor[channel]) and retypes the tensor S16 -> FLOAT32
+// in place.
+void sym_wdequant_per_channel(CircleConst *node, std::vector<float> &scaling_factor)
+{
+  assert(node->dtype() == loco::DataType::S16);
+  uint32_t size = node->size<loco::DataType::S16>();
+  std::vector<float> dequantized_values(size);
+
+  loco::TensorShape dimension;
+  dimension.rank(4);
+  uint32_t indices[4] = {
+      0,
+  };
+  int channel_dim_index{0};
+
+  if (!get_channel_dim_index(node, dimension, channel_dim_index))
+  {
+    assert(false);
+    return;
+  }
+
+  for (indices[0] = 0; indices[0] < dimension.dim(0).value(); indices[0]++)
+  {
+    for (indices[1] = 0; indices[1] < dimension.dim(1).value(); indices[1]++)
+    {
+      for (indices[2] = 0; indices[2] < dimension.dim(2).value(); indices[2]++)
+      {
+        for (indices[3] = 0; indices[3] < dimension.dim(3).value(); indices[3]++)
+        {
+          int channel_idx = indices[channel_dim_index];
+          auto data = node->at<loco::DataType::S16>(cal_offset(dimension, indices));
+          dequantized_values[cal_offset(dimension, indices)] =
+              static_cast<float>(data) * scaling_factor[channel_idx];
+        }
+      }
+    }
+  }
+
+  node->dtype(loco::DataType::FLOAT32);      // change the type of tensor
+  node->size<loco::DataType::FLOAT32>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::FLOAT32>(i) = dequantized_values[i];
+  }
+}
+
+// Asymmetric per-channel weight quantization to U8 ([0, 255]).
+// Computes a scale/zero-point per channel from min/max (outputs written to
+// scaling_factor/zp/nudged_min/nudged_max, pre-sized to the channel count),
+// clamps data to the nudged range, quantizes relative to nudged_min, then
+// retypes the tensor FLOAT32 -> U8 in place.
+void asymmetric_wquant_per_channel(CircleConst *node, std::vector<float> &min,
+                                   std::vector<float> &max, std::vector<float> &scaling_factor,
+                                   std::vector<int64_t> &zp, std::vector<float> &nudged_min,
+                                   std::vector<float> &nudged_max)
+{
+  assert(node->dtype() == loco::DataType::FLOAT32);
+
+  const int32_t kMinScale = 0;
+  const int32_t kMaxScale = 255;
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+  std::vector<int32_t> quantized_values(size);
+
+  for (size_t i = 0; i < min.size(); ++i)
+  {
+    compute_asym_scale_zp(min[i], max[i], scaling_factor[i], zp[i], nudged_min[i], nudged_max[i]);
+  }
+
+  loco::TensorShape dimension;
+  dimension.rank(4);
+  uint32_t indices[4] = {
+      0,
+  };
+  int channel_dim_index{0};
+
+  if (!get_channel_dim_index(node, dimension, channel_dim_index))
+  {
+    assert(false);
+    return;
+  }
+
+  for (indices[0] = 0; indices[0] < dimension.dim(0).value(); indices[0]++)
+  {
+    for (indices[1] = 0; indices[1] < dimension.dim(1).value(); indices[1]++)
+    {
+      for (indices[2] = 0; indices[2] < dimension.dim(2).value(); indices[2]++)
+      {
+        for (indices[3] = 0; indices[3] < dimension.dim(3).value(); indices[3]++)
+        {
+          int channel_idx = indices[channel_dim_index];
+          const float scaling_factor_inv = 1.0 / scaling_factor[channel_idx];
+          auto data = node->at<loco::DataType::FLOAT32>(cal_offset(dimension, indices));
+          // Clamp to the representable (nudged) range before rounding.
+          data = data < nudged_min[channel_idx] ? nudged_min[channel_idx] : data;
+          data = data > nudged_max[channel_idx] ? nudged_max[channel_idx] : data;
+          // Quantize as an offset from nudged_min, so nudged_min maps to 0.
+          quantized_values[cal_offset(dimension, indices)] = static_cast<int32_t>(
+              std::round((data - nudged_min[channel_idx]) * scaling_factor_inv));
+        }
+      }
+    }
+  }
+
+  node->dtype(loco::DataType::U8);      // change the type of tensor
+  node->size<loco::DataType::U8>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::U8>(i) = std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+}
+
+// Asymmetric per-channel dequantization: maps each U8 element back to float
+// as (value * scaling_factor[channel] + nudged_min[channel]) and retypes the
+// tensor U8 -> FLOAT32 in place.
+void asymmetric_wdequant_per_channel(CircleConst *node, std::vector<float> &scaling_factor,
+                                     std::vector<float> &nudged_min)
+{
+  assert(node->dtype() == loco::DataType::U8);
+  uint32_t size = node->size<loco::DataType::U8>();
+  std::vector<float> dequantized_values(size);
+
+  loco::TensorShape dimension;
+  dimension.rank(4);
+  uint32_t indices[4] = {
+      0,
+  };
+  int channel_dim_index{0};
+
+  if (!get_channel_dim_index(node, dimension, channel_dim_index))
+  {
+    assert(false);
+    return;
+  }
+
+  for (indices[0] = 0; indices[0] < dimension.dim(0).value(); indices[0]++)
+  {
+    for (indices[1] = 0; indices[1] < dimension.dim(1).value(); indices[1]++)
+    {
+      for (indices[2] = 0; indices[2] < dimension.dim(2).value(); indices[2]++)
+      {
+        for (indices[3] = 0; indices[3] < dimension.dim(3).value(); indices[3]++)
+        {
+          int channel_idx = indices[channel_dim_index];
+          auto data = node->at<loco::DataType::U8>(cal_offset(dimension, indices));
+          dequantized_values[cal_offset(dimension, indices)] =
+              static_cast<float>(data) * scaling_factor[channel_idx] + nudged_min[channel_idx];
+        }
+      }
+    }
+  }
+
+  node->dtype(loco::DataType::FLOAT32);      // change the type of tensor
+  node->size<loco::DataType::FLOAT32>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::FLOAT32>(i) = dequantized_values[i];
+  }
+}
+
+// Per-layer asymmetric weight quantization to U8 ([0, 255]) using a single
+// layer-wide [min, max]. Outputs the computed scale/zero-point and nudged
+// range through the reference parameters and retypes the tensor
+// FLOAT32 -> U8 in place.
+void asymmetric_wquant_with_minmax_per_layer(CircleConst *node, float min, float max,
+                                             float &scaling_factor, int64_t &zp, float &nudged_min,
+                                             float &nudged_max)
+{
+
+  const int32_t kMinScale = 0;
+  const int32_t kMaxScale = 255;
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+  compute_asym_scale_zp(min, max, scaling_factor, zp, nudged_min, nudged_max);
+  const float scaling_factor_inv = 1.0 / scaling_factor;
+  std::vector<int32_t> quantized_values(size);
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    // clipping
+    auto data = node->at<loco::DataType::FLOAT32>(i);
+    data = data < nudged_min ? nudged_min : data;
+    data = data > nudged_max ? nudged_max : data;
+    // Quantize as an offset from nudged_min, so nudged_min maps to 0.
+    quantized_values[i] =
+        static_cast<int32_t>(std::round((data - nudged_min) * scaling_factor_inv));
+  }
+
+  node->dtype(loco::DataType::U8);      // change the type of tensor
+  node->size<loco::DataType::U8>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::U8>(i) = std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+}
+
+// Undo per-layer asymmetric quantization in place: each U8 element is mapped
+// back to float as (value * scaling_factor + nudged_min), then the tensor is
+// retyped U8 -> FLOAT32.
+void asymmetric_wdequant_with_minmax_per_layer(CircleConst *node, float scaling_factor,
+                                               float nudged_min)
+{
+  const uint32_t element_count = node->size<loco::DataType::U8>();
+
+  // Read all quantized values out first; the tensor is retyped below.
+  std::vector<float> restored(element_count);
+  for (uint32_t idx = 0; idx < element_count; ++idx)
+  {
+    const auto quantized = node->at<loco::DataType::U8>(idx);
+    restored[idx] = static_cast<float>(quantized) * scaling_factor + nudged_min;
+  }
+
+  node->dtype(loco::DataType::FLOAT32);               // change the type of tensor
+  node->size<loco::DataType::FLOAT32>(element_count); // resize tensor
+  for (uint32_t idx = 0; idx < element_count; ++idx)
+  {
+    node->at<loco::DataType::FLOAT32>(idx) = restored[idx];
+  }
+}
+
+// A node counts as already quantized once its storage dtype is integral:
+// U8/S16 for activations and weights, S32 for bias.
+bool is_quantized(const CircleNode *node)
+{
+  switch (node->dtype())
+  {
+    case loco::DataType::U8:  // activation, weight
+    case loco::DataType::S16: // activation, weight
+    case loco::DataType::S32: // bias
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Check if node is weights of conv2d, transpose_conv2d, depthwise_conv2d, or fully_connected layer
+// Returns true only when the constant feeds exactly one supported consumer
+// through its filter/weights input AND has the expected rank
+// (4 for convolutions, 2 for fully-connected).
+bool is_weights(CircleNode *node)
+{
+  auto circle_const = dynamic_cast<CircleConst *>(node);
+  if (circle_const == nullptr)
+    return false;
+
+  auto succs = loco::succs(node);
+  if (succs.size() != 1) // assume weights is used by only one node
+    return false;
+
+  for (auto out : succs)
+  {
+    auto conv = dynamic_cast<CircleConv2D *>(out);
+    if (conv != nullptr && conv->filter() == circle_const && circle_const->rank() == 4)
+      return true;
+
+    auto dw_conv = dynamic_cast<CircleDepthwiseConv2D *>(out);
+    if (dw_conv != nullptr && dw_conv->filter() == circle_const && circle_const->rank() == 4)
+      return true;
+
+    auto tw_conv = dynamic_cast<CircleTransposeConv *>(out);
+    if (tw_conv != nullptr && tw_conv->filter() == circle_const && circle_const->rank() == 4)
+      return true;
+
+    auto fc = dynamic_cast<CircleFullyConnected *>(out);
+    if (fc != nullptr && fc->weights() == circle_const && circle_const->rank() == 2)
+      return true;
+  }
+  return false;
+}
+
+/**
+ * @brief QuantizeDequantizeWeights quantizes and dequantizes tensors for weights
+ * @details Find min/max values on the fly, quantize the model, and dequantize the model.
+ *          The quantize/dequantize round-trip leaves weights as FLOAT32 values snapped
+ *          to the quantization grid, and records the quantparam on the weight node.
+ */
+struct QuantizeDequantizeWeights final : public luci::CircleNodeMutableVisitor<bool>
+{
+  QuantizeDequantizeWeights(loco::DataType input, loco::DataType output,
+                            QuantizationGranularity granularity)
+      : input_type(input), output_type(output), granularity(granularity)
+  {
+  }
+
+  loco::DataType input_type;            // input dtype (not read by this visitor)
+  loco::DataType output_type;           // target quantized dtype: U8 or S16
+  QuantizationGranularity granularity;  // layer-wise or channel-wise
+
+  // Quantize and dequantize input tensors of each node
+  bool visit(luci::CircleNode *node)
+  {
+    assert(output_type == loco::DataType::U8 || output_type == loco::DataType::S16);
+    LOGGER(l);
+    INFO(l) << "QuantizeDequantizeWeights visit node: " << node->name() << std::endl;
+    auto arity = node->arity();
+    for (uint32_t i = 0; i < arity; i++)
+    {
+      auto input_node = node->arg(i);
+      auto circle_node = loco::must_cast<luci::CircleNode *>(input_node);
+
+      // Check if this is already quantized
+      if (is_quantized(circle_node))
+        continue;
+
+      if (is_weights(circle_node))
+      {
+        auto circle_const = loco::must_cast<luci::CircleConst *>(circle_node);
+
+        // Find min/max per channel-wise
+        if (granularity == QuantizationGranularity::ChannelWise)
+        {
+          std::vector<float> min;
+          std::vector<float> max;
+
+          cal_minmax_per_channel(circle_const, min, max);
+
+          std::vector<float> nudged_min(min.size());
+          std::vector<float> nudged_max(min.size());
+          std::vector<float> scaling_factor(min.size());
+          std::vector<int64_t> zp(min.size());
+
+          if (output_type == loco::DataType::U8)
+          {
+            asymmetric_wquant_per_channel(circle_const, min, max, scaling_factor, zp, nudged_min,
+                                          nudged_max);
+            asymmetric_wdequant_per_channel(circle_const, scaling_factor, nudged_min);
+          }
+          else
+          {
+            sym_wquant_per_channel(circle_const, min, max, scaling_factor, zp, nudged_min,
+                                   nudged_max);
+            sym_wdequant_per_channel(circle_const, scaling_factor);
+          }
+
+          auto quantparam = std::make_unique<CircleQuantParam>();
+          quantparam->min = nudged_min;
+          quantparam->max = nudged_max;
+          quantparam->scale = scaling_factor;
+          quantparam->zerop = zp;
+          circle_node->quantparam(std::move(quantparam));
+        }
+        // Find min/max per layer-wise
+        else
+        {
+          float min = std::numeric_limits<float>::max();
+          float max = std::numeric_limits<float>::lowest();
+          // NOTE: the element index is named `e` (was `i`) to avoid shadowing
+          // the argument index `i` of the enclosing loop.
+          for (uint32_t e = 0; e < circle_const->size<loco::DataType::FLOAT32>(); e++)
+          {
+            auto data = circle_const->at<loco::DataType::FLOAT32>(e);
+            min = data < min ? data : min;
+            max = data > max ? data : max;
+          }
+          float scaling_factor{0};
+          int64_t zp{0};
+          float nudged_min{0};
+          float nudged_max{0};
+
+          asymmetric_wquant_with_minmax_per_layer(circle_const, min, max, scaling_factor, zp,
+                                                  nudged_min, nudged_max);
+          asymmetric_wdequant_with_minmax_per_layer(circle_const, scaling_factor, nudged_min);
+          auto quantparam = std::make_unique<CircleQuantParam>();
+          quantparam->min.push_back(nudged_min);
+          quantparam->max.push_back(nudged_max);
+          quantparam->scale.push_back(scaling_factor);
+          quantparam->zerop.push_back(zp);
+          circle_node->quantparam(std::move(quantparam));
+        }
+      }
+    }
+    return false;
+  }
+};
+
+} // namespace
+
+// Entry point of the pass: apply the QuantizeDequantizeWeights visitor to
+// every active node. Always returns false, so the pass manager runs it once.
+bool QuantizeDequantizeWeightsPass::run(loco::Graph *g)
+{
+  LOGGER(l);
+  INFO(l) << "QuantizeDequantizeWeightsPass Start" << std::endl;
+
+  // Quantize weights
+  for (auto node : loco::active_nodes(loco::output_nodes(g)))
+  {
+    QuantizeDequantizeWeights qw(_input_dtype, _output_dtype, _granularity);
+    auto circle_node = loco::must_cast<luci::CircleNode *>(node);
+    circle_node->accept(&qw);
+  }
+
+  INFO(l) << "QuantizeDequantizeWeightsPass End" << std::endl;
+  return false; // one time run
+}
+
+} // namespace luci
diff --git a/compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp b/compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp
new file mode 100644
index 000000000..f8abee751
--- /dev/null
+++ b/compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Pass/QuantizeWithMinMaxPass.h"
+#include "QuantizationUtils.h"
+
+#include <luci/IR/CircleNodes.h>
+#include <luci/IR/CircleNodeVisitor.h>
+#include <luci/Log.h>
+
+#include <oops/UserExn.h>
+
+#include <iostream>
+#include <cmath>
+
+namespace luci
+{
+
+namespace
+{
+
+// Check if the node is the bias of Conv2D, DepthwiseConv2D, or FullyConnected layer
+// If true, return <input, weight> pair of the successor node (used to quantize bias)
+// If false, return <nullptr, nullptr>
+// NOTE: a bias is assumed to be a CircleConst with exactly one consumer.
+std::pair<loco::Node *, loco::Node *> get_input_weight_of_bias(CircleNode *node)
+{
+  auto circle_const = dynamic_cast<CircleConst *>(node);
+  if (circle_const == nullptr)
+    return std::make_pair(nullptr, nullptr);
+
+  auto succs = loco::succs(node);
+  if (succs.size() != 1) // assume bias is used by only one node
+    return std::make_pair(nullptr, nullptr);
+
+  for (auto out : succs)
+  {
+    auto conv = dynamic_cast<CircleConv2D *>(out);
+    if (conv != nullptr && conv->bias() == circle_const)
+    {
+      assert(conv->input() != nullptr);
+      assert(conv->filter() != nullptr);
+      return std::make_pair(conv->input(), conv->filter());
+    }
+    auto dw_conv = dynamic_cast<CircleDepthwiseConv2D *>(out);
+    if (dw_conv != nullptr && dw_conv->bias() == circle_const)
+    {
+      assert(dw_conv->input() != nullptr);
+      assert(dw_conv->filter() != nullptr);
+      return std::make_pair(dw_conv->input(), dw_conv->filter());
+    }
+    auto fc = dynamic_cast<CircleFullyConnected *>(out);
+    if (fc != nullptr && fc->bias() == circle_const)
+    {
+      assert(fc->input() != nullptr);
+      assert(fc->weights() != nullptr);
+      return std::make_pair(fc->input(), fc->weights());
+    }
+  }
+  return std::make_pair(nullptr, nullptr);
+}
+
+// Per-layer bias quantization to S32. The bias scale is fixed to
+// input_scale * weight_scale (TFLite convention) with zero-point 0; both are
+// returned through the out-parameters. The tensor is retyped
+// FLOAT32 -> S32 in place, saturating to the int32 range.
+void asym_quant_bias_per_layer(CircleConst *node, float input_scale, float weight_scale,
+                               float *scaling_factor, int64_t *zp)
+{
+  float scale = input_scale * weight_scale;
+  // Guard against a zero scale (e.g. all-zero weights) to avoid dividing by 0.
+  const float scaling_factor_inv = (scale == 0) ? 0 : 1.0 / scale;
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+  std::vector<int32_t> quantized_values(size);
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    quantized_values[i] =
+        static_cast<int32_t>(std::round(node->at<loco::DataType::FLOAT32>(i) * scaling_factor_inv));
+  }
+
+  node->dtype(loco::DataType::S32);      // change the type of tensor
+  node->size<loco::DataType::S32>(size); // resize tensor
+  const int32_t kMinScale = std::numeric_limits<int32_t>::lowest();
+  const int32_t kMaxScale = std::numeric_limits<int32_t>::max();
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::S32>(i) =
+        std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+  *scaling_factor = scale;
+  *zp = 0;
+}
+
+// Per-channel bias quantization to S32. Each element i gets scale
+// input_scale * weight_scale[i] and zero-point 0, written into
+// scaling_factor/zp (pre-sized by the caller to the bias length). The tensor
+// is retyped FLOAT32 -> S32 in place, saturating to the int32 range.
+// NOTE(review): this indexes weight_scale per bias element, i.e. it assumes
+// one bias value per output channel.
+void quant_bias_per_channel(CircleConst *node, float input_scale, std::vector<float> &weight_scale,
+                            std::vector<float> &scaling_factor, std::vector<int64_t> &zp)
+{
+  float scaling_factor_inv{0};
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+  std::vector<int32_t> quantized_values(size);
+
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    scaling_factor[i] = input_scale * weight_scale[i];
+    // Guard against a zero scale to avoid dividing by 0.
+    scaling_factor_inv = (scaling_factor[i] == 0) ? 0 : 1.0 / scaling_factor[i];
+    quantized_values[i] =
+        static_cast<int32_t>(std::round(node->at<loco::DataType::FLOAT32>(i) * scaling_factor_inv));
+    zp[i] = 0;
+  }
+
+  node->dtype(loco::DataType::S32);      // change the type of tensor
+  node->size<loco::DataType::S32>(size); // resize tensor
+  const int32_t kMinScale = std::numeric_limits<int32_t>::lowest();
+  const int32_t kMaxScale = std::numeric_limits<int32_t>::max();
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::S32>(i) =
+        std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+}
+
+// True iff the node carries a quantparam with recorded min AND max values.
+bool has_min_max(const CircleNode *node)
+{
+  const auto qparam = node->quantparam();
+  if (qparam == nullptr)
+    return false;
+  return !(qparam->min.empty() || qparam->max.empty());
+}
+
+// A node counts as already quantized once its storage dtype is integral.
+// NOTE: unlike QuantizeDequantizeWeightsPass.cpp, S16 is not listed here.
+bool is_quantized(const CircleNode *node)
+{
+  return node->dtype() == loco::DataType::U8 || // activation, weight
+         node->dtype() == loco::DataType::S32;  // bias
+}
+
+// Symmetric per-channel weight quantization to S16 using precomputed
+// per-channel scales (no clamping here: scales were already nudged when
+// recorded). The tensor is retyped FLOAT32 -> S16 in place, saturating to
+// [-32767, 32767].
+void sym_wquant_per_channel(CircleConst *node, std::vector<float> &scaling_factor)
+{
+  assert(node->dtype() == loco::DataType::FLOAT32);
+
+  // Symmetric range: [-32767, 32767] (note: -max, not int16 lowest -32768).
+  const int32_t kMaxScale = std::numeric_limits<int16_t>::max();
+  const int32_t kMinScale = -kMaxScale;
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+  std::vector<int32_t> quantized_values(size);
+
+  loco::TensorShape dimension;
+  dimension.rank(4);
+  uint32_t indices[4] = {
+      0,
+  };
+  int channel_dim_index{0};
+
+  if (!get_channel_dim_index(node, dimension, channel_dim_index))
+  {
+    assert(false);
+    return;
+  }
+
+  for (indices[0] = 0; indices[0] < dimension.dim(0).value(); indices[0]++)
+  {
+    for (indices[1] = 0; indices[1] < dimension.dim(1).value(); indices[1]++)
+    {
+      for (indices[2] = 0; indices[2] < dimension.dim(2).value(); indices[2]++)
+      {
+        for (indices[3] = 0; indices[3] < dimension.dim(3).value(); indices[3]++)
+        {
+          int channel_idx = indices[channel_dim_index];
+          const float scaling_factor_inv = 1.0 / scaling_factor[channel_idx];
+          auto data = node->at<loco::DataType::FLOAT32>(cal_offset(dimension, indices));
+          quantized_values[cal_offset(dimension, indices)] =
+              static_cast<int32_t>(std::round(data * scaling_factor_inv));
+        }
+      }
+    }
+  }
+
+  node->dtype(loco::DataType::S16);      // change the type of tensor
+  node->size<loco::DataType::S16>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::S16>(i) =
+        std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+}
+
+// Asymmetric per-channel weight quantization to U8 using precomputed
+// per-channel min/scale (quantizes (data - min[ch]) / scale[ch]). The tensor
+// is retyped FLOAT32 -> U8 in place, saturating to [0, 255].
+void asym_wquant_per_channel(CircleConst *node, std::vector<float> &min,
+                             std::vector<float> &scaling_factor)
+{
+  assert(node->dtype() == loco::DataType::FLOAT32);
+
+  const int32_t kMinScale = 0;
+  const int32_t kMaxScale = 255;
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+  std::vector<int32_t> quantized_values(size);
+
+  loco::TensorShape dimension;
+  dimension.rank(4);
+  uint32_t indices[4] = {
+      0,
+  };
+  int channel_dim_index{0};
+
+  if (!get_channel_dim_index(node, dimension, channel_dim_index))
+  {
+    assert(false);
+    return;
+  }
+
+  for (indices[0] = 0; indices[0] < dimension.dim(0).value(); indices[0]++)
+  {
+    for (indices[1] = 0; indices[1] < dimension.dim(1).value(); indices[1]++)
+    {
+      for (indices[2] = 0; indices[2] < dimension.dim(2).value(); indices[2]++)
+      {
+        for (indices[3] = 0; indices[3] < dimension.dim(3).value(); indices[3]++)
+        {
+          int channel_idx = indices[channel_dim_index];
+          const float scaling_factor_inv = 1.0 / scaling_factor[channel_idx];
+          auto data = node->at<loco::DataType::FLOAT32>(cal_offset(dimension, indices));
+          quantized_values[cal_offset(dimension, indices)] =
+              static_cast<int32_t>(std::round((data - min[channel_idx]) * scaling_factor_inv));
+        }
+      }
+    }
+  }
+
+  node->dtype(loco::DataType::U8);      // change the type of tensor
+  node->size<loco::DataType::U8>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::U8>(i) = std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+}
+
+// Per-layer asymmetric weight quantization to U8 using a precomputed
+// layer-wide min and scale (quantizes (data - min) / scale). The tensor is
+// retyped FLOAT32 -> U8 in place, saturating to [0, 255].
+void asym_wquant_per_layer(CircleConst *node, float min, float scaling_factor)
+{
+  const int32_t kMinScale = 0;
+  const int32_t kMaxScale = 255;
+
+  uint32_t size = node->size<loco::DataType::FLOAT32>();
+
+  const float scaling_factor_inv = 1.0 / scaling_factor;
+  std::vector<int32_t> quantized_values(size);
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    auto data = node->at<loco::DataType::FLOAT32>(i);
+    quantized_values[i] = static_cast<int32_t>(std::round((data - min) * scaling_factor_inv));
+  }
+
+  node->dtype(loco::DataType::U8);      // change the type of tensor
+  node->size<loco::DataType::U8>(size); // resize tensor
+  for (uint32_t i = 0; i < size; ++i)
+  {
+    node->at<loco::DataType::U8>(i) = std::min(kMaxScale, std::max(kMinScale, quantized_values[i]));
+  }
+}
+
+// Check if node is weights of conv2d, depthwise_conv2d, or fully_connected layer
+// NOTE: unlike QuantizeDequantizeWeightsPass.cpp, this version performs no
+// rank checks and does not recognize CircleTransposeConv.
+bool is_weights(CircleNode *node)
+{
+  auto circle_const = dynamic_cast<CircleConst *>(node);
+  if (circle_const == nullptr)
+    return false;
+
+  auto succs = loco::succs(node);
+  if (succs.size() != 1) // assume weights is used by only one node
+    return false;
+
+  for (auto out : succs)
+  {
+    auto conv = dynamic_cast<CircleConv2D *>(out);
+    if (conv != nullptr && conv->filter() == circle_const)
+      return true;
+
+    auto dw_conv = dynamic_cast<CircleDepthwiseConv2D *>(out);
+    if (dw_conv != nullptr && dw_conv->filter() == circle_const)
+      return true;
+
+    auto fc = dynamic_cast<CircleFullyConnected *>(out);
+    if (fc != nullptr && fc->weights() == circle_const)
+      return true;
+  }
+  return false;
+}
+
+/**
+ * @brief QuantizeActivation quantizes tensors for activations
+ * @details Quantize using recorded min/max values. Only updates the dtype and
+ *          quantparam (nudged min/max, scale, zero-point) of activation nodes;
+ *          the underlying data is not touched. Bias and weights are skipped
+ *          (handled by QuantizeBias / QuantizeWeights).
+ */
+struct QuantizeActivation final : public luci::CircleNodeMutableVisitor<bool>
+{
+  QuantizeActivation(loco::DataType input, loco::DataType output)
+      : input_type(input), output_type(output)
+  {
+  }
+
+  loco::DataType input_type;  // input dtype (not read by this visitor)
+  loco::DataType output_type; // target quantized dtype: U8 or S16
+
+  // Quantize input tensors of each node
+  bool visit(luci::CircleNode *node)
+  {
+    LOGGER(l);
+    INFO(l) << "QuantizeActivation visit node: " << node->name() << std::endl;
+    auto arity = node->arity();
+    for (uint32_t i = 0; i < arity; i++)
+    {
+      auto input_node = node->arg(i);
+      auto circle_node = loco::must_cast<luci::CircleNode *>(input_node);
+
+      // Check if this is already quantized
+      if (is_quantized(circle_node))
+        continue;
+
+      // Check if this is bias (bias is quantized later)
+      auto iw = get_input_weight_of_bias(circle_node);
+      if (iw.first != nullptr && iw.second != nullptr)
+        continue;
+
+      // Check if this is activation
+      // We assume min/max are recorded only for activations
+      if (has_min_max(circle_node) && !is_weights(circle_node))
+      {
+        // Quantize using recorded min/max
+        auto quantparam = circle_node->quantparam();
+        assert(quantparam->min.size() == 1); // only support layer-wise quant
+        assert(quantparam->max.size() == 1); // only support layer-wise quant
+        auto min = quantparam->min[0];
+        auto max = quantparam->max[0];
+
+        float scaling_factor{0};
+        int64_t zp{0};
+        float nudged_min{0};
+        float nudged_max{0};
+
+        // U8 uses an asymmetric scheme; S16 uses a symmetric one.
+        if (output_type == loco::DataType::U8)
+        {
+          compute_asym_scale_zp(min, max, scaling_factor, zp, nudged_min, nudged_max);
+          circle_node->dtype(loco::DataType::U8);
+        }
+        else
+        {
+          compute_sym_scale_zp(min, max, scaling_factor, zp, nudged_min, nudged_max);
+          circle_node->dtype(loco::DataType::S16);
+        }
+
+        // Record the nudged range and the derived scale/zero-point.
+        circle_node->quantparam()->max[0] = nudged_max;
+        circle_node->quantparam()->min[0] = nudged_min;
+        circle_node->quantparam()->scale.push_back(scaling_factor);
+        circle_node->quantparam()->zerop.push_back(zp);
+      }
+    }
+    return false;
+  }
+};
+
+/**
+ * @brief QuantizeBias quantizes bias constants to S32
+ * @details Uses the already-quantized scales of the consumer's input and
+ *          weight (bias_scale = input_scale * weight_scale), so it must run
+ *          after QuantizeActivation and QuantizeWeights.
+ */
+struct QuantizeBias final : public luci::CircleNodeMutableVisitor<bool>
+{
+  QuantizeBias(loco::DataType input, loco::DataType output, QuantizationGranularity gr)
+      : input_type(input), output_type(output), granularity(gr)
+  {
+  }
+
+  loco::DataType input_type;           // input dtype (not read by this visitor)
+  loco::DataType output_type;          // target quantized dtype (not read by this visitor)
+  QuantizationGranularity granularity; // layer-wise or channel-wise
+
+  // Quantize bias node
+  bool visit(luci::CircleNode *node)
+  {
+    // Check if this is already quantized
+    if (is_quantized(node))
+      return false;
+
+    // Check if this is bias
+    auto iw = get_input_weight_of_bias(node);
+    if (iw.first == nullptr || iw.second == nullptr)
+      return false;
+
+    auto input = loco::must_cast<luci::CircleNode *>(iw.first);
+    auto weight = loco::must_cast<luci::CircleNode *>(iw.second);
+
+    if (granularity == QuantizationGranularity::ChannelWise)
+    {
+      assert(input->quantparam()->scale.size() == 1); // input scale's layer-wise
+      auto input_scale = input->quantparam()->scale[0];
+
+      assert(weight->quantparam() != nullptr); // weight scale's channel-wise
+      auto weight_scale = weight->quantparam()->scale;
+
+      auto circle_const = loco::must_cast<luci::CircleConst *>(node);
+
+      uint32_t size = circle_const->size<loco::DataType::FLOAT32>();
+      assert(size == weight_scale.size()); // one bias element per channel
+      std::vector<float> scaling_factor(size);
+      std::vector<int64_t> zp(size);
+
+      quant_bias_per_channel(circle_const, input_scale, weight_scale, scaling_factor, zp);
+
+      auto quantparam = std::make_unique<CircleQuantParam>();
+      quantparam->scale = scaling_factor;
+      quantparam->zerop = zp;
+      assert(circle_const->quantparam() == nullptr); // bias should not be quantized before
+      circle_const->quantparam(std::move(quantparam));
+    }
+    else
+    {
+      assert(input->quantparam()->scale.size() == 1); // Only support per-layer quant
+      auto input_scale = input->quantparam()->scale[0];
+
+      assert(weight->quantparam()->scale.size() == 1); // Only support per-layer quant
+      auto weight_scale = weight->quantparam()->scale[0];
+
+      auto circle_const = loco::must_cast<luci::CircleConst *>(node);
+      float scaling_factor{0};
+      int64_t zp{0};
+      asym_quant_bias_per_layer(circle_const, input_scale, weight_scale, &scaling_factor, &zp);
+      auto quantparam = std::make_unique<CircleQuantParam>();
+      quantparam->scale.push_back(scaling_factor);
+      quantparam->zerop.push_back(zp);
+      assert(circle_const->quantparam() == nullptr); // bias should not be quantized before
+      circle_const->quantparam(std::move(quantparam));
+    }
+    return false;
+  }
+};
+
+/**
+ * @brief QuantizeWeights quantizes tensors for weights
+ * @details Find min/max values on the fly and then quantize.
+ *          Uses the quantparam (min/scale) already recorded on the weight
+ *          node, so it must run after the min/max recording step.
+ */
+struct QuantizeWeights final : public luci::CircleNodeMutableVisitor<bool>
+{
+  QuantizeWeights(loco::DataType input, loco::DataType output, QuantizationGranularity gr)
+      : input_type(input), output_type(output), granularity(gr)
+  {
+  }
+
+  loco::DataType input_type;           // input dtype (not read by this visitor)
+  loco::DataType output_type;          // target quantized dtype: U8 or S16
+  QuantizationGranularity granularity; // layer-wise or channel-wise
+
+  // Quantize input tensors of each node
+  bool visit(luci::CircleNode *node)
+  {
+    LOGGER(l);
+    INFO(l) << "QuantizeWeights visit node: " << node->name() << std::endl;
+    auto arity = node->arity();
+    for (uint32_t i = 0; i < arity; i++)
+    {
+      auto input_node = node->arg(i);
+      auto circle_node = loco::must_cast<luci::CircleNode *>(input_node);
+
+      // Check if this is already quantized
+      if (is_quantized(circle_node))
+        continue;
+
+      if (is_weights(circle_node))
+      {
+        auto circle_const = loco::must_cast<luci::CircleConst *>(circle_node);
+
+        // Find min/max per channel-wise
+        if (granularity == QuantizationGranularity::ChannelWise)
+        {
+          auto quantparam = circle_node->quantparam();
+          assert(quantparam != nullptr);
+          auto min = quantparam->min;
+          auto scaling_factor = quantparam->scale;
+
+          // U8 uses an asymmetric scheme; S16 uses a symmetric one.
+          if (output_type == loco::DataType::U8)
+          {
+            asym_wquant_per_channel(circle_const, min, scaling_factor);
+          }
+          else
+          {
+            sym_wquant_per_channel(circle_const, scaling_factor);
+          }
+        }
+        // Find min/max per layer-wise
+        else
+        {
+          // Quantize using recorded quantparam
+          auto quantparam = circle_node->quantparam();
+          assert(quantparam != nullptr);
+          assert(quantparam->min.size() == 1);   // only support layer-wise quant
+          assert(quantparam->scale.size() == 1); // only support layer-wise quant
+          auto min = quantparam->min[0];
+          auto scaling_factor = quantparam->scale[0];
+          asym_wquant_per_layer(circle_const, min, scaling_factor);
+        }
+      }
+    }
+    return false;
+  }
+};
+
+} // namespace
+
+// Entry point of the pass. Runs three visitor sweeps in dependency order
+// (activations -> weights -> bias, since bias scale depends on the other
+// two), then propagates the quantized dtype to graph outputs.
+// Always returns false, so the pass manager runs it once.
+bool QuantizeWithMinMaxPass::run(loco::Graph *g)
+{
+  LOGGER(l);
+  INFO(l) << "QuantizeWithMinMaxPass Start" << std::endl;
+
+  // Quantize activation
+  for (auto node : loco::active_nodes(loco::output_nodes(g)))
+  {
+    QuantizeActivation qa(_input_dtype, _output_dtype);
+    auto circle_node = loco::must_cast<luci::CircleNode *>(node);
+    circle_node->accept(&qa);
+  }
+
+  // Quantize weights
+  for (auto node : loco::active_nodes(loco::output_nodes(g)))
+  {
+    QuantizeWeights qw(_input_dtype, _output_dtype, _granularity);
+    auto circle_node = loco::must_cast<luci::CircleNode *>(node);
+    circle_node->accept(&qw);
+  }
+
+  // Quantize bias
+  for (auto node : loco::active_nodes(loco::output_nodes(g)))
+  {
+    QuantizeBias qb(_input_dtype, _output_dtype, _granularity);
+    auto circle_node = loco::must_cast<luci::CircleNode *>(node);
+    circle_node->accept(&qb);
+  }
+
+  // Update output dtype
+  auto graph_outputs = g->outputs();
+  for (auto node : loco::output_nodes(g))
+  {
+    auto circle_node = loco::must_cast<luci::CircleOutput *>(node);
+    // Only propagate when the producer was actually quantized to the target dtype.
+    if (static_cast<luci::CircleNode *>(circle_node->from())->dtype() == _output_dtype)
+    {
+      circle_node->dtype(_output_dtype);
+      auto graph_output = graph_outputs->at(circle_node->index());
+      graph_output->dtype(_output_dtype);
+    }
+  }
+
+  INFO(l) << "QuantizeWithMinMaxPass End" << std::endl;
+  return false; // one time run
+}
+
+} // namespace luci
diff --git a/compiler/luci/pass/src/ResolveCustomOpAddPass.cpp b/compiler/luci/pass/src/ResolveCustomOpAddPass.cpp
new file mode 100644
index 000000000..e52d667d7
--- /dev/null
+++ b/compiler/luci/pass/src/ResolveCustomOpAddPass.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Pass/ResolveCustomOpAddPass.h"
+
+#include "flatbuffers/flexbuffers.h"
+
+#include <luci/IR/CircleNodes.h>
+#include <luci/IR/AttrFusedActFunc.h>
+
+namespace
+{
+
+/// @brief Returns the index of BroadcastTo node among cop's inputs.
+// NOTE This function assumes there is only one BroadcastTo node among its inputs.
+int32_t get_broadcastTo_index_among_inputs_of(luci::CircleCustom *cop)
+{
+ for (uint32_t idx = 0; idx < cop->numInputs(); idx++)
+ {
+ auto input = dynamic_cast<const luci::CircleCustomOut *>(cop->inputs(idx));
+ if (input)
+ {
+ auto broadcastTo = loco::must_cast<luci::CircleCustom *>(input->input());
+ if (broadcastTo->custom_code() == "BroadcastTo")
+ return idx;
+ }
+ }
+
+ return -1;
+}
+
+/** BEFORE
+ * [CircleConst]
+ * |
+ * [CircleNode] [BroadcastTo(CircleCustom)]
+ * \ |
+ *               \    [CircleCustomOut]
+ * \ /
+ * [AddV2(CircleCustom)]
+ * AFTER
+ *
+ * [CircleConst] [CircleNode]
+ * \ /
+ * \ /
+ * [CircleAdd]
+ */
+bool resolve_with_BroadcastTo(luci::CircleCustom *addv2)
+{
+ int32_t broadcastTo_idx = get_broadcastTo_index_among_inputs_of(addv2);
+
+ if (broadcastTo_idx == -1)
+ return false;
+
+ auto input = loco::must_cast<const luci::CircleCustomOut *>(addv2->inputs(broadcastTo_idx));
+ auto broadcastTo = loco::must_cast<luci::CircleCustom *>(input->input());
+
+ auto add = addv2->graph()->nodes()->create<luci::CircleAdd>();
+ add->fusedActivationFunction(luci::FusedActFunc::NONE);
+ add->x(addv2->inputs(1 - broadcastTo_idx));
+ add->y(broadcastTo->inputs(0));
+ auto customOut = loco::succs(addv2);
+ assert(customOut.size() == 1);
+ replace(*customOut.begin()).with(add);
+
+ return true;
+}
+
+bool resolve_custom_op(luci::CircleCustom *addv2)
+{
+ const std::string custom_code = addv2->custom_code();
+ const std::vector<uint8_t> custom_options = addv2->custom_options();
+
+ if (custom_code != "AddV2")
+ return false;
+
+ if (resolve_with_BroadcastTo(addv2))
+ return true;
+
+ auto add = addv2->graph()->nodes()->create<luci::CircleAdd>();
+ add->fusedActivationFunction(luci::FusedActFunc::NONE);
+ add->x(addv2->inputs(0));
+ add->y(addv2->inputs(1));
+ auto customOut = loco::succs(addv2);
+ assert(customOut.size() == 1);
+ replace(*customOut.begin()).with(add);
+
+ return true;
+}
+
+} // namespace
+
+namespace luci
+{
+
+bool ResolveCustomOpAddPass::run(loco::Graph *g)
+{
+ bool changed = false;
+
+ for (auto node : loco::active_nodes(loco::output_nodes(g)))
+ {
+ auto cop = dynamic_cast<luci::CircleCustom *>(node);
+ if (not cop)
+ continue;
+
+ changed |= resolve_custom_op(cop);
+ }
+
+ return changed;
+}
+
+} // namespace luci
diff --git a/compiler/luci/pass/src/ResolveCustomOpBatchMatMulPass.cpp b/compiler/luci/pass/src/ResolveCustomOpBatchMatMulPass.cpp
new file mode 100644
index 000000000..145e9cb62
--- /dev/null
+++ b/compiler/luci/pass/src/ResolveCustomOpBatchMatMulPass.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Pass/ResolveCustomOpBatchMatMulPass.h"
+
+#include "flatbuffers/flexbuffers.h"
+
+#include <luci/IR/CircleNodes.h>
+
+namespace
+{
+
+bool resolve_custom_op(luci::CircleCustom *cop)
+{
+ const std::string custom_code = cop->custom_code();
+ const std::vector<uint8_t> custom_options = cop->custom_options();
+
+ if (custom_code == "BatchMatMulV2")
+ {
+ auto batch_matmul = cop->graph()->nodes()->create<luci::CircleBatchMatMul>();
+ // input
+ batch_matmul->x(cop->inputs(0));
+ batch_matmul->y(cop->inputs(1));
+ // TODO find much better way of parsing custom_options
+ // adj
+ auto map = flexbuffers::GetRoot(custom_options).AsMap();
+ batch_matmul->adj_x(map["adj_x"].AsBool());
+ batch_matmul->adj_y(map["adj_y"].AsBool());
+
+ replace(cop).with(batch_matmul);
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
+namespace luci
+{
+
+bool ResolveCustomOpBatchMatMulPass::run(loco::Graph *g)
+{
+ bool changed = false;
+ for (auto node : loco::active_nodes(loco::output_nodes(g)))
+ {
+ auto cop = dynamic_cast<luci::CircleCustom *>(node);
+ if (not cop)
+ continue;
+
+ changed |= resolve_custom_op(cop);
+ }
+
+ return changed;
+}
+
+} // namespace luci
diff --git a/compiler/luci/pass/src/ResolveCustomOpMatMulPass.cpp b/compiler/luci/pass/src/ResolveCustomOpMatMulPass.cpp
new file mode 100644
index 000000000..547fd22fc
--- /dev/null
+++ b/compiler/luci/pass/src/ResolveCustomOpMatMulPass.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "luci/Pass/ResolveCustomOpMatMulPass.h"
+
+#include "flatbuffers/flexbuffers.h"
+#include <loco/IR/DataTypeTraits.h>
+
+#include <luci/IR/CircleNodes.h>
+
+#include <loco.h>
+#include <oops/InternalExn.h>
+#include <loco/Service/ShapeInference.h>
+#include <loco/Service/TypeInference.h>
+
+namespace
+{
+
+template <typename T>
+luci::CircleConst *create_const_node(loco::Graph *g, const loco::DataType dtype,
+ const std::vector<uint32_t> &shape,
+ const std::vector<T> &values)
+{
+ auto node = g->nodes()->create<luci::CircleConst>();
+ node->dtype(dtype);
+ node->rank(shape.size());
+
+ uint32_t size = 1;
+ for (uint32_t i = 0; i < shape.size(); ++i)
+ {
+ node->dim(i) = shape.at(i);
+ size *= shape.at(i);
+ }
+
+#define INIT_VALUES(DT) \
+ { \
+ node->size<DT>(size); \
+ for (uint32_t i = 0; i < values.size(); ++i) \
+ node->at<DT>(i) = values[i]; \
+ }
+
+ switch (dtype)
+ {
+ case loco::DataType::U8:
+ INIT_VALUES(loco::DataType::U8);
+ break;
+ case loco::DataType::S16:
+ INIT_VALUES(loco::DataType::S16);
+ break;
+ case loco::DataType::S32:
+ INIT_VALUES(loco::DataType::S32);
+ break;
+ case loco::DataType::FLOAT32:
+ INIT_VALUES(loco::DataType::FLOAT32)
+ break;
+ default:
+ INTERNAL_EXN("create_const_node called with unsupported type");
+ break;
+ }
+ return node;
+}
+
+bool resolve_matmul(luci::CircleCustom *cop)
+{
+#define CHECK_OR_FALSE(condition) \
+ if (not(condition)) \
+ return false;
+#define CHECK_OR_THROW(condition, message) \
+ if (not(condition)) \
+ INTERNAL_EXN(message);
+
+ auto graph = cop->graph();
+ const std::vector<uint8_t> custom_options = cop->custom_options();
+ auto map = flexbuffers::GetRoot(custom_options).AsMap();
+ const auto U8 = loco::DataType::U8;
+ const auto S16 = loco::DataType::S16;
+ const auto S32 = loco::DataType::S32;
+ const auto FLOAT32 = loco::DataType::FLOAT32;
+
+ bool transpose_a = map["transpose_a"].AsBool();
+ bool transpose_b = map["transpose_b"].AsBool();
+
+ loco::Node *lhs = cop->inputs(0);
+ loco::Node *rhs = cop->inputs(1);
+
+ // Check that the type of the first input is known
+ CHECK_OR_FALSE(loco::dtype_known(lhs));
+ auto lhs_dtype = loco::dtype_get(cop->inputs(0));
+
+ // If transpose of first input is requested, its shape must be known
+ CHECK_OR_FALSE(!transpose_a || loco::shape_known(lhs));
+ // and its rank should be at least 2
+ CHECK_OR_FALSE(!transpose_a || loco::shape_get(lhs).as<loco::TensorShape>().rank() >= 2);
+ // Check that the shape of the 2nd input is known
+ CHECK_OR_FALSE(loco::shape_known(rhs));
+ // TODO as of 06/23/20 TFLite only supports rank 2 for 2nd input. Fix this once that changes!
+ CHECK_OR_FALSE(loco::shape_get(rhs).as<loco::TensorShape>().rank() == 2);
+ // Check that input data type is supported
+ CHECK_OR_THROW(lhs_dtype == U8 || lhs_dtype == S16 || lhs_dtype == FLOAT32,
+ "Only UInt8, Int16 and Float32 data types are supported by MatMul");
+
+ if (transpose_a)
+ {
+ auto a_shape = loco::shape_get(lhs).as<loco::TensorShape>();
+ // Create a permutation constant node
+ std::vector<uint32_t> perm;
+ for (uint32_t i = 0; i < a_shape.rank(); ++i)
+ perm.push_back(i);
+ std::swap(perm[a_shape.rank() - 1], perm[a_shape.rank() - 2]);
+ auto perm_node = create_const_node(graph, S32, {a_shape.rank()}, perm);
+ // Now make a transpose node
+ auto transpose_node = graph->nodes()->create<luci::CircleTranspose>();
+ transpose_node->a(lhs);
+ transpose_node->perm(perm_node);
+ lhs = transpose_node;
+ }
+
+ // Transpose the second input if needed. TFLite FullyConnected operator
+ // assumes the second input is in column-major order, but the input is
+ // in row-major order, thus we need to convert between them.
+ if (!transpose_b)
+ {
+ const std::vector<uint32_t> perm{1, 0};
+ auto perm_node = create_const_node(graph, S32, {2}, perm);
+ auto transpose_node = graph->nodes()->create<luci::CircleTranspose>();
+ transpose_node->a(rhs);
+ transpose_node->perm(perm_node);
+ rhs = transpose_node;
+ }
+
+ // Make a constant zero-filled bias node
+ auto b_shape = loco::shape_get(cop->inputs(1)).as<loco::TensorShape>();
+ uint32_t bias_size = b_shape.dim(transpose_b ? 1 : 0).value();
+ const std::vector<float> val(bias_size, .0f);
+ auto bias_node = create_const_node(graph, lhs_dtype, {bias_size}, val);
+ auto fc_node = graph->nodes()->create<luci::CircleFullyConnected>();
+ fc_node->input(lhs);
+ fc_node->weights(rhs);
+ fc_node->bias(bias_node);
+ fc_node->fusedActivationFunction(luci::FusedActFunc::NONE);
+
+ replace(cop).with(fc_node);
+ return true;
+}
+
+} // namespace
+
+namespace luci
+{
+
+bool ResolveCustomOpMatMulPass::run(loco::Graph *g)
+{
+ bool changed = false;
+ for (auto node : loco::active_nodes(loco::output_nodes(g)))
+ {
+ auto cop = dynamic_cast<luci::CircleCustom *>(node);
+ if (not cop)
+ continue;
+
+ if (cop->custom_code() != "MatMul")
+ continue;
+
+ if (!resolve_matmul(cop))
+ continue;
+
+ changed = true;
+ }
+
+ return changed;
+}
+
+} // namespace luci
diff --git a/compiler/luci/requires.cmake b/compiler/luci/requires.cmake
index e88dabd24..e52523d45 100644
--- a/compiler/luci/requires.cmake
+++ b/compiler/luci/requires.cmake
@@ -1,5 +1,7 @@
+require("foder")
require("loco")
require("locop")
+require("logo")
require("logo-core")
require("mio-circle")
require("oops")
diff --git a/compiler/luci/service/src/CircleShapeInference.cpp b/compiler/luci/service/src/CircleShapeInference.cpp
index fdcfa76bc..0732849db 100644
--- a/compiler/luci/service/src/CircleShapeInference.cpp
+++ b/compiler/luci/service/src/CircleShapeInference.cpp
@@ -27,11 +27,8 @@ namespace luci
ShapeDescription ShapeInference::get(loco::Node *node)
{
- // TODO Adjust indentation level
- {
- assert(loco::shape_known(node));
- return to_shape_description(loco::shape_get(node));
- }
+ assert(loco::shape_known(node));
+ return to_shape_description(loco::shape_get(node));
}
} // namespace luci
diff --git a/compiler/luci/service/src/CircleShapeInferenceRule.cpp b/compiler/luci/service/src/CircleShapeInferenceRule.cpp
index c8e872b1e..a291cfe70 100644
--- a/compiler/luci/service/src/CircleShapeInferenceRule.cpp
+++ b/compiler/luci/service/src/CircleShapeInferenceRule.cpp
@@ -17,6 +17,8 @@
#include "luci/Service/CircleShapeInferenceRule.h"
#include "Check.h"
+#include "ShapeInfer_StridedSlice.h"
+
#include <luci/IR/CircleNodes.h>
#include <luci/IR/CircleDialect.h>
#include <luci/IR/CircleNodeVisitor.h>
@@ -26,11 +28,25 @@
#include <algorithm>
#include <cassert>
+#include <cmath>
#include <stdexcept>
namespace
{
+std::ostream &operator<<(std::ostream &os, const loco::TensorShape &tensor_shape)
+{
+ os << "[";
+ for (uint32_t r = 0; r < tensor_shape.rank(); ++r)
+ {
+ if (r)
+ os << ",";
+ os << tensor_shape.dim(r).value();
+ }
+ os << "]";
+ return os;
+}
+
// Call this for CircleAvgPool2D and CircleMaxPool2D only
template <class Pool2DType> loco::NodeShape infer_pool_2d_shape(const Pool2DType *node)
{
@@ -176,6 +192,157 @@ loco::TensorShape broadcast_shape(const loco::TensorShape &x, const loco::Tensor
return output_shape;
}
+// BatchMatMulV2 supports broadcasting in the batch dimensions(BatchMatMul doesn't)
+// TODO Distinguish BatchMatMul and BatchMatMulV2
+loco::NodeShape infer_batchmatmul_shape(const loco::TensorShape &x_shape,
+ const loco::TensorShape &y_shape, bool adj_x, bool adj_y)
+{
+ uint32_t x_rank = x_shape.rank();
+ uint32_t y_rank = y_shape.rank();
+ assert(x_rank >= 2 && y_rank >= 2);
+
+ loco::TensorShape output_shape;
+ output_shape.rank(x_shape.rank());
+  // Broadcast in the batch dimension
+ if (x_rank > 2 || y_rank > 2)
+ {
+ loco::TensorShape dummy_x = x_shape;
+ loco::TensorShape dummy_y = y_shape;
+ expand_rank(dummy_x, dummy_y);
+ if (x_rank < y_rank)
+ expand_rank(output_shape, dummy_y);
+
+ for (uint32_t d = 0; d < output_shape.rank() - 2; d++)
+ {
+ uint32_t max_dim = std::max(dummy_x.dim(d).value(), dummy_y.dim(d).value());
+ if (dummy_x.dim(d) == dummy_y.dim(d) ||
+ dummy_x.dim(d).value() * dummy_y.dim(d).value() == max_dim)
+ output_shape.dim(d).set(max_dim);
+ else
+ INTERNAL_EXN("BatchMatMul has wrong shape");
+ }
+ }
+
+ loco::Dimension x_lhs = adj_x ? x_shape.dim(x_rank - 1) : x_shape.dim(x_rank - 2);
+ loco::Dimension x_rhs = adj_x ? x_shape.dim(x_rank - 2) : x_shape.dim(x_rank - 1);
+ loco::Dimension y_lhs = adj_y ? y_shape.dim(y_rank - 1) : y_shape.dim(y_rank - 2);
+ loco::Dimension y_rhs = adj_y ? y_shape.dim(y_rank - 2) : y_shape.dim(y_rank - 1);
+
+ if (not(x_rhs == y_lhs))
+ INTERNAL_EXN("x_rhs and y_lhs should be same");
+
+ uint32_t out_rank = output_shape.rank();
+ output_shape.dim(out_rank - 2) = x_lhs;
+ output_shape.dim(out_rank - 1) = y_rhs;
+
+ return loco::NodeShape{output_shape};
+}
+
+loco::TensorShape own_shape(const luci::CircleNode *node)
+{
+ loco::TensorShape shape;
+ shape.rank(node->rank());
+ for (uint32_t r = 0; r < node->rank(); ++r)
+ shape.dim(r) = loco::Dimension(node->dim(r).value());
+ return shape;
+}
+
+loco::TensorShape infer_reducer(const loco::Node *input, const loco::Node *indices, bool keep_dims)
+{
+ const loco::DataType S32 = loco::DataType::S32;
+
+ auto input_shape = loco::shape_get(input).as<loco::TensorShape>();
+ auto reduction_indices = loco::must_cast<const luci::CircleConst *>(indices);
+
+ { // Exceptions
+ // TODO support non-const case
+ // TODO support other data type
+ LUCI_ASSERT(reduction_indices->dtype() == S32, "Only support int 32");
+ }
+
+ std::vector<int32_t> reduction_values;
+
+ for (uint32_t i = 0; i < reduction_indices->size<S32>(); ++i)
+ {
+ int32_t axis = reduction_indices->at<S32>(i);
+ if (axis < 0)
+ axis += input_shape.rank();
+ if (not(0 <= axis and axis < static_cast<int32_t>(input_shape.rank())))
+ INTERNAL_EXN_V("Invalid reduction axis for REDUCER", oops::to_uint32(axis));
+ reduction_values.push_back(axis);
+ }
+
+ loco::TensorShape output_shape;
+
+ if (keep_dims)
+ {
+ output_shape.rank(input_shape.rank());
+ for (uint32_t i = 0; i < input_shape.rank(); ++i)
+ output_shape.dim(i) = input_shape.dim(i);
+ for (uint32_t i = 0; i < reduction_values.size(); ++i)
+ output_shape.dim(reduction_values.at(i)) = 1;
+ }
+ else
+ {
+ std::vector<bool> check_reduce(input_shape.rank(), false);
+ for (uint32_t i = 0; i < reduction_values.size(); ++i)
+ check_reduce.at(reduction_values.at(i)) = true;
+
+ uint32_t reduce_cnt = 0;
+ for (uint32_t i = 0; i < check_reduce.size(); ++i)
+ if (check_reduce.at(i))
+ ++reduce_cnt;
+
+ output_shape.rank(input_shape.rank() - reduce_cnt);
+ for (uint32_t i = 0, j = 0; i < check_reduce.size(); ++i)
+ if (check_reduce.at(i) == false)
+ output_shape.dim(j++) = input_shape.dim(i);
+ }
+
+ return output_shape;
+}
+
+/**
+ * @brief vector_from_constant will return int64_t vector from CircleConst node
+ */
+template <loco::DataType T> std::vector<int64_t> vector_from_constant(luci::CircleConst *const_node)
+{
+ std::vector<int64_t> result;
+
+ for (uint32_t idx = 0; idx < const_node->size<T>(); ++idx)
+ result.push_back(const_node->at<T>(idx));
+
+ return result;
+}
+
+template <class CIRCLENODE> loco::NodeShape broadcast_xy(const CIRCLENODE *node)
+{
+ auto x_shape = loco::shape_get(node->x()).template as<loco::TensorShape>();
+ auto y_shape = loco::shape_get(node->y()).template as<loco::TensorShape>();
+
+ auto output_shape = broadcast_shape(x_shape, y_shape);
+
+ return loco::NodeShape{output_shape};
+}
+
+template <class CIRCLENODE> loco::NodeShape use_x(const CIRCLENODE *node)
+{
+ auto x_shape = loco::shape_get(node->x()).template as<loco::TensorShape>();
+ return loco::NodeShape{x_shape};
+}
+
+template <class CIRCLENODE> loco::NodeShape use_logits(const CIRCLENODE *node)
+{
+ auto shape = loco::shape_get(node->logits()).template as<loco::TensorShape>();
+ return loco::NodeShape{shape};
+}
+
+loco::NodeShape use_own(const luci::CircleNode *node)
+{
+ loco::TensorShape shape = own_shape(node);
+ return loco::NodeShape{shape};
+}
+
/**
* @brief Class to infer the shape of CircleNode
*
@@ -184,20 +351,24 @@ loco::TensorShape broadcast_shape(const loco::TensorShape &x, const loco::Tensor
class ShapeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::NodeShape>
{
public:
- loco::NodeShape visit(const luci::CircleAbs *node) final
- {
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- return loco::NodeShape{x_shape};
- }
+ loco::NodeShape visit(const luci::CircleAbs *node) final { return use_x(node); }
- loco::NodeShape visit(const luci::CircleAdd *node) final
+ loco::NodeShape visit(const luci::CircleAdd *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleAddN *node) final
{
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
+ auto shape = loco::shape_get(node->inputs(0)).as<loco::TensorShape>();
- auto output_shape = broadcast_shape(x_shape, y_shape);
+ for (uint32_t idx = 1; idx < node->arity(); ++idx)
+ {
+ auto shape_idx = loco::shape_get(node->inputs(idx)).as<loco::TensorShape>();
+ if (!(shape == shape_idx))
+ {
+ INTERNAL_EXN_V("ADD_N shape not same as the first input: ", idx);
+ }
+ }
- return loco::NodeShape{output_shape};
+ return loco::NodeShape{shape};
}
loco::NodeShape visit(const luci::CircleArgMax *node) final
@@ -211,8 +382,7 @@ public:
// Only support node's shape() is CircleConst with S32/S64
// Support S32 for now.
- auto const_shape_node = dynamic_cast<luci::CircleConst *>(node->dimension());
- LUCI_ASSERT(const_shape_node, "Only support CircleConst for shape of CircleArgMax");
+ auto const_shape_node = loco::must_cast<luci::CircleConst *>(node->dimension());
LUCI_ASSERT(const_shape_node->dtype() == loco::DataType::S32,
"Only support int32 CircleConst for CircleArgMax");
@@ -240,11 +410,58 @@ public:
return loco::NodeShape{shape_output};
}
+ loco::NodeShape visit(const luci::CircleArgMin *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ auto dimension_shape = loco::shape_get(node->dimension()).as<loco::TensorShape>();
+
+ int64_t select_axis = 0;
+ {
+ LUCI_ASSERT(node->dimension(), "2nd input dimension() should not be nullptr");
+
+ // Only support node's shape() is CircleConst with S32/S64
+ // Support S32 for now.
+ auto const_shape_node = loco::must_cast<luci::CircleConst *>(node->dimension());
+ LUCI_ASSERT(const_shape_node->dtype() == loco::DataType::S32,
+ "Only support int32 CircleConst for CircleArgMin");
+
+ if (const_shape_node->rank() > 1)
+ INTERNAL_EXN_V("Only support rank 0/1 CircleConst",
+ oops::to_uint32(const_shape_node->rank()));
+
+ select_axis = const_shape_node->scalar<loco::DataType::S32>();
+ }
+ assert(select_axis < input_shape.rank());
+ assert(select_axis >= 0); // TODO support minus of this breaks
+
+ // NOTE select_axis is removed
+ loco::TensorShape shape_output;
+ uint32_t rank = input_shape.rank();
+ uint32_t shrink = static_cast<uint32_t>(select_axis);
+ assert(rank > 0);
+ shape_output.rank(rank - 1);
+ for (uint32_t r = 0, d = 0; r < rank; ++r)
+ {
+ if (r == shrink)
+ continue;
+ shape_output.dim(d++) = input_shape.dim(r);
+ }
+ return loco::NodeShape{shape_output};
+ }
+
loco::NodeShape visit(const luci::CircleAveragePool2D *node) final
{
return infer_pool_2d_shape(node);
}
+ loco::NodeShape visit(const luci::CircleBatchMatMul *node) final
+ {
+ auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
+ auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
+
+ return infer_batchmatmul_shape(x_shape, y_shape, node->adj_x(), node->adj_y());
+ }
+
loco::NodeShape visit(const luci::CircleBatchToSpaceND *node) final
{
const loco::DataType S32 = loco::DataType::S32;
@@ -254,14 +471,12 @@ public:
assert(input_shape.rank() == 3 || input_shape.rank() == 4);
// Only support block_shape() with S32 type CircleConst for now
- auto const_block_shape = dynamic_cast<luci::CircleConst *>(node->block_shape());
- LUCI_ASSERT(const_block_shape, "Only support CircleConst for block_shape");
+ auto const_block_shape = loco::must_cast<luci::CircleConst *>(node->block_shape());
LUCI_ASSERT(const_block_shape->dtype() == loco::DataType::S32,
"Only support int32 block_shape");
// Only support crops() with S32 type CircleConst for now
- auto const_crops = dynamic_cast<luci::CircleConst *>(node->crops());
- LUCI_ASSERT(const_crops, "Only support CircleConst for crops");
+ auto const_crops = loco::must_cast<luci::CircleConst *>(node->crops());
LUCI_ASSERT(const_crops->dtype() == loco::DataType::S32, "Only support int32 crops");
auto const_block_shape_shape = loco::shape_get(const_block_shape).as<loco::TensorShape>();
@@ -295,6 +510,10 @@ public:
return loco::NodeShape{shape_output};
}
+ loco::NodeShape visit(const luci::CircleCast *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleCeil *node) final { return use_x(node); }
+
loco::NodeShape visit(const luci::CircleConcatenation *node) final
{
// TODO Support when CircleConcatenation has 0 input
@@ -330,16 +549,7 @@ public:
return loco::NodeShape{output_shape};
}
- loco::NodeShape visit(const luci::CircleConst *node) final
- {
- loco::TensorShape shape;
-
- shape.rank(node->rank());
- for (uint32_t axis = 0; axis < node->rank(); axis++)
- shape.dim(axis) = node->dim(axis);
-
- return loco::NodeShape{shape};
- }
+ loco::NodeShape visit(const luci::CircleConst *node) final { return use_own(node); }
loco::NodeShape visit(const luci::CircleConv2D *node) final
{
@@ -361,8 +571,8 @@ public:
uint32_t stride_width = node->stride()->w();
uint32_t ker_height = ker_shape.dim(1).value();
uint32_t ker_width = ker_shape.dim(2).value();
- uint32_t dilation_height = 1;
- uint32_t dilation_width = 1;
+ uint32_t dilation_height = node->dilation()->h();
+ uint32_t dilation_width = node->dilation()->w();
uint32_t effective_ker_height = dilation_height * (ker_height - 1) + 1;
uint32_t effective_ker_width = dilation_width * (ker_width - 1) + 1;
@@ -392,11 +602,40 @@ public:
return loco::NodeShape{ofm_shape};
}
- loco::NodeShape visit(const luci::CircleCos *node) final
+ loco::NodeShape visit(const luci::CircleCos *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleCustom *node) final { return use_own(node); }
+
+ loco::NodeShape visit(const luci::CircleDepthToSpace *node) final
{
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ LUCI_ASSERT(input_shape.rank() == 4, "Only input rank 4 is supported");
+
+ // Only data format NHWC is supported
+ // TODO need to clarify what to do with layout in this operator
+ int32_t height = input_shape.dim(1).value();
+ int32_t width = input_shape.dim(2).value();
+ int32_t depth = input_shape.dim(3).value();
+
+ int block_size = node->block_size();
+
+ if (block_size < 2)
+ INTERNAL_EXN("Block size must be >= 2");
+
+ if (depth % (block_size * block_size))
+ {
+ INTERNAL_EXN("The input tensor's depth must be divisible by block_size^2");
+ }
- return loco::NodeShape{x_shape};
+ loco::TensorShape output_shape;
+ output_shape.rank(4);
+
+ output_shape.dim(0) = input_shape.dim(0).value();
+ output_shape.dim(1) = height * block_size;
+ output_shape.dim(2) = width * block_size;
+ output_shape.dim(3) = depth / (block_size * block_size);
+
+ return loco::NodeShape{output_shape};
}
loco::NodeShape visit(const luci::CircleDepthwiseConv2D *node) final
@@ -414,8 +653,8 @@ public:
uint32_t stride_width = node->stride()->w();
uint32_t ker_height = ker_shape.dim(1).value();
uint32_t ker_width = ker_shape.dim(2).value();
- uint32_t dilation_height = 1;
- uint32_t dilation_width = 1;
+ uint32_t dilation_height = node->dilation()->h();
+ uint32_t dilation_width = node->dilation()->w();
uint32_t effective_ker_height = dilation_height * (ker_height - 1) + 1;
uint32_t effective_ker_width = dilation_width * (ker_width - 1) + 1;
@@ -445,30 +684,87 @@ public:
return loco::NodeShape{ofm_shape};
}
- loco::NodeShape visit(const luci::CircleDiv *node) final
- {
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
+ loco::NodeShape visit(const luci::CircleDiv *node) final { return broadcast_xy(node); }
- auto output_shape = broadcast_shape(x_shape, y_shape);
+ loco::NodeShape visit(const luci::CircleElu *node) final
+ {
+ auto input_shape = loco::shape_get(node->features()).as<loco::TensorShape>();
- return loco::NodeShape{output_shape};
+ return loco::NodeShape{input_shape};
}
- loco::NodeShape visit(const luci::CircleEqual *node) final
+ loco::NodeShape visit(const luci::CircleEqual *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleExp *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleExpandDims *node) final
{
- const auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- const auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
- loco::TensorShape output_shape = broadcast_shape(x_shape, y_shape);
+ const loco::DataType S32 = loco::DataType::S32;
+ auto x_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ if (x_shape.rank() == 0)
+ {
+      // This may be for an unknown shape. We use the shape from the node itself.
+ return use_own(node);
+ }
+ auto const_axis = loco::must_cast<luci::CircleConst *>(node->axis());
+ LUCI_ASSERT(const_axis->dtype() == S32, "Only support int32 CircleConst for axis");
+ if (const_axis->rank() != 0 && const_axis->rank() != 1)
+ {
+ INTERNAL_EXN_V("Non-scalar axis in OP", node->opnum());
+ }
+ int32_t axis = const_axis->at<S32>(0);
+ LUCI_ASSERT((axis <= static_cast<int32_t>(x_shape.rank())) &&
+ (axis >= -1 - static_cast<int32_t>(x_shape.rank())),
+ "Axis has to be between [-(D+1), D], where D is rank of input.");
+ size_t positive_axis = axis < 0 ? x_shape.rank() + axis + 1 : axis;
+ loco::TensorShape output_shape;
+ output_shape.rank(x_shape.rank() + 1);
+ size_t i = 0;
+ for (; i < positive_axis; i++)
+ output_shape.dim(i) = x_shape.dim(i);
+ output_shape.dim(i) = loco::Dimension(1);
+ for (; i < x_shape.rank(); i++)
+ output_shape.dim(i + 1) = x_shape.dim(i);
return loco::NodeShape{output_shape};
}
- loco::NodeShape visit(const luci::CircleExp *node) final
+ loco::NodeShape visit(const luci::CircleFill *node) final
{
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- return loco::NodeShape{x_shape};
+ loco::TensorShape shape;
+ {
+ LUCI_ASSERT(node->dims(), "dims input should not be nullptr");
+
+ auto dims_node = dynamic_cast<luci::CircleConst *>(node->dims());
+ if (dims_node != nullptr)
+ {
+ // Only support node with S32
+ LUCI_ASSERT(dims_node->dtype() == loco::DataType::S32, "Only support int32 CircleConst");
+
+ if (dims_node->rank() != 1)
+ INTERNAL_EXN_V("Only support rank 1 CircleConst", oops::to_uint32(dims_node->rank()));
+
+ shape.rank(dims_node->dim(0).value());
+
+ for (uint32_t axis = 0; axis < shape.rank(); ++axis)
+ {
+ shape.dim(axis) = dims_node->at<loco::DataType::S32>(axis);
+ }
+ }
+ else
+ {
+ shape = own_shape(node);
+ }
+ }
+
+ return loco::NodeShape{shape};
}
+ loco::NodeShape visit(const luci::CircleFloor *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleFloorDiv *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleFloorMod *node) final { return broadcast_xy(node); }
+
loco::NodeShape visit(const luci::CircleFullyConnected *node) final
{
auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
@@ -478,8 +774,11 @@ public:
// Input: a tensor of at least rank 2 [D1, D2, ... Dn]
// Weight: [# of units, K]
// Output: [D1 * D2 * ... * Dn / K, # of units]
- LUCI_ASSERT(input_shape.rank() >= 2, "Input rank should be at least 2");
- LUCI_ASSERT(weights_shape.rank() == 2, "Incompatible weights rank for fully connected");
+ if (input_shape.rank() < 2 || weights_shape.rank() != 2)
+ {
+ // Return node own shape if shape inference is not possible
+ return use_own(node);
+ }
uint32_t input_size = 1;
for (uint32_t i = 0; i < input_shape.rank(); i++)
@@ -495,28 +794,171 @@ public:
return loco::NodeShape{out_shape};
}
- loco::NodeShape visit(const luci::CircleLogicalNot *node) final
+ loco::NodeShape visit(const luci::CircleGather *node) final
+ {
+ loco::TensorShape output_shape;
+
+ const auto input_shape = loco::shape_get(node->params()).as<loco::TensorShape>();
+ const auto positions_shape = loco::shape_get(node->indices()).as<loco::TensorShape>();
+ int32_t axis = node->axis();
+
+ // If CircleGather input has a dynamic shape, it can't inference this shape. So, it returns the
+ // shape that node already has.
+ if (input_shape.rank() == 0 || positions_shape.rank() == 0)
+ return use_own(node);
+
+ if (axis < 0)
+ axis += input_shape.rank();
+
+ output_shape.rank(input_shape.rank() - 1 + positions_shape.rank());
+ int32_t outdim_index = 0;
+ for (int32_t i = 0; i < axis; ++i)
+ output_shape.dim(outdim_index++) = input_shape.dim(i);
+ for (uint32_t i = 0; i < positions_shape.rank(); ++i)
+ output_shape.dim(outdim_index++) = positions_shape.dim(i);
+ for (uint32_t i = axis + 1; i < input_shape.rank(); ++i)
+ output_shape.dim(outdim_index++) = input_shape.dim(i);
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleGatherNd *node) final
+ {
+ loco::TensorShape output_shape;
+
+ const auto params_shape = loco::shape_get(node->params()).as<loco::TensorShape>();
+ const auto indices_shape = loco::shape_get(node->indices()).as<loco::TensorShape>();
+
+ const auto params_rank = params_shape.rank();
+ const auto indices_rank = indices_shape.rank();
+
+ // see https://www.tensorflow.org/api_docs/python/tf/gather_nd
+ // output.shape = indices.shape[:-1] + params.shape[indices.shape[-1]:]
+ // batch_dims isn't supported in tflite
+
+ // TODO: replace exceptions with setting shape to unknown?
+
+ if (!indices_shape.dim(indices_rank - 1).known())
+ INTERNAL_EXN("Last indices dimension is unknown");
+
+ auto indices_last_dim = indices_shape.dim(indices_rank - 1).value();
+
+ if (indices_last_dim > params_rank)
+ INTERNAL_EXN("Last indices dimension should be <= params rank");
+
+ const uint32_t output_rank = indices_rank + params_rank - indices_last_dim - 1;
+
+ output_shape.rank(output_rank);
+
+ uint32_t output_index = 0;
+ for (uint32_t i = 0; i < indices_rank - 1; ++i)
+ {
+ auto &dim = indices_shape.dim(i);
+ if (!dim.known())
+ INTERNAL_EXN("Unknown indices dimension is unsupported");
+ output_shape.dim(output_index++).set(dim.value());
+ }
+
+ for (uint32_t i = indices_last_dim; i < params_rank; ++i)
+ {
+ auto &dim = params_shape.dim(i);
+ if (!dim.known())
+ INTERNAL_EXN("Unknown params dimension is unsupported");
+ output_shape.dim(output_index++).set(dim.value());
+ }
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleGreater *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleGreaterEqual *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleIf *node) final
{
- const auto input_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
+ // Shape of CircleIf is not used. Just use input 0
+ assert(node->input_count() > 0);
+ const auto input_shape = loco::shape_get(node->input(0)).as<loco::TensorShape>();
return loco::NodeShape{input_shape};
}
- loco::NodeShape visit(const luci::CircleLogicalOr *node) final
+ loco::NodeShape visit(const luci::CircleL2Normalize *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleL2Pool2D *node) final
+ {
+ return infer_pool_2d_shape(node);
+ }
+
+ loco::NodeShape visit(const luci::CircleLeakyRelu *node) final
{
- const auto input_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
+ const auto input_shape = loco::shape_get(node->features()).as<loco::TensorShape>();
return loco::NodeShape{input_shape};
}
- loco::NodeShape visit(const luci::CircleMaximum *node) final
+ loco::NodeShape visit(const luci::CircleLess *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleLessEqual *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleLocalResponseNormalization *node) final
{
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
+ const auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ return loco::NodeShape{input_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleLog *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleLogicalAnd *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleLogicalNot *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleLogicalOr *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleLogistic *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleMatrixSetDiag *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ auto diagonal_shape = loco::shape_get(node->diagonal()).as<loco::TensorShape>();
+
+ auto rank = diagonal_shape.rank();
+
+ LUCI_ASSERT(rank == input_shape.rank() - 1, "diagonal rank = input rank - 1");
+
+ for (uint32_t i = 0; i < rank - 1; i++)
+ {
+ LUCI_ASSERT(diagonal_shape.dim(i) == input_shape.dim(i), "diagonal dims = input dims");
+ }
+
+ auto dim = std::min(input_shape.dim(rank - 1).value(), input_shape.dim(rank).value());
+
+ LUCI_ASSERT(dim == diagonal_shape.dim(rank - 1), "Max diag len error");
+
+ return loco::NodeShape{input_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleLogSoftmax *node) final { return use_logits(node); }
+
+ loco::NodeShape visit(const luci::CircleMatrixDiag *node) final
+ {
+ loco::TensorShape output_shape;
+
+ auto diagonal_shape = loco::shape_get(node->diagonal()).as<loco::TensorShape>();
+ auto rank = diagonal_shape.rank();
- auto output_shape = broadcast_shape(x_shape, y_shape);
+ output_shape.rank(rank + 1);
+
+ for (uint32_t i = 0; i < rank; i++)
+ {
+ output_shape.dim(i) = diagonal_shape.dim(i);
+ }
+
+ output_shape.dim(rank) = diagonal_shape.dim(rank - 1);
return loco::NodeShape{output_shape};
}
+ loco::NodeShape visit(const luci::CircleMaximum *node) final { return broadcast_xy(node); }
+
loco::NodeShape visit(const luci::CircleMaxPool2D *node) final
{
return infer_pool_2d_shape(node);
@@ -524,67 +966,81 @@ public:
loco::NodeShape visit(const luci::CircleMean *node) final
{
+ auto output_shape = infer_reducer(node->input(), node->reduction_indices(), node->keep_dims());
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleMinimum *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleMirrorPad *node) final
+ {
const loco::DataType S32 = loco::DataType::S32;
auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
- auto reduction_indices = dynamic_cast<luci::CircleConst *>(node->reduction_indices());
+ auto paddings = loco::must_cast<luci::CircleConst *>(node->paddings());
- { // Exceptions
- // TODO support non-const case
- LUCI_ASSERT(reduction_indices, "Only support constant reduction_indices");
- // TODO support other data type
- LUCI_ASSERT(reduction_indices->dtype() == S32, "Only support int 32");
- }
+ // TODO support non-const case
+ // TODO support other data type
+ LUCI_ASSERT(paddings->dtype() == S32, "Only support int 32 for now");
+ LUCI_ASSERT(paddings->rank() == 2, "paddings should be rank 2")
- std::vector<int32_t> reduction_values;
+ int32_t n = paddings->dim(0).value();
+ int32_t v = paddings->dim(1).value();
- for (uint32_t i = 0; i < reduction_indices->size<S32>(); ++i)
- {
- int32_t axis = reduction_indices->at<S32>(i);
- if (axis < 0)
- axis += input_shape.rank();
- if (not(0 <= axis and axis < static_cast<int32_t>(input_shape.rank())))
- INTERNAL_EXN_V("Invalid reduction axis for MEAN", oops::to_uint32(axis));
- reduction_values.push_back(axis);
- }
+ LUCI_ASSERT(v == 2, "paddings should be [n, 2]");
+ LUCI_ASSERT(n == int32_t(input_shape.rank()),
+ "paddings [n, 2] should have same value of input rank");
loco::TensorShape output_shape;
- if (node->keep_dims())
- {
- output_shape.rank(input_shape.rank());
- for (uint32_t i = 0; i < input_shape.rank(); ++i)
- output_shape.dim(i) = input_shape.dim(i);
- for (uint32_t i = 0; i < reduction_values.size(); ++i)
- output_shape.dim(reduction_values.at(i)) = 1;
- }
- else
+ output_shape.rank(input_shape.rank());
+ for (int32_t ni = 0; ni < n; ++ni)
{
- std::vector<bool> check_reduce(input_shape.rank(), false);
- for (uint32_t i = 0; i < reduction_values.size(); ++i)
- check_reduce.at(reduction_values.at(i)) = true;
-
- uint32_t reduce_cnt = 0;
- for (uint32_t i = 0; i < check_reduce.size(); ++i)
- if (check_reduce.at(i))
- ++reduce_cnt;
-
- output_shape.rank(input_shape.rank() - reduce_cnt);
- for (uint32_t i = 0, j = 0; i < check_reduce.size(); ++i)
- if (check_reduce.at(i) == false)
- output_shape.dim(j++) = i;
+ int32_t idx = ni * 2;
+ int value = input_shape.dim(ni).value();
+ value += paddings->at<S32>(idx + 0); // left
+ value += paddings->at<S32>(idx + 1); // right
+ output_shape.dim(ni) = value;
}
return loco::NodeShape{output_shape};
}
- loco::NodeShape visit(const luci::CircleMul *node) final
- {
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
+ loco::NodeShape visit(const luci::CircleMul *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleNeg *node) final { return use_x(node); }
- auto output_shape = broadcast_shape(x_shape, y_shape);
+ loco::NodeShape visit(const luci::CircleNotEqual *node) final { return broadcast_xy(node); }
+ loco::NodeShape visit(const luci::CircleOneHot *node) final
+ {
+ const loco::DataType S32 = loco::DataType::S32;
+ auto indices_shape = loco::shape_get(node->indices()).as<loco::TensorShape>();
+ // Only support OneHot node's depth() is CircleConst with type S32
+ // TODO support depth with other types
+ auto depth = loco::must_cast<luci::CircleConst *>(node->depth());
+ LUCI_ASSERT(depth->dtype() == S32, "Only support int32 CircleConst");
+ if (depth->rank() != 0)
+ INTERNAL_EXN_V("Only support rank 0 CircleOneHot in Depth", oops::to_uint32(depth->rank()));
+ loco::TensorShape output_shape;
+ output_shape.rank(indices_shape.rank() + 1);
+ auto axis = node->axis();
+ if (axis < 0)
+ axis += indices_shape.rank() + 1;
+ LUCI_ASSERT(0 <= axis, "Axis is out of range");
+ LUCI_ASSERT(static_cast<uint32_t>(axis) <= indices_shape.rank(), "Axis is out of range");
+ uint32_t j = 0;
+ for (uint32_t i = 0; i < output_shape.rank(); i++)
+ {
+ if (i == static_cast<uint32_t>(axis))
+ {
+ output_shape.dim(i) = depth->at<S32>(0);
+ }
+ else
+ {
+ output_shape.dim(i) = indices_shape.dim(j++);
+ }
+ }
return loco::NodeShape{output_shape};
}
@@ -636,10 +1092,9 @@ public:
const loco::DataType S32 = loco::DataType::S32;
auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
- auto paddings = dynamic_cast<luci::CircleConst *>(node->paddings());
+ auto paddings = loco::must_cast<luci::CircleConst *>(node->paddings());
// TODO support non-const case
- LUCI_ASSERT(paddings, "Only support constant reduction_indices");
// TODO support other data type
LUCI_ASSERT(paddings->dtype() == S32, "Only support int 32 for now");
LUCI_ASSERT(paddings->rank() == 2, "paddings should be rank 2")
@@ -666,6 +1121,93 @@ public:
return loco::NodeShape{output_shape};
}
+ loco::NodeShape visit(const luci::CirclePow *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CirclePRelu *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ auto alpha_shape = loco::shape_get(node->alpha()).as<loco::TensorShape>();
+
+ auto output_shape = broadcast_shape(input_shape, alpha_shape);
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleRange *node) final
+ {
+ loco::TensorShape output_shape;
+ output_shape.rank(1);
+
+ auto start_node = dynamic_cast<luci::CircleConst *>(node->start());
+ auto limit_node = dynamic_cast<luci::CircleConst *>(node->limit());
+ auto delta_node = dynamic_cast<luci::CircleConst *>(node->delta());
+
+ if (start_node == nullptr || limit_node == nullptr || delta_node == nullptr)
+ {
+ return use_own(node);
+ }
+
+ double start = 0, limit = 0, delta = 0;
+
+#define GET_RANGE_PARAM(DT) \
+ start = start_node->scalar<DT>(); \
+ limit = limit_node->scalar<DT>(); \
+ delta = delta_node->scalar<DT>();
+
+ switch (start_node->dtype())
+ {
+ case loco::DataType::FLOAT32:
+ GET_RANGE_PARAM(loco::DataType::FLOAT32)
+ break;
+ case loco::DataType::S32:
+ GET_RANGE_PARAM(loco::DataType::S32)
+ break;
+ default:
+ INTERNAL_EXN("Range data type not supported");
+ }
+
+#undef GET_RANGE_PARAM
+
+ if (delta == 0)
+ INTERNAL_EXN("Delta can not be zero");
+
+ output_shape.dim(0) = ceil((limit - start) / delta);
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleRank *) final
+ {
+ loco::TensorShape shape_output;
+ shape_output.rank(0);
+
+ return loco::NodeShape{shape_output};
+ }
+
+ loco::NodeShape visit(const luci::CircleReduceAny *node) final
+ {
+ auto output_shape = infer_reducer(node->input(), node->reduction_indices(), node->keep_dims());
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleReduceMax *node) final
+ {
+ auto output_shape = infer_reducer(node->input(), node->reduction_indices(), node->keep_dims());
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleReduceMin *node) final
+ {
+ auto output_shape = infer_reducer(node->input(), node->reduction_indices(), node->keep_dims());
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleReduceProd *node) final
+ {
+ auto output_shape = infer_reducer(node->input(), node->reduction_indices(), node->keep_dims());
+ return loco::NodeShape{output_shape};
+ }
+
loco::NodeShape visit(const luci::CircleRelu *node) final
{
auto input_shape = loco::shape_get(node->features()).as<loco::TensorShape>();
@@ -680,15 +1222,24 @@ public:
return loco::NodeShape{input_shape};
}
+ loco::NodeShape visit(const luci::CircleReluN1To1 *node) final
+ {
+ auto input_shape = loco::shape_get(node->features()).as<loco::TensorShape>();
+
+ return loco::NodeShape{input_shape};
+ }
+
/**
* @note CircleReshape has new shape info in two places: 2nd input and attribute.
- * This shape inference forces both to exist, and match each other.
- * When this condition satisfied, it return the inferred shape
+ * This shape inference uses shape from input 'shape' node when it's constant.
+ * If not, shape will be from node itself. shape from attribute is not used.
*
* TODO Change this policy when not appropriate
*/
loco::NodeShape visit(const luci::CircleReshape *node) final
{
+ LOGGER(l);
+
const loco::DataType S32 = loco::DataType::S32;
loco::TensorShape shape_by_input;
@@ -698,18 +1249,21 @@ public:
// Only support node's shape() is CircleConst with S32
// TODO support other node with other types
auto const_shape_node = dynamic_cast<luci::CircleConst *>(node->shape());
- LUCI_ASSERT(const_shape_node, "Only support CircleConst for shape of CircleReshape");
- LUCI_ASSERT(const_shape_node->dtype() == S32, "Only support int32 CircleConst");
-
- if (const_shape_node->rank() != 1)
- INTERNAL_EXN_V("Only support rank 1 CircleConst",
- oops::to_uint32(const_shape_node->rank()));
+ if (const_shape_node != nullptr)
+ {
+ LUCI_ASSERT(const_shape_node->dtype() == S32, "Only support int32 CircleConst");
- shape_by_input.rank(const_shape_node->dim(0).value());
+ shape_by_input.rank(const_shape_node->size<S32>());
- for (uint32_t axis = 0; axis < shape_by_input.rank(); ++axis)
+ for (uint32_t axis = 0; axis < shape_by_input.rank(); ++axis)
+ {
+ shape_by_input.dim(axis) = const_shape_node->at<S32>(axis);
+ }
+ }
+ else
{
- shape_by_input.dim(axis) = const_shape_node->at<S32>(axis);
+ // We use shape from the node itself
+ shape_by_input = own_shape(node);
}
}
@@ -723,8 +1277,12 @@ public:
}
}
- LUCI_ASSERT(shape_by_input == shape_by_attr,
- "Warning: Two new shape information mismatched for CircleReshape");
+ if (!(shape_by_input == shape_by_attr))
+ {
+ INFO(l) << "CircleReshape: Two new shape information mismatched : " << std::endl;
+ INFO(l) << " shape_by_input : " << shape_by_input << std::endl;
+ INFO(l) << " shape_by_attr : " << shape_by_attr << std::endl;
+ }
loco::TensorShape output_shape = shape_by_input;
@@ -754,94 +1312,517 @@ public:
return loco::NodeShape{output_shape};
}
- loco::NodeShape visit(const luci::CircleRsqrt *node) final
+ loco::NodeShape visit(const luci::CircleResizeBilinear *node) final
{
- auto input_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
- return loco::NodeShape{input_shape};
+ if (input_shape.rank() != 4)
+ INTERNAL_EXN("Expected ResizeBilinear input to have rank 4");
+
+ auto *const_node = loco::must_cast<luci::CircleConst *>(node->size());
+
+ if (const_node->dtype() != loco::DataType::S32)
+ INTERNAL_EXN("Only S32 datatype is supported for ResizeBilinear size");
+
+ if (const_node->rank() != 1)
+ INTERNAL_EXN("Expected size tensor of rank 1");
+
+ if (const_node->dim(0).value() != 2)
+ INTERNAL_EXN("Expected size tensor with shape [2]");
+
+ loco::TensorShape output_shape;
+ output_shape.rank(4);
+ output_shape.dim(0) = input_shape.dim(0);
+ output_shape.dim(1) = const_node->at<loco::DataType::S32>(0);
+ output_shape.dim(2) = const_node->at<loco::DataType::S32>(1);
+ output_shape.dim(3) = input_shape.dim(3);
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleResizeNearestNeighbor *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+
+ if (input_shape.rank() != 4)
+ INTERNAL_EXN("Expected ResizeNearesNeighbor input to have rank 4");
+
+ auto *const_node = loco::must_cast<luci::CircleConst *>(node->size());
+
+ if (const_node->dtype() != loco::DataType::S32)
+ INTERNAL_EXN("Only S32 datatype is supported for ResizeNearesNeighbor size");
+
+ if (const_node->rank() != 1)
+ INTERNAL_EXN("Expected size tensor of rank 1");
+
+ if (const_node->dim(0).value() != 2)
+ INTERNAL_EXN("Expected size tensor with shape [2]");
+
+ loco::TensorShape output_shape;
+ output_shape.rank(4);
+ output_shape.dim(0) = input_shape.dim(0);
+ output_shape.dim(1) = const_node->at<loco::DataType::S32>(0);
+ output_shape.dim(2) = const_node->at<loco::DataType::S32>(1);
+ output_shape.dim(3) = input_shape.dim(3);
+
+ return loco::NodeShape{output_shape};
}
- loco::NodeShape visit(const luci::CircleSoftmax *node) final
+ loco::NodeShape visit(const luci::CircleReverseSequence *node) final
{
- auto input_shape = loco::shape_get(node->logits()).as<loco::TensorShape>();
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
return loco::NodeShape{input_shape};
}
- loco::NodeShape visit(const luci::CircleSqrt *node) final
+ loco::NodeShape visit(const luci::CircleRound *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleReverseV2 *node) final
{
- auto input_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
+ auto input_shape = loco::shape_get(node->tensor()).as<loco::TensorShape>();
+
+ LUCI_ASSERT(loco::shape_get(node->axis()).as<loco::TensorShape>().rank() == 1,
+ "Tensor must be 1-D");
return loco::NodeShape{input_shape};
}
- loco::NodeShape visit(const luci::CircleSquaredDifference *node) final
+ loco::NodeShape visit(const luci::CircleRsqrt *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleScatterNd *node) final
{
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
+ loco::TensorShape output_shape;
+
+ auto shape_node = loco::must_cast<luci::CircleConst *>(node->shape());
+
+ const loco::DataType S32 = loco::DataType::S32;
+ const loco::DataType S64 = loco::DataType::S64;
- auto output_shape = broadcast_shape(x_shape, y_shape);
+ std::vector<int64_t> vect_shape;
+
+ if (shape_node->dtype() == S32)
+ vect_shape = vector_from_constant<S32>(shape_node);
+ else if (shape_node->dtype() == S64)
+ vect_shape = vector_from_constant<S64>(shape_node);
+ else
+ LUCI_ASSERT(false, "Only support int32/int64 for shape()");
+
+ output_shape.rank(vect_shape.size());
+ for (uint32_t i = 0; i < vect_shape.size(); ++i)
+ output_shape.dim(i) = vect_shape[i];
return loco::NodeShape{output_shape};
}
- loco::NodeShape visit(const luci::CircleSub *node) final
+ loco::NodeShape visit(const luci::CircleSegmentSum *node) final
{
- auto x_shape = loco::shape_get(node->x()).as<loco::TensorShape>();
- auto y_shape = loco::shape_get(node->y()).as<loco::TensorShape>();
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ auto segment_shape = loco::shape_get(node->segment_ids()).as<loco::TensorShape>();
+
+ LUCI_ASSERT(segment_shape.rank() == 1, "segment_ids must be 1-D tensor");
+ LUCI_ASSERT(segment_shape.dim(0).value() == input_shape.dim(0).value(),
+ "segment_ids size must be equal to the size of data's first dimension");
+
+ auto ids_shape_value = loco::must_cast<luci::CircleConst *>(node->segment_ids());
+
+ std::vector<int64_t> vect_ids;
+
+ if (ids_shape_value->dtype() == loco::DataType::S32)
+ vect_ids = vector_from_constant<loco::DataType::S32>(ids_shape_value);
+
+ LUCI_ASSERT(std::is_sorted(vect_ids.begin(), vect_ids.end()),
+ "segment_ids values should be sorted")
+
+ loco::TensorShape output_shape;
+
+ output_shape.rank(input_shape.rank());
+
+ for (uint32_t i = 1; i < input_shape.rank(); ++i)
+ output_shape.dim(i) = input_shape.dim(i);
+
+ output_shape.dim(0) = vect_ids.back() + 1;
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSelect *node) final
+ {
+ auto t_shape = loco::shape_get(node->t()).as<loco::TensorShape>();
+ assert(t_shape == loco::shape_get(node->e()).as<loco::TensorShape>());
+
+ // condition shape validation
+ auto c_shape = loco::shape_get(node->condition()).as<loco::TensorShape>();
+ if (c_shape.rank() != t_shape.rank())
+ {
+ if (c_shape.rank() != 0 && c_shape.rank() != 1)
+ INTERNAL_EXN_V("CircleSelect condition rank is not 0 nor 1: ", c_shape.rank());
+
+ if (c_shape.rank() == 1)
+ {
+ if (c_shape.dim(0).value() != t_shape.dim(0).value())
+ INTERNAL_EXN("CircleSelect condition dim(0) should match with t.dim(0)");
+ }
+ }
+
+ return loco::NodeShape{t_shape};
+ }
- auto output_shape = broadcast_shape(x_shape, y_shape);
+ loco::NodeShape visit(const luci::CircleSelectV2 *node) final
+ {
+ auto c_shape = loco::shape_get(node->condition()).as<loco::TensorShape>();
+ auto t_shape = loco::shape_get(node->t()).as<loco::TensorShape>();
+ auto e_shape = loco::shape_get(node->e()).as<loco::TensorShape>();
+
+ // validate ability to broadcast shapes to each other
+ auto b_shape = broadcast_shape(broadcast_shape(c_shape, t_shape), e_shape);
+ return loco::NodeShape{b_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleShape *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+
+ loco::TensorShape output_shape;
+
+ output_shape.rank(1);
+ output_shape.dim(0) = input_shape.rank();
return loco::NodeShape{output_shape};
}
- // TODO CircleTanh
+ loco::NodeShape visit(const luci::CircleSin *node) final { return use_x(node); }
- /// @brief Returns output shape of transpose. Use loco::ConstGen and luci::CircleConst for ConstT.
- template <class ConstT>
- loco::TensorShape output_shape_of_transpose(loco::TensorShape input_shape,
- const ConstT *perm_node)
+ loco::NodeShape visit(const luci::CircleSlice *node) final
{
+ const loco::DataType S32 = loco::DataType::S32;
+ const loco::DataType S64 = loco::DataType::S64;
+
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+
+ auto const_begin = loco::must_cast<luci::CircleConst *>(node->begin());
+ auto const_size = loco::must_cast<luci::CircleConst *>(node->size());
+
loco::TensorShape output_shape;
- output_shape.rank(input_shape.rank());
+ std::vector<int64_t> vect_begin; // to hold both S32/S64, we use int64_t
+ std::vector<int64_t> vect_size;
- assert(perm_node->dtype() == loco::DataType::S32);
- assert(input_shape.rank() == perm_node->template size<loco::DataType::S32>());
+ if (const_begin->dtype() == S32)
+ vect_begin = vector_from_constant<S32>(const_begin);
+ else if (const_begin->dtype() == S64)
+ vect_begin = vector_from_constant<S64>(const_begin);
+ else
+ LUCI_ASSERT(false, "Only support int32/int64 for begin()");
- for (uint32_t out_axis = 0; out_axis < output_shape.rank(); out_axis++)
+ if (const_size->dtype() == S32)
+ vect_size = vector_from_constant<S32>(const_size);
+ else if (const_size->dtype() == S64)
+ vect_size = vector_from_constant<S64>(const_size);
+ else
+ LUCI_ASSERT(false, "Only support int32/int64 for size()");
+
+ assert(input_shape.rank() == vect_begin.size());
+ assert(input_shape.rank() == vect_size.size());
+
+ output_shape.rank(vect_begin.size());
+ for (uint32_t idx = 0; idx < vect_begin.size(); ++idx)
{
- auto in_axis = perm_node->template at<loco::DataType::S32>(out_axis);
- output_shape.dim(out_axis) = input_shape.dim(in_axis);
+ auto size = vect_size.at(idx);
+ if (size == -1)
+ {
+ size = input_shape.dim(idx).value() - vect_begin.at(idx);
+ }
+ output_shape.dim(idx) = size;
}
- return output_shape;
+ return loco::NodeShape{output_shape};
}
- loco::NodeShape visit(const luci::CircleTranspose *node) final
+ loco::NodeShape visit(const luci::CircleSoftmax *node) final { return use_logits(node); }
+
+ loco::NodeShape visit(const luci::CircleSpaceToBatchND *node) final
{
- auto input_shape = loco::shape_get(node->a()).as<loco::TensorShape>();
+ const loco::DataType S32 = loco::DataType::S32;
+
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ // Support only input rank is 3 and 4
+ assert(input_shape.rank() == 3 || input_shape.rank() == 4);
+
+ // Only support block_shape() with S32 type CircleConst for now
+ auto const_block_shape = loco::must_cast<luci::CircleConst *>(node->block_shape());
+ LUCI_ASSERT(const_block_shape->dtype() == S32, "Only support int32 block_shape");
+
+ // Only support paddings() with S32 type CircleConst for now
+ auto const_paddings = loco::must_cast<luci::CircleConst *>(node->paddings());
+ LUCI_ASSERT(const_paddings->dtype() == S32, "Only support int32 paddings");
+
+ auto const_block_shape_shape = loco::shape_get(const_block_shape).as<loco::TensorShape>();
+ auto const_paddings_shape = loco::shape_get(const_paddings).as<loco::TensorShape>();
+ assert(const_block_shape_shape.rank() == 1);
+ assert(const_paddings_shape.rank() == 2);
+
+ int32_t input_spatial_dim = input_shape.rank() - 2;
+ assert(const_block_shape_shape.dim(0) == input_spatial_dim);
+ assert(const_paddings_shape.dim(0) == input_spatial_dim);
+ assert(const_paddings_shape.dim(1) == 2);
+
+ // Check all values of block_shape >= 1
+ uint32_t ele_count = const_block_shape->size<S32>();
+ for (uint32_t e = 0; e < ele_count; ++e)
+ {
+ auto val = const_block_shape->at<S32>(e);
+ if (val < 1)
+ {
+ INTERNAL_EXN_V("All values of block_shape >= 1: ", e);
+ }
+ }
+
+ loco::TensorShape shape_output;
+
+ shape_output.rank(input_shape.rank());
+
+ int32_t output_batch_size = input_shape.dim(0).value();
+ for (int32_t dim = 0; dim < input_spatial_dim; ++dim)
+ {
+ int dim_size = input_shape.dim(dim + 1).value();
+ dim_size += const_paddings->at<S32>(dim * 2);
+ dim_size += const_paddings->at<S32>(dim * 2 + 1);
+ shape_output.dim(dim + 1) = dim_size / const_block_shape->at<S32>(dim);
+
+ assert(dim_size % const_block_shape->at<S32>(dim) == 0);
+ output_batch_size = output_batch_size * const_block_shape->at<S32>(dim);
+ }
+ shape_output.dim(0) = output_batch_size;
+ shape_output.dim(input_shape.rank() - 1) = input_shape.dim(input_shape.rank() - 1);
+
+ return loco::NodeShape{shape_output};
+ }
+
+ loco::NodeShape visit(const luci::CircleSpaceToDepth *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ LUCI_ASSERT(input_shape.rank() == 4, "Only input rank 4 is supported");
+
+ // Only data format NHWC is supported
+ int32_t height = input_shape.dim(1).value();
+ int32_t width = input_shape.dim(2).value();
+ int32_t depth = input_shape.dim(3).value();
+
+ int block_size = node->block_size();
+
+ if (block_size < 2)
+ INTERNAL_EXN("Block size must be >= 2");
+
+ if ((height % block_size) || (width % block_size))
+ {
+ INTERNAL_EXN("The input tensor's height and width must be divisible by block_size");
+ }
+
+ loco::TensorShape output_shape;
+ output_shape.rank(4);
+
+ output_shape.dim(0) = input_shape.dim(0).value();
+ output_shape.dim(1) = height / block_size;
+ output_shape.dim(2) = width / block_size;
+ output_shape.dim(3) = block_size * block_size * depth;
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSparseToDense *node) final
+ {
+ loco::TensorShape shape;
+ {
+ LUCI_ASSERT(node->output_shape(), "dims input should not be nullptr");
+
+ auto output_shape_node = dynamic_cast<luci::CircleConst *>(node->output_shape());
+ if (output_shape_node != nullptr)
+ {
+ // Only support node with S32
+ LUCI_ASSERT(output_shape_node->dtype() == loco::DataType::S32,
+ "Only support int32 CircleConst");
+
+ if (output_shape_node->rank() != 1)
+ INTERNAL_EXN_V("Only support rank 1 CircleConst",
+ oops::to_uint32(output_shape_node->rank()));
+
+ shape.rank(output_shape_node->dim(0).value());
+
+ for (uint32_t axis = 0; axis < shape.rank(); ++axis)
+ {
+ shape.dim(axis) = output_shape_node->at<loco::DataType::S32>(axis);
+ }
+ }
+ else
+ {
+ shape = own_shape(node);
+ }
+ }
+
+ return loco::NodeShape{shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSplit *node) final
+ {
+ // We'll set Split output as same as input so that SplitOut can handle it's own shape
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ return loco::NodeShape{input_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSplitV *node) final
+ {
+ // We'll set SplitV output as same as input so that SplitOut can handle it's own shape
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ return loco::NodeShape{input_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSqrt *node) final { return use_x(node); }
- auto canon_perm = dynamic_cast<loco::ConstGen *>(node->perm());
- auto circle_perm = dynamic_cast<luci::CircleConst *>(node->perm());
+ loco::NodeShape visit(const luci::CircleSquare *node) final { return use_x(node); }
- if (canon_perm)
+ loco::NodeShape visit(const luci::CircleSquaredDifference *node) final
+ {
+ return broadcast_xy(node);
+ }
+
+ loco::NodeShape visit(const luci::CircleStridedSlice *node) final
+ {
+ auto begin_node = dynamic_cast<luci::CircleConst *>(node->begin());
+ auto end_node = dynamic_cast<luci::CircleConst *>(node->end());
+ auto strides_node = dynamic_cast<luci::CircleConst *>(node->strides());
+
+ if (begin_node == nullptr || end_node == nullptr || strides_node == nullptr)
{
- return loco::NodeShape{output_shape_of_transpose(input_shape, canon_perm)};
+ return use_own(node);
}
- else if (circle_perm)
+
+ loco::TensorShape shape = infer_output_shape(node);
+ return loco::NodeShape{shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSqueeze *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+
+ // TODO input shape may be unknown before runtime
+ std::vector<bool> do_squeeze(input_shape.rank(), false);
+ uint32_t num_squeezed = 0;
+
+ if (!node->squeeze_dims().empty())
{
- return loco::NodeShape{output_shape_of_transpose(input_shape, circle_perm)};
+ // SqueezeDims not empty, squeeze only dims specified
+ for (int32_t raw_dim : node->squeeze_dims())
+ {
+ int32_t dim = raw_dim < 0 ? raw_dim + input_shape.rank() : raw_dim;
+
+ if (dim < 0 || static_cast<uint32_t>(dim) >= input_shape.rank() ||
+ input_shape.dim(dim).value() != 1)
+ {
+ INTERNAL_EXN("invalid dimention specified to Squeeze");
+ }
+
+ if (!do_squeeze[dim])
+ ++num_squeezed;
+ do_squeeze[dim] = true;
+ }
}
else
- INTERNAL_EXN("perm of CircleTranspose should be either ConstGen or CircleConst");
+ {
+ // SqueezeDims empty, squeeze any dims with size == 1
+ for (uint32_t dim = 0; dim < input_shape.rank(); ++dim)
+ {
+ if (input_shape.dim(dim) == 1)
+ {
+ do_squeeze[dim] = true;
+ ++num_squeezed;
+ }
+ }
+ }
+
+ loco::TensorShape output_shape;
+ output_shape.rank(input_shape.rank() - num_squeezed);
+
+ for (uint32_t in_dim = 0, out_dim = 0; in_dim < input_shape.rank(); ++in_dim)
+ {
+ if (!do_squeeze[in_dim])
+ {
+ output_shape.dim(out_dim++) = input_shape.dim(in_dim);
+ }
+ }
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSub *node) final { return broadcast_xy(node); }
+
+ loco::NodeShape visit(const luci::CircleSum *node) final
+ {
+ auto output_shape = infer_reducer(node->input(), node->reduction_indices(), node->keep_dims());
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleTanh *node) final { return use_x(node); }
+
+ loco::NodeShape visit(const luci::CircleTile *node) final
+ {
+ const loco::DataType S32 = loco::DataType::S32;
+
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ auto multiples = loco::must_cast<luci::CircleConst *>(node->multiples());
+
+ // TODO support non-const case
+ // TODO support S64 type
+ LUCI_ASSERT(multiples->dtype() == S32, "Only support int32 multiples");
+ LUCI_ASSERT(multiples->rank() == 1, "multiples should be rank 1")
+
+ uint32_t n = multiples->dim(0).value();
+
+ LUCI_ASSERT(n == input_shape.rank(), "length of multiples should be the same with input rank");
+
+ loco::TensorShape output_shape;
+
+ output_shape.rank(input_shape.rank());
+ for (uint32_t ni = 0; ni < n; ++ni)
+ {
+ int32_t multiple = multiples->at<S32>(ni);
+ output_shape.dim(ni) = input_shape.dim(ni).value() * static_cast<uint32_t>(multiple);
+ }
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleTopKV2 *node) final
+ {
+ // set shape of this node as same as input
+ const auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ return loco::NodeShape{input_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleTranspose *node) final
+ {
+ auto input_shape = loco::shape_get(node->a()).as<loco::TensorShape>();
+
+ auto perm_node = loco::must_cast<luci::CircleConst *>(node->perm());
+
+ loco::TensorShape output_shape;
+ output_shape.rank(input_shape.rank());
+
+ assert(perm_node->dtype() == loco::DataType::S32);
+ assert(input_shape.rank() == perm_node->template size<loco::DataType::S32>());
+
+ for (uint32_t out_axis = 0; out_axis < output_shape.rank(); out_axis++)
+ {
+ auto in_axis = perm_node->template at<loco::DataType::S32>(out_axis);
+ output_shape.dim(out_axis) = input_shape.dim(in_axis);
+ }
+
+ return output_shape;
}
loco::NodeShape visit(const luci::CircleTransposeConv *node) final
{
// TransposeConv's output shape is written in its 'inputSizes' argument
- auto input_sizes_const = dynamic_cast<luci::CircleConst *>(node->inputSizes());
- LUCI_ASSERT(input_sizes_const,
- "Only support when CircleTransposeConv's inputSizes is CircleConst")
+ auto input_sizes_const = loco::must_cast<luci::CircleConst *>(node->inputSizes());
+ // TODO support non-const type
LUCI_ASSERT(input_sizes_const->dtype() == loco::DataType::S32, "Only support S32 dtype")
LUCI_ASSERT(input_sizes_const->rank() == 1 && input_sizes_const->dim(0).value() == 4,
"Only support rank 1 with 4 entries")
@@ -855,7 +1836,115 @@ public:
return loco::NodeShape{shape};
}
+ loco::NodeShape visit(const luci::CircleUnpack *node) final
+ {
+ // CircleUnpack provides list(array) of Tensors which has one less dimension of the input
+ // We'll set shape of CircleUnpack to shape of actual outputs
+ // TODO fix this if any problem rises
+ auto value_shape = loco::shape_get(node->value()).as<loco::TensorShape>();
+
+ auto axis = node->axis();
+ auto num = node->num();
+ auto rank = static_cast<int32_t>(value_shape.rank());
+
+ if (rank == 0)
+ {
+ // Unknown shape
+ return use_own(node);
+ }
+
+ LUCI_ASSERT(-rank <= axis && axis < rank, "Axis is out of range");
+
+ if (axis < 0)
+ axis += rank;
+
+ LUCI_ASSERT(num == static_cast<int32_t>(value_shape.dim(axis).value()),
+ "num, axis maybe incorrect");
+
+ loco::TensorShape output_shape;
+ output_shape.rank(rank - 1);
+
+ for (int32_t i = 0, o = 0; i < rank; ++i)
+ {
+ if (i != axis)
+ output_shape.dim(o++) = value_shape.dim(i);
+ }
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleWhere *node) final { return use_own(node); }
+
+ loco::NodeShape visit(const luci::CircleWhile *node) final
+ {
+ // Shape of CircleWhile is not used. Just use input 0
+ assert(node->arity() > 0);
+ const auto input_shape = loco::shape_get(node->input(0)).as<loco::TensorShape>();
+ return loco::NodeShape{input_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleZerosLike *node) final
+ {
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+
+ return loco::NodeShape{input_shape};
+ }
+
// Circle Only
+ loco::NodeShape visit(const luci::CircleBCQFullyConnected *node) final
+ {
+ loco::TensorShape out_shape;
+
+ auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
+ auto weights_clusters = loco::must_cast<luci::CircleConst *>(node->weights_clusters());
+
+ LUCI_ASSERT(input_shape.rank() == 2, "Input rank of BCQFullyConnected should be 2");
+
+ int32_t qbits_sum = 0;
+ for (uint32_t i = 0; i < weights_clusters->dim(0).value(); ++i)
+ {
+ qbits_sum += weights_clusters->at<loco::DataType::S32>(i * 2 + 1);
+ }
+
+ out_shape.rank(2);
+ out_shape.dim(0) = qbits_sum;
+ out_shape.dim(1) = input_shape.dim(1);
+
+ return loco::NodeShape{out_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleBCQGather *node) final
+ {
+ loco::TensorShape input_shape;
+ loco::TensorShape output_shape;
+
+ const auto input_binary_shape = loco::shape_get(node->input_binary()).as<loco::TensorShape>();
+ const auto indices_shape = loco::shape_get(node->indices()).as<loco::TensorShape>();
+ auto axis = node->axis();
+
+ auto input_clusters = loco::must_cast<luci::CircleConst *>(node->input_clusters());
+ auto qbits_sum = 0;
+ for (uint32_t i = 0; i < input_clusters->dim(0).value(); ++i)
+ {
+ qbits_sum += input_clusters->at<loco::DataType::S32>(i * 2 + 1);
+ }
+
+ input_shape.rank(2);
+ input_shape.dim(0) = qbits_sum;
+ input_shape.dim(1) = input_binary_shape.dim(1).value() * 32;
+
+ output_shape.rank(input_shape.rank() - 1 + indices_shape.rank());
+ int32_t outdim_index = 0;
+ for (int32_t i = 0; i < axis; ++i)
+ output_shape.dim(outdim_index++) = input_shape.dim(i);
+ for (uint32_t i = 0; i < indices_shape.rank(); ++i)
+ output_shape.dim(outdim_index++) = indices_shape.dim(i);
+ for (uint32_t i = axis + 1; i < input_shape.rank(); ++i)
+ output_shape.dim(outdim_index++) = input_shape.dim(i);
+
+ return loco::NodeShape{output_shape};
+ }
+
loco::NodeShape visit(const luci::CircleInstanceNorm *node) final
{
auto input_shape = loco::shape_get(node->input()).as<loco::TensorShape>();
@@ -877,9 +1966,230 @@ public:
loco::NodeShape visit(const luci::CircleOutput *node) final
{
- auto from_shape = loco::shape_get(node->from()).as<loco::TensorShape>();
+ auto graph_outputs = node->graph()->outputs();
+ auto graph_output = graph_outputs->at(node->index());
+ auto output_shape = graph_output->shape();
+
+ return loco::NodeShape{*output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleOutputDummy *node) final { return use_own(node); }
+
+ loco::NodeShape visit(const luci::CircleOutputExclude *node) final { return use_own(node); }
+
+ loco::NodeShape visit(const luci::CircleCustomOut *node) final { return use_own(node); }
+
+ loco::NodeShape visit(const luci::CircleIfOut *node) final
+ {
+ /**
+ * @note IF operator type and shape are that of the "then" and "else"
+ * Graph Outputs.
+ */
+ auto circle_if = dynamic_cast<const luci::CircleIf *>(node->input());
+ if (circle_if == nullptr)
+ {
+ INTERNAL_EXN("CircleIf IR is not configured correctly");
+ }
+
+ auto index = node->index();
+ auto then_graph = circle_if->then_graph();
+ auto else_graph = circle_if->else_graph();
+ assert(then_graph != nullptr);
+ assert(else_graph != nullptr);
+
+ // shape and type are assumed to be same
+ // these are checked at post_import_graph() in Import
+ auto then_outputs = loco::output_nodes(then_graph);
+ auto else_outputs = loco::output_nodes(else_graph);
+ assert(then_outputs.size() == else_outputs.size());
+ assert(index < static_cast<int32_t>(then_outputs.size()));
+
+ auto then_out = loco::must_cast<luci::CircleOutput *>(then_outputs.at(index));
+ auto else_out = loco::must_cast<luci::CircleOutput *>(else_outputs.at(index));
+
+ auto then_graph_outputs = then_graph->outputs(); // loco::GraphOutput items
+ auto else_graph_outputs = else_graph->outputs();
+ assert(then_graph_outputs->size() == else_graph_outputs->size());
+
+ auto then_graph_output = then_graph_outputs->at(then_out->index());
+ auto else_graph_output = else_graph_outputs->at(else_out->index());
+ (void)else_graph_output; // make compiler happy for unused variable warnings
+ assert(*then_graph_output->shape() == *else_graph_output->shape());
+
+ return loco::NodeShape{*then_graph_output->shape()};
+ }
+
+ loco::NodeShape visit(const luci::CircleSplitOut *node) final
+ {
+ const loco::DataType S32 = loco::DataType::S32;
+
+ auto split = dynamic_cast<const luci::CircleSplit *>(node->input());
+ if (split == nullptr)
+ INTERNAL_EXN("CircleSplit IR is not configured correctly");
+
+ loco::NodeShape unknown;
+
+ auto split_shape = loco::shape_get(split).as<loco::TensorShape>();
+
+ auto split_dim = dynamic_cast<const luci::CircleConst *>(split->split_dim());
+ if (split_dim == nullptr)
+ return unknown; // we need CircleConst for split_dim
+ LUCI_ASSERT(split_dim->dtype() == S32, "Only support int32 for split_dim");
+
+ assert(split_dim->size<S32>() == 1);
+ auto split_dim_axis = split_dim->at<S32>(0);
+ if (split_dim_axis < 0)
+ split_dim_axis += split_shape.rank();
+
+ auto split_dim_value = split_shape.dim(split_dim_axis).value();
+ assert(split_dim_value % split->num_split() == 0);
+ const int split_depth = split_dim_value / split->num_split();
+
+ loco::TensorShape output_shape = split_shape;
+
+ // All shapes are equally same
+ output_shape.dim(split_dim_axis) = loco::Dimension(split_depth);
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleSplitVOut *node) final
+ {
+ const loco::DataType S32 = loco::DataType::S32;
+
+ auto split = dynamic_cast<const luci::CircleSplitV *>(node->input());
+ if (split == nullptr)
+ INTERNAL_EXN("CircleSplit IR is not configured correctly");
+
+ loco::NodeShape unknown;
+
+ auto split_shape = loco::shape_get(split).as<loco::TensorShape>();
+
+ auto size_splits = dynamic_cast<const luci::CircleConst *>(split->size_splits());
+ if (size_splits == nullptr)
+ return unknown; // we need CircleConst for size_splits
+ LUCI_ASSERT(size_splits->dtype() == S32, "Only support int32 for size_splits");
+
+ auto split_dim = dynamic_cast<const luci::CircleConst *>(split->split_dim());
+ if (split_dim == nullptr)
+ return unknown; // we need CircleConst for split_dim
+ LUCI_ASSERT(split_dim->dtype() == S32, "Only support int32 for split_dim");
+
+ // fetch axis
+ assert(split_dim->size<S32>() == 1);
+ auto split_dim_axis = split_dim->at<S32>(0);
+ if (split_dim_axis < 0)
+ split_dim_axis += split_shape.rank();
+
+ // interpret size_splits values
+ int32_t size_splits_count = static_cast<int32_t>(size_splits->size<S32>());
+ assert(size_splits_count == split->num_split());
+
+ int64_t minus_one_count = 0, size_splits_sum = 0;
+ for (int32_t idx = 0; idx < size_splits_count; ++idx)
+ {
+ auto size = size_splits->at<S32>(idx);
+ assert(size >= -1);
+ if (size == -1)
+ ++minus_one_count;
+ else
+ size_splits_sum += size;
+ }
+ if (minus_one_count > 1)
+ INTERNAL_EXN("CircleSplitV size_splits has more than two -1 values");
+
+ // calcuate this SplitVOut shape
+ auto input_size = split_shape.dim(split_dim_axis).value();
+ assert(size_splits_sum <= input_size);
+
+ auto index_this = node->index();
+ assert(0 <= index_this && index_this < split->num_split());
+ auto split_depth = size_splits->at<S32>(index_this);
+ if (split_depth == -1)
+ split_depth = input_size - size_splits_sum;
+
+ loco::TensorShape output_shape = split_shape;
+
+ output_shape.dim(split_dim_axis) = loco::Dimension(split_depth);
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleTopKV2Out *node) final
+ {
+ const loco::DataType S32 = loco::DataType::S32;
+
+ auto topkv2 = dynamic_cast<const luci::CircleTopKV2 *>(node->input());
+ if (topkv2 == nullptr)
+ INTERNAL_EXN("CircleSplit IR is not configured correctly");
+
+ // shape of topkv2 is same as topkv2->input()
+ auto input_shape = loco::shape_get(topkv2).as<loco::TensorShape>();
+
+ auto node_k = loco::must_cast<const luci::CircleConst *>(topkv2->k());
+ LUCI_ASSERT(node_k->dtype() == S32, "Only support Int32");
+ assert(node_k->size<S32>() == 1);
+
+ loco::TensorShape output_shape;
+
+ output_shape.rank(input_shape.rank());
+ for (uint32_t idx = 0; idx < input_shape.rank() - 1; ++idx)
+ {
+ output_shape.dim(idx) = input_shape.dim(idx);
+ }
+ output_shape.dim(input_shape.rank() - 1) = node_k->at<S32>(0);
+
+ return loco::NodeShape{output_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleUnpackOut *node) final
+ {
+ auto unpack = dynamic_cast<const luci::CircleUnpack *>(node->input());
+ if (unpack == nullptr)
+ {
+ INTERNAL_EXN("CircleUnpack IR is not configured correctly");
+ }
+
+ auto unpack_shape = loco::shape_get(unpack).as<loco::TensorShape>();
+
+ return loco::NodeShape{unpack_shape};
+ }
+
+ loco::NodeShape visit(const luci::CircleWhileOut *node) final
+ {
+ /**
+ * @note WHILE operator's shape is the same with the "cond"
+ * Graph input.
+ */
+ auto circle_while = dynamic_cast<const luci::CircleWhile *>(node->input());
+ if (circle_while == nullptr)
+ {
+ INTERNAL_EXN("CircleWhile IR is not configured correctly");
+ }
+
+ auto index = node->index();
+ auto cond_graph = circle_while->cond_graph();
+ assert(cond_graph != nullptr);
+
+ // Assumption: the index of CircleWhileOut matches with the index of input nodes returned by
+ // loco::input_nodes
+ auto cond_inputs = loco::input_nodes(cond_graph);
+ auto cond_in = loco::must_cast<luci::CircleInput *>(cond_inputs.at(index));
+
+ auto cond_graph_inputs = cond_graph->inputs();
+ auto cond_graph_input = cond_graph_inputs->at(cond_in->index());
- return loco::NodeShape{from_shape};
+ auto cond_graph_input_shape = *cond_graph_input->shape();
+ auto this_shape = own_shape(node);
+
+ if (!(this_shape == cond_graph_input_shape))
+ {
+ LOGGER(l);
+ WARN(l) << "Warning: CircleWhileOut '" << node->name() << "' shape mispatch " << this_shape
+ << " vs " << cond_graph_input_shape;
+ }
+
+ return loco::NodeShape{this_shape};
}
};
@@ -895,11 +2205,30 @@ bool CircleShapeInferenceRule::recognize(const loco::Dialect *d) const
bool CircleShapeInferenceRule::infer(const loco::Node *node, loco::NodeShape &shape) const
{
+ LOGGER(l);
+
assert(node->dialect() == CircleDialect::get());
- assert(dynamic_cast<const CircleNode *>(node) != nullptr);
ShapeInferenceAlgorithm alg;
- shape = dynamic_cast<const CircleNode *>(node)->accept(&alg);
+ auto circle_node = loco::must_cast<const CircleNode *>(node);
+
+ bool is_shape_undefined = (circle_node->shape_status() == ShapeStatus::UNDEFINED);
+ bool is_shape_none = (circle_node->shape_status() == ShapeStatus::NOSHAPE);
+ bool is_scalar = (circle_node->rank() == 0);
+
+ if (is_shape_undefined)
+ shape = circle_node->accept(&alg);
+ else
+ {
+ if (is_shape_none || is_scalar)
+ shape = own_shape(circle_node);
+ else
+ shape = circle_node->accept(&alg);
+ }
+
+ VERBOSE(l, 1) << "[luci] shape: " << circle_node->name();
+ VERBOSE(l, 1) << " own_shape: " << own_shape(circle_node)
+ << " -> infer: " << shape.as<loco::TensorShape>();
return true;
}
diff --git a/compiler/luci/service/src/CircleShapeInferenceRule.test.cpp b/compiler/luci/service/src/CircleShapeInferenceRule.test.cpp
index 0374251a0..ac27db3bd 100644
--- a/compiler/luci/service/src/CircleShapeInferenceRule.test.cpp
+++ b/compiler/luci/service/src/CircleShapeInferenceRule.test.cpp
@@ -26,6 +26,8 @@
#include <loco/Service/CanonicalShapeInferenceRule.h>
#include <loco/Service/MultiDialectShapeInferenceRule.h>
+#include <oops/InternalExn.h>
+
#include <gtest/gtest.h>
#include <memory>
@@ -51,38 +53,39 @@ TEST(CircleShapeInferenceRuleTest, minimal_with_CircleRelu)
{
// Create a simple network
luci::test::TestGraph graph;
- auto tfl_node = graph.append<luci::CircleRelu>(graph.pull);
- graph.complete(tfl_node);
+ auto relu_node = graph.append<luci::CircleRelu>(graph.input_node);
+ graph.complete(relu_node);
// set shape
{
- graph.pull->rank(2);
- graph.pull->dim(0) = 3;
- graph.pull->dim(1) = 4;
+ graph.input_node->rank(2);
+ graph.input_node->dim(0) = 3;
+ graph.input_node->dim(1) = 4;
+
+ graph.output_node->rank(2);
+ graph.output_node->dim(0) = 3;
+ graph.output_node->dim(1) = 4;
+
+ luci::test::graph_input_shape(graph.input_node);
+ luci::test::graph_output_shape(graph.output_node);
}
// pre-check
- ASSERT_FALSE(loco::shape_known(tfl_node));
+ ASSERT_FALSE(loco::shape_known(relu_node));
// shape inference
- luci::CircleShapeInferenceRule tfl_rule;
- loco::CanonicalShapeInferenceRule canonical_rule;
- loco::MultiDialectShapeInferenceRule rules;
-
- rules.bind(loco::CanonicalDialect::get(), &canonical_rule)
- .bind(luci::CircleDialect::get(), &tfl_rule);
-
- loco::apply(&rules).to(graph.g.get());
+ while (shape_pass(graph.graph()) == true)
+ ;
// Verify
{
- ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
+ ASSERT_TRUE(loco::shape_known(relu_node));
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(relu_node).domain());
- auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 2);
- ASSERT_EQ(shape.dim(0), 3);
- ASSERT_EQ(shape.dim(1), 4);
+ auto shape = loco::shape_get(relu_node).as<loco::TensorShape>();
+ ASSERT_EQ(2, shape.rank());
+ ASSERT_EQ(3, shape.dim(0));
+ ASSERT_EQ(4, shape.dim(1));
}
}
@@ -91,85 +94,92 @@ TEST(CircleShapeInferenceRuleTest, minimal_with_CircleRelu)
TEST(CircleShapeInferenceRuleTest, avgpool2d_valid)
{
luci::test::TestGraph graph;
- auto tfl_node = graph.append<luci::CircleAveragePool2D>(graph.pull);
+ auto avg_node = graph.append<luci::CircleAveragePool2D>(graph.input_node);
graph.complete();
- auto pull = graph.pull;
+ auto input_node = graph.input_node;
{
- pull->shape({1, 4, 3, 1});
+ input_node->shape({1, 4, 3, 1});
+ luci::test::graph_input_shape(input_node);
+ }
+ auto output_node = graph.output_node;
+ {
+ output_node->shape({1, 2, 1, 1});
+ luci::test::graph_output_shape(output_node);
}
// setting CircleAveragePool2D
{
- tfl_node->filter()->h(2);
- tfl_node->filter()->w(2);
- tfl_node->stride()->h(2);
- tfl_node->stride()->w(2);
- tfl_node->fusedActivationFunction(luci::FusedActFunc::NONE);
- tfl_node->padding(luci::Padding::VALID);
+ avg_node->filter()->h(2);
+ avg_node->filter()->w(2);
+ avg_node->stride()->h(2);
+ avg_node->stride()->w(2);
+ avg_node->fusedActivationFunction(luci::FusedActFunc::NONE);
+ avg_node->padding(luci::Padding::VALID);
}
- ASSERT_FALSE(loco::shape_known(tfl_node));
+ ASSERT_FALSE(loco::shape_known(avg_node));
// shape inference
- luci::CircleShapeInferenceRule tfl_rule;
- loco::CanonicalShapeInferenceRule canonical_rule;
- loco::MultiDialectShapeInferenceRule rules;
-
- rules.bind(loco::CanonicalDialect::get(), &canonical_rule)
- .bind(luci::CircleDialect::get(), &tfl_rule);
-
- loco::apply(&rules).to(graph.g.get());
+ while (shape_pass(graph.graph()) == true)
+ ;
// Verify
{
- ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
-
- auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 4);
- ASSERT_EQ(shape.dim(0).value(), 1);
- ASSERT_EQ(shape.dim(1).value(), 2);
- ASSERT_EQ(shape.dim(2).value(), 1);
- ASSERT_EQ(shape.dim(3).value(), 1);
+ ASSERT_TRUE(loco::shape_known(avg_node));
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(avg_node).domain());
+
+ auto shape = loco::shape_get(avg_node).as<loco::TensorShape>();
+ ASSERT_EQ(4, shape.rank());
+ ASSERT_EQ(1, shape.dim(0).value());
+ ASSERT_EQ(2, shape.dim(1).value());
+ ASSERT_EQ(1, shape.dim(2).value());
+ ASSERT_EQ(1, shape.dim(3).value());
}
}
TEST(CircleShapeInferenceRuleTest, avgpool2d_same)
{
luci::test::TestGraph graph;
- auto tfl_node = graph.append<luci::CircleAveragePool2D>(graph.pull);
+ auto avg_node = graph.append<luci::CircleAveragePool2D>(graph.input_node);
graph.complete();
- auto pull = graph.pull;
+ auto input_node = graph.input_node;
{
- pull->shape({1, 4, 3, 1});
+ input_node->shape({1, 4, 3, 1});
+ luci::test::graph_input_shape(input_node);
+ }
+ auto output_node = graph.output_node;
+ {
+ output_node->shape({1, 2, 2, 1});
+ luci::test::graph_output_shape(output_node);
}
// setting CircleAveragePool2D
{
- tfl_node->filter()->h(2);
- tfl_node->filter()->w(2);
- tfl_node->stride()->h(2);
- tfl_node->stride()->w(2);
- tfl_node->fusedActivationFunction(luci::FusedActFunc::NONE);
- tfl_node->padding(luci::Padding::SAME);
+ avg_node->filter()->h(2);
+ avg_node->filter()->w(2);
+ avg_node->stride()->h(2);
+ avg_node->stride()->w(2);
+ avg_node->fusedActivationFunction(luci::FusedActFunc::NONE);
+ avg_node->padding(luci::Padding::SAME);
}
- ASSERT_FALSE(loco::shape_known(tfl_node));
+ ASSERT_FALSE(loco::shape_known(avg_node));
// shape inference
- shape_pass(graph.g.get());
+ while (shape_pass(graph.graph()) == true)
+ ;
// Verify
{
- ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
-
- auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 4);
- ASSERT_EQ(shape.dim(0).value(), 1);
- ASSERT_EQ(shape.dim(1).value(), 2);
- ASSERT_EQ(shape.dim(2).value(), 2);
- ASSERT_EQ(shape.dim(3).value(), 1);
+ ASSERT_TRUE(loco::shape_known(avg_node));
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(avg_node).domain());
+
+ auto shape = loco::shape_get(avg_node).as<loco::TensorShape>();
+ ASSERT_EQ(4, shape.rank());
+ ASSERT_EQ(1, shape.dim(0).value());
+ ASSERT_EQ(2, shape.dim(1).value());
+ ASSERT_EQ(2, shape.dim(2).value());
+ ASSERT_EQ(1, shape.dim(3).value());
}
}
@@ -186,47 +196,51 @@ TEST(CircleShapeInferenceRuleTest, TFAdd_shapeinf_different)
{
auto g = loco::make_graph();
- auto x_node = g->nodes()->create<loco::Pull>();
+ auto x_node = g->nodes()->create<luci::CircleInput>();
{
x_node->rank(3);
x_node->dim(0) = 2;
x_node->dim(1) = 1;
x_node->dim(2) = 5;
}
- auto y_node = g->nodes()->create<loco::Pull>();
+ auto y_node = g->nodes()->create<luci::CircleInput>();
{
y_node->rank(2);
y_node->dim(0) = 3;
y_node->dim(1) = 5;
}
- auto tfl_node = g->nodes()->create<luci::CircleAdd>();
+ auto add_node = g->nodes()->create<luci::CircleAdd>();
{
- tfl_node->x(x_node);
- tfl_node->y(y_node);
+ add_node->x(x_node);
+ add_node->y(y_node);
}
- auto push_node = g->nodes()->create<loco::Push>();
+ auto output_node = g->nodes()->create<luci::CircleOutput>();
{
- push_node->from(tfl_node);
+ output_node->from(add_node);
}
auto x_input = g->inputs()->create();
{
x_input->name("x");
- loco::link(x_input, x_node);
+ luci::link(x_input, x_node);
}
auto y_input = g->inputs()->create();
{
y_input->name("y");
- loco::link(y_input, y_node);
+ luci::link(y_input, y_node);
}
auto output = g->outputs()->create();
{
output->name("output");
- loco::link(output, push_node);
+ luci::link(output, output_node);
}
+ luci::test::graph_input_shape(x_node);
+ luci::test::graph_input_shape(y_node);
+ luci::test::graph_output_shape(output_node);
+
// pre-check
- ASSERT_FALSE(loco::shape_known(tfl_node));
+ ASSERT_FALSE(loco::shape_known(add_node));
// shape inference
while (shape_pass(g.get()) == true)
@@ -234,14 +248,14 @@ TEST(CircleShapeInferenceRuleTest, TFAdd_shapeinf_different)
// Verify
{
- ASSERT_TRUE(loco::shape_known(tfl_node));
- ASSERT_EQ(loco::shape_get(tfl_node).domain(), loco::Domain::Tensor);
-
- auto shape = loco::shape_get(tfl_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 3);
- ASSERT_EQ(shape.dim(0), 2);
- ASSERT_EQ(shape.dim(1), 3);
- ASSERT_EQ(shape.dim(2), 5);
+ ASSERT_TRUE(loco::shape_known(add_node));
+ ASSERT_EQ(loco::Domain::Tensor, loco::shape_get(add_node).domain());
+
+ auto shape = loco::shape_get(add_node).as<loco::TensorShape>();
+ ASSERT_EQ(3, shape.rank());
+ ASSERT_EQ(2, shape.dim(0));
+ ASSERT_EQ(3, shape.dim(1));
+ ASSERT_EQ(5, shape.dim(2));
}
}
@@ -249,10 +263,10 @@ TEST(CircleShapeInferenceRuleTest, CircleTranspose_simple)
{
luci::test::ExampleGraph<luci::test::ExampleGraphType::CircleTranspose> g;
- g.pull->rank(3);
- g.pull->dim(0) = 3;
- g.pull->dim(1) = 8;
- g.pull->dim(2) = 1;
+ g.input_node->rank(3);
+ g.input_node->dim(0) = 3;
+ g.input_node->dim(1) = 8;
+ g.input_node->dim(2) = 1;
g.const_perm->dtype(loco::DataType::S32);
g.const_perm->rank(1);
@@ -262,6 +276,9 @@ TEST(CircleShapeInferenceRuleTest, CircleTranspose_simple)
g.const_perm->at<loco::DataType::S32>(1) = 2;
g.const_perm->at<loco::DataType::S32>(2) = 0;
+ luci::test::graph_input_shape(g.input_node);
+ luci::test::graph_output_shape(g.output_node);
+
// pre-check
ASSERT_FALSE(loco::shape_known(g.transpose_node));
@@ -274,9 +291,336 @@ TEST(CircleShapeInferenceRuleTest, CircleTranspose_simple)
ASSERT_TRUE(loco::shape_known(g.transpose_node));
auto shape = loco::shape_get(g.transpose_node).as<loco::TensorShape>();
- ASSERT_EQ(shape.rank(), 3);
- ASSERT_EQ(shape.dim(0), 8);
- ASSERT_EQ(shape.dim(1), 1);
- ASSERT_EQ(shape.dim(2), 3);
+ ASSERT_EQ(3, shape.rank());
+ ASSERT_EQ(8, shape.dim(0));
+ ASSERT_EQ(1, shape.dim(1));
+ ASSERT_EQ(3, shape.dim(2));
+ }
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleSqueeze)
+{
+ luci::test::TestGraph graph;
+ auto squeeze_node = graph.append<luci::CircleSqueeze>(graph.input_node);
+ graph.complete();
+
+ auto input_node = graph.input_node;
+ {
+ input_node->shape({1, 4, 3, 1});
+ }
+ auto output_node = graph.output_node;
+ {
+ output_node->shape({4, 3, 1});
+ }
+
+ luci::test::graph_input_shape(input_node);
+ luci::test::graph_output_shape(output_node);
+
+ squeeze_node->squeeze_dims({0});
+
+ // pre-check
+ ASSERT_FALSE(loco::shape_known(squeeze_node));
+
+ // shape inference
+ while (shape_pass(graph.graph()) == true)
+ ;
+
+ // Verify
+ {
+ ASSERT_TRUE(loco::shape_known(squeeze_node));
+
+ auto shape = loco::shape_get(squeeze_node).as<loco::TensorShape>();
+ ASSERT_EQ(3, shape.rank());
+ ASSERT_EQ(4, shape.dim(0));
+ ASSERT_EQ(3, shape.dim(1));
+ ASSERT_EQ(1, shape.dim(2));
+ }
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleExpandDims)
+{
+ luci::test::TestGraph graph;
+ auto axis = graph.append<luci::CircleConst>();
+ axis->dtype(loco::DataType::S32);
+ axis->rank(0);
+ axis->size<loco::DataType::S32>(1);
+ axis->at<loco::DataType::S32>(0) = 1;
+
+ auto expand_dims = graph.append<luci::CircleExpandDims>(graph.input_node, axis);
+ graph.complete();
+
+ auto input_node = graph.input_node;
+ {
+ input_node->shape({4, 3});
+ }
+
+ auto output_node = graph.output_node;
+ {
+ output_node->from(expand_dims);
+ }
+
+ luci::test::graph_input_shape(input_node);
+ luci::test::graph_output_shape(output_node);
+
+ // shape inference
+ while (shape_pass(graph.graph()))
+ ;
+
+ // validation
+ {
+ ASSERT_TRUE(loco::shape_known(expand_dims));
+
+ auto shape = loco::shape_get(expand_dims).as<loco::TensorShape>();
+
+ ASSERT_EQ(3, shape.rank());
+ ASSERT_EQ(4, shape.dim(0));
+ ASSERT_EQ(1, shape.dim(1));
+ ASSERT_EQ(3, shape.dim(2));
+ }
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleSqueezeAll)
+{
+ luci::test::TestGraph graph;
+ auto squeeze_node = graph.append<luci::CircleSqueeze>(graph.input_node);
+ graph.complete();
+
+ auto input_node = graph.input_node;
+ {
+ input_node->shape({1, 4, 3, 1});
+ }
+ auto output_node = graph.output_node;
+ {
+ input_node->shape({4, 3});
+ }
+
+ luci::test::graph_input_shape(input_node);
+ luci::test::graph_output_shape(output_node);
+
+ squeeze_node->squeeze_dims({});
+
+ // pre-check
+ ASSERT_FALSE(loco::shape_known(squeeze_node));
+
+ // shape inference
+ while (shape_pass(graph.graph()) == true)
+ ;
+
+ // Verify
+ {
+ ASSERT_TRUE(loco::shape_known(squeeze_node));
+
+ auto shape = loco::shape_get(squeeze_node).as<loco::TensorShape>();
+ ASSERT_EQ(2, shape.rank());
+ ASSERT_EQ(4, shape.dim(0));
+ ASSERT_EQ(3, shape.dim(1));
+ }
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleGatherNd_simple)
+{
+ luci::test::TestGraph graph;
+ auto indices_const = graph.append<luci::CircleConst>();
+ auto gather_nd_node = graph.append<luci::CircleGatherNd>(graph.input_node, indices_const);
+ graph.complete();
+
+ {
+ auto input_node = graph.input_node;
+ input_node->shape({1, 4, 4, 3});
+ luci::test::graph_input_shape(input_node);
+ }
+ {
+ auto output_node = graph.output_node;
+ output_node->shape({1, 2, 2, 3});
+ luci::test::graph_output_shape(output_node);
+ }
+
+ {
+ indices_const->shape({1, 2, 3});
+ }
+
+ // pre-check
+ ASSERT_FALSE(loco::shape_known(gather_nd_node));
+
+ // shape inference
+ while (shape_pass(graph.graph()) == true)
+ ;
+
+ // Verify
+ {
+ ASSERT_TRUE(loco::shape_known(gather_nd_node));
+
+ auto shape = loco::shape_get(gather_nd_node).as<loco::TensorShape>();
+ ASSERT_EQ(3, shape.rank());
+ ASSERT_EQ(1, shape.dim(0));
+ ASSERT_EQ(2, shape.dim(1));
+ ASSERT_EQ(3, shape.dim(2));
+ }
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleGatherNd_slices)
+{
+ luci::test::TestGraph graph;
+ auto indices_const = graph.append<luci::CircleConst>();
+ auto gather_nd_node = graph.append<luci::CircleGatherNd>(graph.input_node, indices_const);
+ graph.complete();
+
+ {
+ auto input_node = graph.input_node;
+ input_node->shape({1, 4, 4, 3});
+ luci::test::graph_input_shape(input_node);
+ }
+ {
+ auto output_node = graph.output_node;
+ output_node->shape({1, 2, 4, 4, 3});
+ luci::test::graph_output_shape(output_node);
+ }
+
+ {
+ indices_const->shape({1, 2, 1});
+ }
+
+ // pre-check
+ ASSERT_FALSE(loco::shape_known(gather_nd_node));
+
+ // shape inference
+ while (shape_pass(graph.graph()) == true)
+ ;
+
+ // Verify
+ {
+ ASSERT_TRUE(loco::shape_known(gather_nd_node));
+
+ auto shape = loco::shape_get(gather_nd_node).as<loco::TensorShape>();
+ ASSERT_EQ(5, shape.rank());
+ ASSERT_EQ(1, shape.dim(0));
+ ASSERT_EQ(2, shape.dim(1));
+ ASSERT_EQ(4, shape.dim(2));
+ ASSERT_EQ(4, shape.dim(3));
+ ASSERT_EQ(3, shape.dim(4));
+ }
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleGatherNd_NEG)
+{
+ luci::test::TestGraph graph;
+ auto indices_const = graph.append<luci::CircleConst>();
+ auto gather_nd_node = graph.append<luci::CircleGatherNd>(graph.input_node, indices_const);
+ graph.complete();
+
+ {
+ auto input_node = graph.input_node;
+ input_node->shape({1, 4, 4, 3});
+ luci::test::graph_input_shape(input_node);
+ }
+ {
+ // Does not matter, because test should fail anyway
+ auto output_node = graph.output_node;
+ output_node->shape({0, 0, 0});
+ luci::test::graph_output_shape(output_node);
+ }
+
+ {
+ indices_const->shape({1, 2, 5});
+ }
+
+ // pre-check
+ ASSERT_FALSE(loco::shape_known(gather_nd_node));
+
+ // had to pack into lambda to check throw
+ auto lambda = [&]() {
+ // shape inference
+ while (shape_pass(graph.graph()) == true)
+ ;
+ };
+
+ ASSERT_THROW(lambda(), oops::InternalExn);
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleResizeNearestNeighbor)
+{
+ luci::test::TestGraph graph;
+ auto size_const = graph.append<luci::CircleConst>();
+ size_const->dtype(loco::DataType::S32);
+ size_const->rank(1);
+ size_const->dim(0) = 2;
+ size_const->size<loco::DataType::S32>(2);
+ size_const->at<loco::DataType::S32>(0) = 16;
+ size_const->at<loco::DataType::S32>(1) = 16;
+ auto resize_node = graph.append<luci::CircleResizeNearestNeighbor>(graph.input_node, size_const);
+ graph.complete();
+
+ {
+ auto input_node = graph.input_node;
+ input_node->shape({1, 4, 4, 3});
+ luci::test::graph_input_shape(input_node);
+ }
+ {
+ auto output_node = graph.output_node;
+ output_node->from(resize_node);
+ luci::test::graph_output_shape(output_node);
+ }
+
+ // pre-check
+ ASSERT_FALSE(loco::shape_known(resize_node));
+
+ // shape inference
+ while (shape_pass(graph.graph()) == true)
+ ;
+
+ // Verify
+ {
+ ASSERT_TRUE(loco::shape_known(resize_node));
+
+ auto shape = loco::shape_get(resize_node).as<loco::TensorShape>();
+ ASSERT_EQ(4, shape.rank());
+ ASSERT_EQ(1, shape.dim(0));
+ ASSERT_EQ(16, shape.dim(1));
+ ASSERT_EQ(16, shape.dim(2));
+ ASSERT_EQ(3, shape.dim(3));
+ }
+}
+
+TEST(CircleShapeInferenceRuleTest, CircleResizeBilinear)
+{
+ luci::test::TestGraph graph;
+ auto size_const = graph.append<luci::CircleConst>();
+ size_const->dtype(loco::DataType::S32);
+ size_const->rank(1);
+ size_const->dim(0) = 2;
+ size_const->size<loco::DataType::S32>(2);
+ size_const->at<loco::DataType::S32>(0) = 16;
+ size_const->at<loco::DataType::S32>(1) = 16;
+ auto resize_node = graph.append<luci::CircleResizeBilinear>(graph.input_node, size_const);
+ graph.complete();
+
+ {
+ auto input_node = graph.input_node;
+ input_node->shape({1, 4, 4, 3});
+ luci::test::graph_input_shape(input_node);
+ }
+ {
+ auto output_node = graph.output_node;
+ output_node->from(resize_node);
+ luci::test::graph_output_shape(output_node);
+ }
+
+ // pre-check
+ ASSERT_FALSE(loco::shape_known(resize_node));
+
+ // shape inference
+ while (shape_pass(graph.graph()) == true)
+ ;
+
+ // Verify
+ {
+ ASSERT_TRUE(loco::shape_known(resize_node));
+
+ auto shape = loco::shape_get(resize_node).as<loco::TensorShape>();
+ ASSERT_EQ(4, shape.rank());
+ ASSERT_EQ(1, shape.dim(0));
+ ASSERT_EQ(16, shape.dim(1));
+ ASSERT_EQ(16, shape.dim(2));
+ ASSERT_EQ(3, shape.dim(3));
}
}
diff --git a/compiler/luci/service/src/CircleTypeInference.cpp b/compiler/luci/service/src/CircleTypeInference.cpp
index 669906159..aa8524a55 100644
--- a/compiler/luci/service/src/CircleTypeInference.cpp
+++ b/compiler/luci/service/src/CircleTypeInference.cpp
@@ -15,19 +15,13 @@
*/
#include "luci/Service/CircleTypeInference.h"
-#include "luci/Service/CircleTypeInferenceRule.h"
-#include <luci/IR/CircleDialect.h>
-
-#include <loco/IR/CanonicalNode.h>
-#include <loco/IR/CanonicalNodeVisitor.h>
-#include <loco/IR/CanonicalDialect.h>
+#include <loco.h>
#include <loco/Service/TypeInference.h>
+
#include <mio/circle/schema_generated.h>
#include <oops/InternalExn.h>
-#include <memory>
-#include <stdexcept>
#include <type_traits>
namespace
diff --git a/compiler/luci/service/src/CircleTypeInferenceRule.cpp b/compiler/luci/service/src/CircleTypeInferenceRule.cpp
index 21a28c1b6..de2ba3ea4 100644
--- a/compiler/luci/service/src/CircleTypeInferenceRule.cpp
+++ b/compiler/luci/service/src/CircleTypeInferenceRule.cpp
@@ -33,18 +33,45 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
loco::DataType visit(const luci::CircleAdd *node) final { return loco::dtype_get(node->x()); }
+ loco::DataType visit(const luci::CircleAddN *node) final
+ {
+ auto dtype = loco::dtype_get(node->inputs(0));
+
+ for (uint32_t idx = 1; idx < node->arity(); ++idx)
+ {
+ auto dtype_idx = loco::dtype_get(node->inputs(idx));
+ if (dtype != dtype_idx)
+ {
+ INTERNAL_EXN_V("ADD_N dtype not same as the first input: ", idx);
+ }
+ }
+
+ return loco::dtype_get(node->inputs(0));
+ }
+
loco::DataType visit(const luci::CircleArgMax *node) final { return node->output_type(); }
+ loco::DataType visit(const luci::CircleArgMin *node) final { return node->output_type(); }
+
loco::DataType visit(const luci::CircleAveragePool2D *node) final
{
return loco::dtype_get(node->value());
}
+ loco::DataType visit(const luci::CircleBatchMatMul *node) final
+ {
+ return loco::dtype_get(node->x());
+ }
+
loco::DataType visit(const luci::CircleBatchToSpaceND *node) final
{
return loco::dtype_get(node->input());
}
+ loco::DataType visit(const luci::CircleCast *node) final { return node->dtype(); }
+
+ loco::DataType visit(const luci::CircleCeil *node) final { return loco::dtype_get(node->x()); }
+
loco::DataType visit(const luci::CircleConcatenation *node) final
{
// TODO Support when CircleConcatenation has 0 input
@@ -65,6 +92,20 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
loco::DataType visit(const luci::CircleCos *node) final { return loco::dtype_get(node->x()); }
+ loco::DataType visit(const luci::CircleCustom *node) final
+ {
+ if (node->custom_code() == "BatchMatMulV2")
+ {
+ return loco::dtype_get(node->inputs(0));
+ }
+ return node->dtype();
+ }
+
+ loco::DataType visit(const luci::CircleDepthToSpace *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
loco::DataType visit(const luci::CircleDepthwiseConv2D *node) final
{
return loco::dtype_get(node->input());
@@ -72,15 +113,94 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
loco::DataType visit(const luci::CircleDiv *node) final { return loco::dtype_get(node->x()); }
+ loco::DataType visit(const luci::CircleElu *node) final
+ {
+ return loco::dtype_get(node->features());
+ }
+
loco::DataType visit(const luci::CircleEqual *) final { return loco::DataType::BOOL; }
loco::DataType visit(const luci::CircleExp *node) final { return loco::dtype_get(node->x()); }
+ loco::DataType visit(const luci::CircleExpandDims *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleFill *node) final
+ {
+ return loco::dtype_get(node->value());
+ }
+
+ loco::DataType visit(const luci::CircleFloor *node) final { return loco::dtype_get(node->x()); }
+
+ loco::DataType visit(const luci::CircleFloorDiv *node) final
+ {
+ return loco::dtype_get(node->x());
+ }
+
+ loco::DataType visit(const luci::CircleFloorMod *node) final
+ {
+ return loco::dtype_get(node->x());
+ }
+
loco::DataType visit(const luci::CircleFullyConnected *node) final
{
return loco::dtype_get(node->input());
}
+ loco::DataType visit(const luci::CircleGather *node) final
+ {
+ return loco::dtype_get(node->params());
+ }
+
+ loco::DataType visit(const luci::CircleGatherNd *node) final
+ {
+ return loco::dtype_get(node->params());
+ }
+
+ loco::DataType visit(const luci::CircleGreater *) final { return loco::DataType::BOOL; }
+
+ loco::DataType visit(const luci::CircleGreaterEqual *) final { return loco::DataType::BOOL; }
+
+ loco::DataType visit(const luci::CircleIf *node) final
+ {
+ // Type of If is not used. Just use input 0
+ assert(node->input_count() > 0);
+ return loco::dtype_get(node->input(0));
+ }
+
+ loco::DataType visit(const luci::CircleL2Normalize *node) final
+ {
+ return loco::dtype_get(node->x());
+ }
+
+ loco::DataType visit(const luci::CircleL2Pool2D *node) final
+ {
+ return loco::dtype_get(node->value());
+ }
+
+ loco::DataType visit(const luci::CircleLeakyRelu *node) final
+ {
+ return loco::dtype_get(node->features());
+ }
+
+ loco::DataType visit(const luci::CircleLess *) final { return loco::DataType::BOOL; }
+
+ loco::DataType visit(const luci::CircleLessEqual *) final { return loco::DataType::BOOL; }
+
+ loco::DataType visit(const luci::CircleLocalResponseNormalization *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleLog *node) final { return loco::dtype_get(node->x()); }
+
+ loco::DataType visit(const luci::CircleLogicalAnd *node) final
+ {
+ return loco::dtype_get(node->x());
+ }
+
loco::DataType visit(const luci::CircleLogicalNot *node) final
{
return loco::dtype_get(node->x());
@@ -91,6 +211,26 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
return loco::dtype_get(node->x());
}
+ loco::DataType visit(const luci::CircleLogistic *node) final
+ {
+ return loco::dtype_get(node->x());
+ }
+
+ loco::DataType visit(const luci::CircleLogSoftmax *node) final
+ {
+ return loco::dtype_get(node->logits());
+ }
+
+ loco::DataType visit(const luci::CircleMatrixDiag *node) final
+ {
+ return loco::dtype_get(node->diagonal());
+ }
+
+ loco::DataType visit(const luci::CircleMatrixSetDiag *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
loco::DataType visit(const luci::CircleMaximum *node) final { return loco::dtype_get(node->x()); }
loco::DataType visit(const luci::CircleMaxPool2D *node) final
@@ -103,6 +243,17 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
return loco::dtype_get(node->input());
}
+ loco::DataType visit(const luci::CircleMinimum *node) final { return loco::dtype_get(node->x()); }
+
+ loco::DataType visit(const luci::CircleMirrorPad *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleNeg *node) final { return loco::dtype_get(node->x()); }
+
+ loco::DataType visit(const luci::CircleNotEqual *) final { return loco::DataType::BOOL; }
+
loco::DataType visit(const luci::CirclePack *node) final
{
// Only support CirclePack with one or more inputs
@@ -117,8 +268,63 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
loco::DataType visit(const luci::CirclePad *node) final { return loco::dtype_get(node->input()); }
+ loco::DataType visit(const luci::CirclePow *node) final
+ {
+ // TODO make sure types cannot differ
+ auto x_type = loco::dtype_get(node->x());
+ auto y_type = loco::dtype_get(node->y());
+
+ if (x_type != y_type)
+ INTERNAL_EXN("Different datatype for x and y are not supported");
+
+ return x_type;
+ }
+
+ loco::DataType visit(const luci::CirclePRelu *node) final
+ {
+ auto input_type = loco::dtype_get(node->input());
+ auto alpha_type = loco::dtype_get(node->alpha());
+
+ if (input_type != alpha_type)
+ INTERNAL_EXN("Different datatype for input and alpha are not supported");
+
+ return input_type;
+ }
+
+ loco::DataType visit(const luci::CircleRange *node) final
+ {
+ return loco::dtype_get(node->start());
+ }
+
+ loco::DataType visit(const luci::CircleRank *) final { return loco::DataType::S32; }
+
loco::DataType visit(const luci::CircleMul *node) final { return loco::dtype_get(node->x()); }
+ loco::DataType visit(const luci::CircleOneHot *node) final
+ {
+ return loco::dtype_get(node->on_value());
+ }
+
+ loco::DataType visit(const luci::CircleReduceAny *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleReduceMax *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleReduceMin *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleReduceProd *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
loco::DataType visit(const luci::CircleRelu *node) final
{
return loco::dtype_get(node->features());
@@ -129,28 +335,132 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
return loco::dtype_get(node->features());
}
+ loco::DataType visit(const luci::CircleReluN1To1 *node) final
+ {
+ return loco::dtype_get(node->features());
+ }
+
loco::DataType visit(const luci::CircleReshape *node) final
{
return loco::dtype_get(node->tensor());
}
+ loco::DataType visit(const luci::CircleResizeBilinear *) final { return loco::DataType::FLOAT32; }
+
+ loco::DataType visit(const luci::CircleResizeNearestNeighbor *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleReverseSequence *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleReverseV2 *node) final
+ {
+ return loco::dtype_get(node->tensor());
+ }
+
+ loco::DataType visit(const luci::CircleRound *node) final { return loco::dtype_get(node->x()); }
+
loco::DataType visit(const luci::CircleRsqrt *node) final { return loco::dtype_get(node->x()); }
+ loco::DataType visit(const luci::CircleScatterNd *node) final
+ {
+ return loco::dtype_get(node->updates());
+ }
+
+ loco::DataType visit(const luci::CircleSegmentSum *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleSelect *node) final
+ {
+ assert(loco::dtype_get(node->t()) == loco::dtype_get(node->e()));
+ return loco::dtype_get(node->t());
+ }
+
+ loco::DataType visit(const luci::CircleSelectV2 *node) final
+ {
+ assert(loco::dtype_get(node->t()) == loco::dtype_get(node->e()));
+ return loco::dtype_get(node->t());
+ }
+
+ loco::DataType visit(const luci::CircleShape *node) final { return node->out_type(); }
+
+ loco::DataType visit(const luci::CircleSin *node) final { return loco::dtype_get(node->x()); }
+
+ loco::DataType visit(const luci::CircleSlice *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
loco::DataType visit(const luci::CircleSoftmax *node) final
{
return loco::dtype_get(node->logits());
}
+ loco::DataType visit(const luci::CircleSpaceToBatchND *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleSpaceToDepth *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleSparseToDense *node) final
+ {
+ return loco::dtype_get(node->values());
+ }
+
+ loco::DataType visit(const luci::CircleSplit *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleSplitV *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
loco::DataType visit(const luci::CircleSqrt *node) final { return loco::dtype_get(node->x()); }
+ loco::DataType visit(const luci::CircleSquare *node) final { return loco::dtype_get(node->x()); }
+
loco::DataType visit(const luci::CircleSquaredDifference *node) final
{
return loco::dtype_get(node->x());
}
+ loco::DataType visit(const luci::CircleSqueeze *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleStridedSlice *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
loco::DataType visit(const luci::CircleSub *node) final { return loco::dtype_get(node->x()); }
- // TODO CircleTanh
+ loco::DataType visit(const luci::CircleSum *node) final { return loco::dtype_get(node->input()); }
+
+ loco::DataType visit(const luci::CircleTanh *node) final { return loco::dtype_get(node->x()); }
+
+ loco::DataType visit(const luci::CircleTile *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleTopKV2 *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
loco::DataType visit(const luci::CircleTranspose *node) final
{
@@ -162,7 +472,33 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
return loco::dtype_get(node->outBackprop());
}
+ loco::DataType visit(const luci::CircleUnpack *node) final
+ {
+ return loco::dtype_get(node->value());
+ }
+
+ loco::DataType visit(const luci::CircleWhere *) final { return loco::DataType::S64; }
+
+ loco::DataType visit(const luci::CircleWhile *node) final
+ {
+ // Type of While is not used. Just use input 0
+ assert(node->input_count() > 0);
+ return loco::dtype_get(node->input(0));
+ }
+
+ loco::DataType visit(const luci::CircleZerosLike *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
// Circle Only
+ loco::DataType visit(const luci::CircleBCQFullyConnected *) final
+ {
+ return loco::DataType::FLOAT32;
+ }
+
+ loco::DataType visit(const luci::CircleBCQGather *) final { return loco::DataType::FLOAT32; }
+
loco::DataType visit(const luci::CircleInstanceNorm *node) final
{
return loco::dtype_get(node->input());
@@ -173,7 +509,116 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::DataT
loco::DataType visit(const luci::CircleOutput *node) final
{
- return loco::dtype_get(node->from());
+ auto graph_outputs = node->graph()->outputs();
+ auto graph_output = graph_outputs->at(node->index());
+ auto output_dtype = graph_output->dtype();
+
+ if (dynamic_cast<luci::CircleOutputDummy *>(node->from()) == nullptr &&
+ dynamic_cast<luci::CircleOutputExclude *>(node->from()) == nullptr)
+ {
+ // We don't care for the type if from() is CircleOutputDummy or CircleOutputExclude
+ // from() type should match that of CircleOutput
+ assert(output_dtype == loco::dtype_get(node->from()));
+ }
+ return output_dtype;
+ }
+
+ loco::DataType visit(const luci::CircleOutputDummy *node) final { return node->dtype(); }
+
+ loco::DataType visit(const luci::CircleOutputExclude *node) final { return node->dtype(); }
+
+ loco::DataType visit(const luci::CircleCustomOut *node) final { return node->dtype(); }
+
+ loco::DataType visit(const luci::CircleIfOut *node) final
+ {
+ /**
+ * @note IF operator type and shape are that of the "then" and "else"
+ * Graph Outputs.
+ */
+ auto circle_if = dynamic_cast<const luci::CircleIf *>(node->input());
+ if (circle_if == nullptr)
+ {
+ INTERNAL_EXN("CircleIf IR is not configured correctly");
+ }
+
+ auto index = node->index();
+ auto then_graph = circle_if->then_graph();
+ auto else_graph = circle_if->else_graph();
+ assert(then_graph != nullptr);
+ assert(else_graph != nullptr);
+
+ // shape and type are assumed to be same
+ // these are checked at post_import_graph() in Import
+ auto then_outputs = loco::output_nodes(then_graph);
+ auto else_outputs = loco::output_nodes(else_graph);
+ assert(then_outputs.size() == else_outputs.size());
+ assert(index < static_cast<int32_t>(then_outputs.size()));
+
+ auto then_out = loco::must_cast<luci::CircleOutput *>(then_outputs.at(index));
+ auto else_out = loco::must_cast<luci::CircleOutput *>(else_outputs.at(index));
+
+ auto then_graph_outputs = then_graph->outputs(); // loco::GraphOutput items
+ auto else_graph_outputs = else_graph->outputs();
+ assert(then_graph_outputs->size() == else_graph_outputs->size());
+
+ auto then_graph_output = then_graph_outputs->at(then_out->index());
+ auto else_graph_output = else_graph_outputs->at(else_out->index());
+ (void)else_graph_output; // make compiler happy for unused variable warnings
+ assert(then_graph_output->dtype() == else_graph_output->dtype());
+
+ return then_graph_output->dtype();
+ }
+
+ loco::DataType visit(const luci::CircleSplitOut *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleSplitVOut *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleTopKV2Out *node) final
+ {
+ // First output is same as input
+ if (node->index() == 0)
+ return loco::dtype_get(node->input());
+ // Second outout is always S32
+ assert(node->index() == 1);
+ return loco::DataType::S32;
+ }
+
+ loco::DataType visit(const luci::CircleUnpackOut *node) final
+ {
+ return loco::dtype_get(node->input());
+ }
+
+ loco::DataType visit(const luci::CircleWhileOut *node) final
+ {
+ /**
+ * @note WHILE operator's type is the same with the "cond"
+ * Graph Input.
+ */
+ auto circle_while = dynamic_cast<const luci::CircleWhile *>(node->input());
+ if (circle_while == nullptr)
+ {
+ INTERNAL_EXN("CircleWhile IR is not configured correctly");
+ }
+
+ auto index = node->index();
+ auto cond_graph = circle_while->cond_graph();
+ assert(cond_graph != nullptr);
+
+ // Assumption: the index of CircleWhileOut matches with the index of input nodes returned by
+ // loco::input_nodes
+ auto cond_inputs = loco::input_nodes(cond_graph);
+ auto cond_in = loco::must_cast<luci::CircleInput *>(cond_inputs.at(index));
+
+ auto cond_graph_inputs = cond_graph->inputs();
+ auto cond_graph_input = cond_graph_inputs->at(cond_in->index());
+
+ return cond_graph_input->dtype();
}
};
@@ -193,7 +638,8 @@ bool CircleTypeInferenceRule::infer(const loco::Node *node, loco::DataType &dtyp
TypeInferenceAlgorithm alg;
- dtype = dynamic_cast<const CircleNode *>(node)->accept(&alg);
+ auto circle_node = loco::must_cast<const CircleNode *>(node);
+ dtype = circle_node->accept(&alg);
assert(dtype != loco::DataType::Unknown);
return true;
diff --git a/compiler/luci/service/src/CircleTypeInferenceRule.test.cpp b/compiler/luci/service/src/CircleTypeInferenceRule.test.cpp
index 29f45173e..711a489af 100644
--- a/compiler/luci/service/src/CircleTypeInferenceRule.test.cpp
+++ b/compiler/luci/service/src/CircleTypeInferenceRule.test.cpp
@@ -32,26 +32,32 @@ TEST(CircleTypeInferenceRuleTest, minimal_with_CircleRelu)
{
// Create a simple network
luci::test::TestGraph graph;
- auto tfl_node = graph.append<luci::CircleRelu>(graph.pull);
- graph.complete(tfl_node);
+ auto relu_node = graph.append<luci::CircleRelu>(graph.input_node);
+ graph.complete(relu_node);
- graph.pull->dtype(loco::DataType::S32);
+ // set dtype for nodes; like setting them in import
+ graph.input_node->dtype(loco::DataType::S32);
+ relu_node->dtype(loco::DataType::S32);
+ graph.output_node->dtype(loco::DataType::S32);
+
+ luci::test::graph_input_dtype(graph.input_node);
+ luci::test::graph_output_dtype(graph.output_node);
// pre-check
- ASSERT_FALSE(loco::dtype_known(tfl_node));
+ ASSERT_FALSE(loco::dtype_known(relu_node));
// type inference
- luci::CircleTypeInferenceRule tfl_rule;
+ luci::CircleTypeInferenceRule circle_rule;
loco::CanonicalTypeInferenceRule canon_rule;
loco::MultiDialectTypeInferenceRule rules;
rules.bind(loco::CanonicalDialect::get(), &canon_rule);
- rules.bind(luci::CircleDialect::get(), &tfl_rule);
+ rules.bind(luci::CircleDialect::get(), &circle_rule);
loco::apply(&rules).to(graph.g.get());
// Verify
- ASSERT_TRUE(loco::dtype_known(tfl_node));
- auto type = loco::dtype_get(tfl_node);
- ASSERT_EQ(type, loco::DataType::S32);
+ ASSERT_TRUE(loco::dtype_known(relu_node));
+ auto type = loco::dtype_get(relu_node);
+ ASSERT_EQ(loco::DataType::S32, type);
}
diff --git a/compiler/luci/service/src/GraphBlock.h b/compiler/luci/service/src/GraphBlock.h
deleted file mode 100644
index 2a455888a..000000000
--- a/compiler/luci/service/src/GraphBlock.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __GRAPH_BLOCK_H__
-#define __GRAPH_BLOCK_H__
-
-#include <loco.h>
-#include <loco/Service/ShapeInference.h>
-
-#include <oops/InternalExn.h>
-
-#include <functional>
-
-// TODO Change all Canonical nodes to Circle nodes
-
-namespace luci
-{
-
-/// @brief feature layout of TFlite/Circle file
-enum class FeatureLayout
-{
- NHWC,
-};
-
-/// @brief Creates a loco::FeatureEncode with T layout (NHWC for tflite) and add it to graph.
-template <FeatureLayout T> loco::FeatureEncode *make_feature_encode(loco::Node *input_for_encode);
-
-/// @brief Creates a loco::FeatureDecode with T layout (NHWC for tflite) and add it to graph.
-template <FeatureLayout T> loco::FeatureDecode *make_feature_decode(loco::Node *input_for_decode);
-
-enum class FilterLayout
-{
- OHWI, // a.k.a., NHWC, Tensorflow Lite uses this layout for filter
- HWIO, // a.k.a., HWCN, Tensorflow uses this layout for filter
-};
-
-/// @brief Create a loco::FilterEncode of given layout
-template <FilterLayout T> loco::FilterEncode *make_filter_encode(loco::Node *input_for_encode);
-
-/// @brief Create a loco::FilterDecode of given layout
-template <FilterLayout T> loco::FilterDecode *make_filter_decode(loco::Node *input_for_decode);
-
-enum class DepthwiseFilterLayout
-{
- HWCM,
-};
-
-/// @brief Create a loco::DepthwiseFilterDecode of given layout
-template <DepthwiseFilterLayout T>
-loco::DepthwiseFilterDecode *make_dw_filter_decode(loco::Node *input_for_decode);
-
-enum class MatrixLayout
-{
- HW,
- WH
-};
-
-/// @brief Create a loco::MatrixEncode of given layout
-template <MatrixLayout T> loco::MatrixEncode *make_matrix_encode(loco::Node *input_for_encode);
-
-/// @brief Create a loco::MatrixDecode of given layout
-template <MatrixLayout T> loco::MatrixDecode *make_matrix_decode(loco::Node *input_for_decode);
-
-} // luci
-
-//
-// DomainConverter
-//
-
-/**
- * Some canonical nodes can have input of various loco::Domain, e.g., loco::Domain::Tensor,
- * loco::Domain::Feature, etc. However, TFL node accepts only loco::Domain::Tensor.
- * So, When converting such canonical node to TFL node and input(s) of a canonical node are not
- * loco::Domain::Tensor, additional nodes need to be inserted.
- *
- * The following two classes helps this insertion.
- *
- * For example, in case of loco::Relu conversion,
- *
- * Before:
- *
- * A (output: feature) -- loco::ReLU --- B (input:feature)
- *
- * After:
- *
- * A -- loco::FeatureDecode -- locoex::TFLRelu -- loco::FeatureEncode --- B
- *
- * loco::ReLU (dead node)
- */
-
-namespace luci
-{
-
-/**
- * @brief Handles input(s) while converting a canonical node to TFL node(s).
- * This class informs DomainConverter how to handle inputs of a specific canonical node.
- */
-template <class CanonicalT, class TFLT> class InputHandler
-{
-public:
- /**
- * @brief Assign origin's inputs to replacer's inputs.
- * (This is called when origin belongs in Tensor domain.)
- */
- virtual void handover(CanonicalT *origin, TFLT *replacer) = 0;
-
- /**
- * @brief Returns the list of inputs that needs to have FeatureDecode as its input.
- * (This is called when origin belongs in Feature domain.)
- */
- virtual std::vector<loco::Node *> getInputsToConvert(CanonicalT *origin) = 0;
-
- /// @brief Set the inputs of replacer to new_inputs
- virtual void set(TFLT *replacer, std::vector<loco::Node *> &new_inputs) = 0;
-
- /// @brief Set the inputs to nullptr
- virtual void nullify(CanonicalT *origin) = 0;
-};
-
-/**
- * @brief Class to handle domain conversion while converting a canonical node to TFL node(s)
- */
-template <class CanonicalT, class TFLT> class DomainConverter
-{
-public:
- template <FeatureLayout FeatureLayoutT>
- TFLT *convert(CanonicalT *origin, InputHandler<CanonicalT, TFLT> &input_handler);
-};
-
-/**
- * @brief Performs domain conversion
- *
- * 1. if origin belong to loco::Domain::Tensor, and replace origin to a TFL node.
- * 2. if origin belong to loco::Domain::Feature, insert loco::FeatureDecode for input(s) and
- * insert loco::FeatureEncode for output. Then replace origin to a TFL node.
- *
- * @return new TFL node; nullptr if shape of origin cannot be known
- */
-template <class CanonicalT, class TFLT>
-template <FeatureLayout FeatureLayoutT>
-TFLT *DomainConverter<CanonicalT, TFLT>::convert(CanonicalT *origin,
- InputHandler<CanonicalT, TFLT> &input_handler)
-{
- static_assert(FeatureLayoutT == FeatureLayout::NHWC, "Feature layout should be NHWC");
-
- if (!loco::shape_known(origin))
- {
- return nullptr;
- }
-
- auto tfl_node = origin->graph()->nodes()->template create<TFLT>();
-
- // when the input is Tensor, just replace canonical node to TFL node.
- if (loco::shape_get(origin).domain() == loco::Domain::Tensor)
- {
- input_handler.handover(origin, tfl_node);
-
- loco::replace(origin).with(tfl_node);
- input_handler.nullify(origin);
-
- return tfl_node;
- }
- else if (loco::shape_get(origin).domain() == loco::Domain::Feature)
- {
- std::vector<loco::Node *> feature_decodes;
-
- for (auto input : input_handler.getInputsToConvert(origin))
- {
- auto dec = make_feature_decode<FeatureLayoutT>(input);
- feature_decodes.emplace_back(dec);
- }
-
- input_handler.set(tfl_node, feature_decodes);
-
- auto enc = make_feature_encode<FeatureLayoutT>(tfl_node);
-
- loco::replace(origin).with(enc);
- input_handler.nullify(origin);
-
- return tfl_node;
- }
- else
- INTERNAL_EXN_V("Unsupported loco::Domain", oops::to_uint32(loco::shape_get(origin).domain()));
-}
-
-} // namespace luci
-
-#endif //__GRAPH_BLOCK_H__
diff --git a/compiler/luci/service/src/GraphBlock.test.cpp b/compiler/luci/service/src/GraphBlock.test.cpp
deleted file mode 100644
index 1da8c18fa..000000000
--- a/compiler/luci/service/src/GraphBlock.test.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "GraphBlock.h"
-
-#include "Check.h"
-
-#include <loco.h>
-
-#include <memory>
-
-// TODO Change all Canonical nodes to Circle nodes
-
-namespace
-{
-
-template <luci::FeatureLayout T> loco::Permutation<loco::Domain::Feature> perm();
-
-template <> loco::Permutation<loco::Domain::Feature> perm<luci::FeatureLayout::NHWC>()
-{
- // Make NHWC permutation for encoder and decoder
- loco::Permutation<loco::Domain::Feature> NHWC;
-
- NHWC.axis(loco::FeatureAxis::Count) = 0;
- NHWC.axis(loco::FeatureAxis::Height) = 1;
- NHWC.axis(loco::FeatureAxis::Width) = 2;
- NHWC.axis(loco::FeatureAxis::Depth) = 3;
-
- return NHWC;
-}
-
-template <luci::FilterLayout T> loco::Permutation<loco::Domain::Filter> perm();
-
-template <> loco::Permutation<loco::Domain::Filter> perm<luci::FilterLayout::HWIO>()
-{
- loco::Permutation<loco::Domain::Filter> HWIO; // a.k.a., HWCN
-
- HWIO.axis(loco::FilterAxis::Height) = 0;
- HWIO.axis(loco::FilterAxis::Width) = 1;
- HWIO.axis(loco::FilterAxis::Depth) = 2;
- HWIO.axis(loco::FilterAxis::Count) = 3;
-
- return HWIO;
-}
-
-template <> loco::Permutation<loco::Domain::Filter> perm<luci::FilterLayout::OHWI>()
-{
-
- // Make NHWC permutation for encoder and decoder
- loco::Permutation<loco::Domain::Filter> OHWI; // a.k.a., NHWC
-
- OHWI.axis(loco::FilterAxis::Count) = 0;
- OHWI.axis(loco::FilterAxis::Height) = 1;
- OHWI.axis(loco::FilterAxis::Width) = 2;
- OHWI.axis(loco::FilterAxis::Depth) = 3;
-
- return OHWI;
-}
-
-template <luci::DepthwiseFilterLayout T> loco::Permutation<loco::Domain::DepthwiseFilter> perm();
-
-template <>
-loco::Permutation<loco::Domain::DepthwiseFilter> perm<luci::DepthwiseFilterLayout::HWCM>()
-{
- loco::Permutation<loco::Domain::DepthwiseFilter> HWCM;
-
- HWCM.axis(loco::DepthwiseFilterAxis::Height) = 0;
- HWCM.axis(loco::DepthwiseFilterAxis::Width) = 1;
- HWCM.axis(loco::DepthwiseFilterAxis::Depth) = 2;
- HWCM.axis(loco::DepthwiseFilterAxis::Multiplier) = 3;
-
- return HWCM;
-}
-
-template <luci::MatrixLayout T> loco::Permutation<loco::Domain::Matrix> perm();
-
-template <> loco::Permutation<loco::Domain::Matrix> perm<luci::MatrixLayout::HW>()
-{
- loco::Permutation<loco::Domain::Matrix> HW;
-
- HW.axis(loco::MatrixAxis::Height) = 0;
- HW.axis(loco::MatrixAxis::Width) = 1;
-
- return HW;
-}
-
-template <> loco::Permutation<loco::Domain::Matrix> perm<luci::MatrixLayout::WH>()
-{
- loco::Permutation<loco::Domain::Matrix> WH;
-
- WH.axis(loco::MatrixAxis::Height) = 1;
- WH.axis(loco::MatrixAxis::Width) = 0;
-
- return WH;
-}
-
-} // namespace
-
-namespace luci
-{
-
-template <FeatureLayout T> loco::FeatureEncode *make_feature_encode(loco::Node *input_for_encode)
-{
- LUCI_ASSERT(input_for_encode != nullptr, "input should not be nullptr");
- loco::Graph *g = input_for_encode->graph();
-
- auto encoder = std::make_unique<loco::PermutingEncoder<loco::Domain::Feature>>();
-
- encoder->perm(perm<T>());
-
- auto enc = g->nodes()->create<loco::FeatureEncode>();
- enc->input(input_for_encode);
- enc->encoder(std::move(encoder));
-
- return enc;
-}
-
-template <FeatureLayout T> loco::FeatureDecode *make_feature_decode(loco::Node *input_for_decode)
-{
- LUCI_ASSERT(input_for_decode != nullptr, "input should not be nullptr");
- loco::Graph *g = input_for_decode->graph();
-
- auto decoder = std::make_unique<loco::PermutingDecoder<loco::Domain::Feature>>();
-
- decoder->perm(perm<T>());
-
- auto dec = g->nodes()->create<loco::FeatureDecode>();
- dec->input(input_for_decode);
- dec->decoder(std::move(decoder));
-
- return dec;
-}
-
-template <FilterLayout T> loco::FilterEncode *make_filter_encode(loco::Node *input_for_encode)
-{
- LUCI_ASSERT(input_for_encode != nullptr, "filter should not be nullptr");
- loco::Graph *g = input_for_encode->graph();
-
- auto encoder = std::make_unique<loco::PermutingEncoder<loco::Domain::Filter>>();
-
- encoder->perm(perm<T>());
-
- auto enc = g->nodes()->create<loco::FilterEncode>();
- enc->input(input_for_encode);
- enc->encoder(std::move(encoder));
-
- return enc;
-}
-
-template <FilterLayout T> loco::FilterDecode *make_filter_decode(loco::Node *input_for_decode)
-{
- LUCI_ASSERT(input_for_decode != nullptr, "filter should not be nullptr");
- loco::Graph *g = input_for_decode->graph();
-
- auto decoder = std::make_unique<loco::PermutingDecoder<loco::Domain::Filter>>();
-
- decoder->perm(perm<T>());
-
- auto dec = g->nodes()->create<loco::FilterDecode>();
- dec->input(input_for_decode);
- dec->decoder(std::move(decoder));
-
- return dec;
-}
-
-template <DepthwiseFilterLayout T>
-loco::DepthwiseFilterDecode *make_dw_filter_decode(loco::Node *input_for_decode)
-{
- LUCI_ASSERT(input_for_decode != nullptr, "filter should not be nullptr");
- loco::Graph *g = input_for_decode->graph();
-
- auto decoder = std::make_unique<loco::PermutingDecoder<loco::Domain::DepthwiseFilter>>();
-
- decoder->perm(perm<T>());
-
- auto dec = g->nodes()->create<loco::DepthwiseFilterDecode>();
- dec->input(input_for_decode);
- dec->decoder(std::move(decoder));
-
- return dec;
-}
-
-template <MatrixLayout T> loco::MatrixEncode *make_matrix_encode(loco::Node *input_for_encode)
-{
- LUCI_ASSERT(input_for_encode != nullptr, "input should not be nullptr");
- loco::Graph *g = input_for_encode->graph();
-
- auto encoder = std::make_unique<loco::PermutingEncoder<loco::Domain::Matrix>>();
-
- encoder->perm(perm<T>());
-
- auto enc = g->nodes()->create<loco::MatrixEncode>();
- enc->input(input_for_encode);
- enc->encoder(std::move(encoder));
-
- return enc;
-}
-
-template <MatrixLayout T> loco::MatrixDecode *make_matrix_decode(loco::Node *input_for_decode)
-{
- LUCI_ASSERT(input_for_decode != nullptr, "input should not be nullptr");
- loco::Graph *g = input_for_decode->graph();
-
- auto decoder = std::make_unique<loco::PermutingDecoder<loco::Domain::Matrix>>();
-
- decoder->perm(perm<T>());
-
- auto dec = g->nodes()->create<loco::MatrixDecode>();
- dec->input(input_for_decode);
- dec->decoder(std::move(decoder));
-
- return dec;
-}
-
-// template instantiation
-template loco::FeatureEncode *
-make_feature_encode<FeatureLayout::NHWC>(loco::Node *input_for_encode);
-
-template loco::FeatureDecode *
-make_feature_decode<FeatureLayout::NHWC>(loco::Node *input_for_encode);
-
-template loco::FilterEncode *make_filter_encode<FilterLayout::HWIO>(loco::Node *input_for_encode);
-template loco::FilterDecode *make_filter_decode<FilterLayout::OHWI>(loco::Node *input_for_decode);
-
-template loco::DepthwiseFilterDecode *
-make_dw_filter_decode<DepthwiseFilterLayout::HWCM>(loco::Node *input_for_decode);
-
-template loco::MatrixEncode *make_matrix_encode<MatrixLayout::HW>(loco::Node *input_for_encode);
-template loco::MatrixEncode *make_matrix_encode<MatrixLayout::WH>(loco::Node *input_for_encode);
-template loco::MatrixDecode *make_matrix_decode<MatrixLayout::HW>(loco::Node *input_for_decode);
-template loco::MatrixDecode *make_matrix_decode<MatrixLayout::WH>(loco::Node *input_for_decode);
-
-} // namespace luci
diff --git a/compiler/luci/service/src/ShapeInfer_StridedSlice.cpp b/compiler/luci/service/src/ShapeInfer_StridedSlice.cpp
new file mode 100644
index 000000000..341201148
--- /dev/null
+++ b/compiler/luci/service/src/ShapeInfer_StridedSlice.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShapeInfer_StridedSlice.h"
+#include "Check.h"
+
+#include <luci/IR/CircleNode.h>
+#include <loco/IR/DataType.h>
+#include <loco/IR/NodeShape.h>
+#include <oops/InternalExn.h>
+#include <loco/Service/ShapeInference.h>
+
+#include <cmath>
+#include <cstdint>
+#include <limits>
+
+namespace
+{
+
+// This Op only supports 1-4D cases and since we use the reference 4D
+// implementation, the 1-3D tensors are mapped to 4D.
+const int kMaxDim = 4;
+
+const loco::DataType S32 = loco::DataType::S32;
+
+using int8 = int8_t;
+using int16 = int16_t;
+
+struct StridedSliceParams
+{
+ int8 start_indices_count;
+ int16 start_indices[kMaxDim];
+ int8 stop_indices_count;
+ int16 stop_indices[kMaxDim];
+ int8 strides_count;
+ int16 strides[kMaxDim];
+
+ int16 begin_mask;
+ int16 ellipsis_mask;
+ int16 end_mask;
+ int16 new_axis_mask;
+ int16 shrink_axis_mask;
+};
+
+// Use until std::clamp() is available from C++17.
+inline int Clamp(const int32_t v, const int32_t lo, const int32_t hi)
+{
+ LUCI_ASSERT(!(hi < lo), "Clamp hi < lo");
+ if (hi < v)
+ return hi;
+ if (v < lo)
+ return lo;
+ return v;
+}
+
+// Return the index for the first element along that axis. This index will be a
+// positive integer between [0, axis_size - 1] that can be used to index
+// directly into the data.
+inline int StartForAxis(const StridedSliceParams &params, const loco::TensorShape &input_shape,
+ uint32_t axis)
+{
+ const auto begin_mask = params.begin_mask;
+ const auto *start_indices = params.start_indices;
+ const auto *strides = params.strides;
+ const int32_t axis_size = static_cast<int>(input_shape.dim(axis).value());
+ if (axis_size == 0)
+ {
+ return 0;
+ }
+ // Begin with the specified index.
+ int32_t start = start_indices[axis];
+
+ // begin_mask override
+ if (begin_mask & (1 << axis))
+ {
+ if (strides[axis] > 0)
+ {
+ // Forward iteration - use the first element. These values will get
+ // clamped below (Note: We could have set them to 0 and axis_size-1, but
+ // use lowest() and max() to maintain symmetry with StopForAxis())
+ start = std::numeric_limits<int32_t>::lowest();
+ }
+ else
+ {
+ // Backward iteration - use the last element.
+ start = std::numeric_limits<int32_t>::max();
+ }
+ }
+
+ // Handle negative indices
+ if (start < 0)
+ {
+ start += axis_size;
+ }
+
+ // Clamping
+ start = Clamp(start, 0, axis_size - 1);
+
+ return start;
+}
+
+// Return the "real" index for the end of iteration along that axis. This is an
+// "end" in the traditional C sense, in that it points to one past the last
+// element. ie. So if you were iterating through all elements of a 1D array of
+// size 4, this function would return 4 as the stop, because it is one past the
+// "real" indices of 0, 1, 2 & 3.
+inline int StopForAxis(const StridedSliceParams &params, const loco::TensorShape &input_shape,
+ int axis, int start_for_axis)
+{
+ const auto end_mask = params.end_mask;
+ const auto shrink_axis_mask = params.shrink_axis_mask;
+ const auto *stop_indices = params.stop_indices;
+ const auto *strides = params.strides;
+ const int axis_size = static_cast<int32_t>(input_shape.dim(axis).value());
+ if (axis_size == 0)
+ {
+ return 0;
+ }
+
+ // Begin with the specified index
+ const bool shrink_axis = shrink_axis_mask & (1 << axis);
+ int32_t stop = stop_indices[axis];
+
+ // When shrinking an axis, the end position does not matter (and can be
+ // incorrect when negative indexing is used, see Issue #19260). Always use
+ // start_for_axis + 1 to generate a length 1 slice, since start_for_axis has
+ // already been adjusted for negative indices.
+ if (shrink_axis)
+ {
+ stop = start_for_axis + 1;
+ }
+
+ // end_mask override
+ if (end_mask & (1 << axis))
+ {
+ if (strides[axis] > 0)
+ {
+ // Forward iteration - use the last element. These values will get
+ // clamped below
+ stop = std::numeric_limits<int32_t>::max();
+ }
+ else
+ {
+ // Backward iteration - use the first element.
+ stop = std::numeric_limits<int32_t>::lowest();
+ }
+ }
+
+ // Handle negative indices
+ if (stop < 0)
+ {
+ stop += axis_size;
+ }
+
+ // Clamping
+ // Because the end index points one past the last element, we need slightly
+ // different clamping ranges depending on the direction.
+ if (strides[axis] > 0)
+ {
+ // Forward iteration
+ stop = Clamp(stop, 0, axis_size);
+ }
+ else
+ {
+ // Backward iteration
+ stop = Clamp(stop, -1, axis_size - 1);
+ }
+
+ return stop;
+}
+
+StridedSliceParams BuildStridedSliceParams(const luci::CircleStridedSlice *node)
+{
+ StridedSliceParams op_params;
+
+ if (kMaxDim < node->rank())
+ {
+ INTERNAL_EXN_V("Cannot support StridedSlice rank > ", kMaxDim);
+ }
+
+ auto begin_node = loco::must_cast<luci::CircleConst *>(node->begin());
+ auto end_node = loco::must_cast<luci::CircleConst *>(node->end());
+ auto strides_node = loco::must_cast<luci::CircleConst *>(node->strides());
+
+ uint32_t dims_count = begin_node->size<S32>();
+
+ op_params.start_indices_count = dims_count;
+ op_params.stop_indices_count = dims_count;
+ op_params.strides_count = dims_count;
+
+ for (uint32_t i = 0; i < dims_count; ++i)
+ {
+ op_params.start_indices[i] = begin_node->at<S32>(i);
+ op_params.stop_indices[i] = end_node->at<S32>(i);
+ op_params.strides[i] = strides_node->at<S32>(i);
+ }
+
+ op_params.begin_mask = node->begin_mask();
+ op_params.ellipsis_mask = 0;
+ op_params.end_mask = node->end_mask();
+ op_params.new_axis_mask = 0;
+ op_params.shrink_axis_mask = node->shrink_axis_mask();
+
+ return op_params;
+}
+
+} // namespace
+
+namespace luci
+{
+
+loco::TensorShape infer_output_shape(const CircleStridedSlice *node)
+{
+ loco::TensorShape output_shape;
+
+ auto input_node = loco::must_cast<luci::CircleNode *>(node->input());
+
+ auto begin_node = dynamic_cast<luci::CircleConst *>(node->begin());
+ auto end_node = dynamic_cast<luci::CircleConst *>(node->end());
+ auto strides_node = dynamic_cast<luci::CircleConst *>(node->strides());
+ if (begin_node == nullptr || end_node == nullptr || strides_node == nullptr)
+ {
+ INTERNAL_EXN("StridedSlice begin/end/strides nodes are not Constant");
+ }
+
+ LUCI_ASSERT(begin_node->dtype() == S32, "Only support S32 for begin_node");
+ LUCI_ASSERT(end_node->dtype() == S32, "Only support S32 for end_node");
+ LUCI_ASSERT(strides_node->dtype() == S32, "Only support S32 for strides_node");
+
+ assert(node->ellipsis_mask() == 0);
+ assert(node->new_axis_mask() == 0);
+
+ auto op_params = BuildStridedSliceParams(node);
+ loco::TensorShape input_shape = loco::shape_get(input_node).as<loco::TensorShape>();
+
+ uint32_t num_input_axes = input_shape.rank();
+ assert(begin_node->size<S32>() <= num_input_axes);
+ assert(end_node->size<S32>() <= num_input_axes);
+ assert(strides_node->size<S32>() <= num_input_axes);
+ for (uint32_t i = 0; i < strides_node->size<S32>(); i++)
+ {
+ LUCI_ASSERT(strides_node->at<S32>(i) != 0, "Stride value has to be non-zero");
+ }
+
+ uint32_t shape_size = 0;
+ std::array<int32_t, 16> output_shape_data;
+
+ for (uint32_t idx = 0; idx < num_input_axes; ++idx)
+ {
+ int32_t begin = StartForAxis(op_params, input_shape, idx);
+ int32_t end = StopForAxis(op_params, input_shape, idx, begin);
+ if (end < 0)
+ end = input_shape.dim(idx).value() + end + 1;
+
+ // This is valid for both positive and negative strides
+ int32_t stride = strides_node->at<S32>(idx);
+ int32_t dim_shape = std::ceil(static_cast<float>(end - begin) / stride);
+ assert(dim_shape > 0);
+
+ // When shrinking an axis, the end position does not matter (and can be
+ // incorrect when negative indexing is used, see Issue #19260). Always use
+ // begin + 1 to generate a length 1 slice, since begin has
+ // already been adjusted for negative indices by StartForAxis.
+ const bool shrink_axis = node->shrink_axis_mask() & (1 << idx);
+ if (shrink_axis)
+ {
+ assert(dim_shape == 1);
+ }
+ else
+ {
+ output_shape_data[shape_size++] = dim_shape;
+ }
+ }
+
+ output_shape.rank(shape_size);
+ for (uint32_t idx = 0; idx < shape_size; ++idx)
+ {
+ output_shape.dim(idx) = output_shape_data[idx];
+ }
+
+ return output_shape;
+}
+
+} // namespace luci
diff --git a/compiler/luci/service/src/ShapeInfer_StridedSlice.h b/compiler/luci/service/src/ShapeInfer_StridedSlice.h
new file mode 100644
index 000000000..fa800b720
--- /dev/null
+++ b/compiler/luci/service/src/ShapeInfer_StridedSlice.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SHAPE_INFER_STRIDED_SLICE_H__
+#define __SHAPE_INFER_STRIDED_SLICE_H__
+
+#include <luci/IR/CircleNodes.h>
+
+#include <loco/IR/NodeShape.h>
+
+namespace luci
+{
+
+loco::TensorShape infer_output_shape(const CircleStridedSlice *node);
+
+} // namespace luci
+
+#endif // __SHAPE_INFER_STRIDED_SLICE_H__
diff --git a/compiler/luci/service/src/TestGraph.h b/compiler/luci/service/src/TestGraph.h
index 73562040f..2865b0f44 100644
--- a/compiler/luci/service/src/TestGraph.h
+++ b/compiler/luci/service/src/TestGraph.h
@@ -18,7 +18,6 @@
#define __TEST_GRAPH_H__
#include <luci/IR/CircleNodes.h>
-#include "GraphBlock.h"
#include <loco.h>
@@ -36,29 +35,29 @@ class TestGraph
{
public:
std::unique_ptr<loco::Graph> g;
- loco::Pull *pull;
- loco::Push *push;
+ luci::CircleInput *input_node = nullptr;
+ luci::CircleOutput *output_node = nullptr;
TestGraph() // creates Pull and Push
{
g = loco::make_graph();
- pull = g->nodes()->create<loco::Pull>();
+ input_node = g->nodes()->create<luci::CircleInput>();
- push = g->nodes()->create<loco::Push>();
+ output_node = g->nodes()->create<luci::CircleOutput>();
auto input = g->inputs()->create();
{
input->name("input");
- loco::link(input, pull);
+ luci::link(input, input_node);
}
auto output = g->outputs()->create();
{
output->name("output");
- loco::link(output, push);
+ luci::link(output, output_node);
}
- _next_input = pull;
+ _next_input = input_node;
}
loco::Graph *graph() { return g.get(); }
@@ -73,7 +72,7 @@ public:
}
/// @brief Creates op T (arity=1) with arg1 as an input and appends it to graph
- template <class T> T *append(loco::Node *arg1)
+ template <class T> T *append(luci::CircleNode *arg1)
{
auto node = g->nodes()->create<T>();
setInput(node, arg1);
@@ -83,7 +82,7 @@ public:
}
/// @brief Creates op T (arity=2) with arg1, arg2 as inputs and appends it to graph
- template <class T> T *append(loco::Node *arg1, loco::Node *arg2)
+ template <class T> T *append(luci::CircleNode *arg1, luci::CircleNode *arg2)
{
auto node = g->nodes()->create<T>();
setInput(node, arg1, arg2);
@@ -93,7 +92,8 @@ public:
}
/// @brief Creates op T (arity=3) with arg1, arg2, arg3 as inputs and appends it to graph
- template <class T> T *append(loco::Node *arg1, loco::Node *arg2, loco::Node *arg3)
+ template <class T>
+ T *append(luci::CircleNode *arg1, luci::CircleNode *arg2, luci::CircleNode *arg3)
{
auto node = g->nodes()->create<T>();
setInput(node, arg1, arg2, arg3);
@@ -102,101 +102,68 @@ public:
return node;
}
- // push will get the last appended node
- void complete() { push->from(_next_input); }
+ // output will get the last appended node
+ void complete() { output_node->from(_next_input); }
- void complete(loco::Node *last_node) { push->from(last_node); }
+ void complete(luci::CircleNode *last_node) { output_node->from(last_node); }
private:
// arity 1
- void setInput(loco::Node *node, loco::Node *) { assert(false && "NYI"); };
-
- void setInput(loco::AvgPool2D *node, loco::Node *input) { node->ifm(input); }
- void setInput(loco::BiasDecode *node, loco::Node *input) { node->input(input); };
- void setInput(loco::BiasEncode *node, loco::Node *input) { node->input(input); };
- void setInput(loco::FeatureDecode *node, loco::Node *input) { node->input(input); };
- void setInput(loco::FeatureEncode *node, loco::Node *input) { node->input(input); };
- void setInput(loco::MaxPool2D *node, loco::Node *input) { node->ifm(input); }
- void setInput(loco::Push *node, loco::Node *input) { node->from(input); };
- void setInput(loco::ReLU *node, loco::Node *input) { node->input(input); };
- void setInput(loco::ReLU6 *node, loco::Node *input) { node->input(input); };
- void setInput(loco::Tanh *node, loco::Node *input) { node->input(input); };
- void setInput(loco::TensorTranspose *node, loco::Node *input) { node->input(input); };
-
- void setInput(luci::CircleAveragePool2D *node, loco::Node *input) { node->value(input); };
- void setInput(luci::CircleMaxPool2D *node, loco::Node *input) { node->value(input); };
- void setInput(luci::CircleRelu *node, loco::Node *input) { node->features(input); };
- void setInput(luci::CircleRelu6 *node, loco::Node *input) { node->features(input); };
+ void setInput(luci::CircleNode *, luci::CircleNode *) { assert(false && "NYI"); };
- // arity 2
- void setInput(loco::Node *node, loco::Node *, loco::Node *) { assert(false && "NYI"); };
+ void setInput(luci::CircleAveragePool2D *node, luci::CircleNode *input) { node->value(input); };
+ void setInput(luci::CircleRelu *node, luci::CircleNode *input) { node->features(input); };
+ void setInput(luci::CircleSqueeze *node, luci::CircleNode *input) { node->input(input); };
- void setInput(loco::Conv2D *node, loco::Node *input, loco::Node *filter)
+ void setInput(luci::CircleGatherNd *node, luci::CircleNode *params, luci::CircleNode *indices)
{
- node->ifm(input);
- node->ker(filter);
- }
-
- void setInput(loco::EltwiseAdd *node, loco::Node *arg1, loco::Node *arg2)
- {
- node->lhs(arg1);
- node->rhs(arg2);
+ node->params(params);
+ node->indices(indices);
};
- void setInput(loco::FeatureBiasAdd *node, loco::Node *arg1, loco::Node *arg2)
+ // arity 2
+ void setInput(luci::CircleNode *, luci::CircleNode *, luci::CircleNode *)
{
- node->value(arg1);
- node->bias(arg2);
+ assert(false && "NYI");
};
- void setInput(luci::CircleAdd *node, loco::Node *arg1, loco::Node *arg2)
+ void setInput(luci::CircleExpandDims *node, luci::CircleNode *arg1, luci::CircleNode *arg2)
{
- node->x(arg1);
- node->y(arg2);
+ node->input(arg1);
+ node->axis(arg2);
};
- void setInput(luci::CircleMul *node, loco::Node *arg1, loco::Node *arg2)
+ void setInput(luci::CircleTranspose *node, luci::CircleNode *arg1, luci::CircleNode *arg2)
{
- node->x(arg1);
- node->y(arg2);
+ node->a(arg1);
+ node->perm(arg2);
};
- void setInput(luci::CircleSub *node, loco::Node *arg1, loco::Node *arg2)
+ void setInput(luci::CircleResizeBilinear *node, luci::CircleNode *input, luci::CircleNode *size)
{
- node->x(arg1);
- node->y(arg2);
+ node->input(input);
+ node->size(size);
};
- void setInput(luci::CircleTranspose *node, loco::Node *arg1, loco::Node *arg2)
+ void setInput(luci::CircleResizeNearestNeighbor *node, luci::CircleNode *input,
+ luci::CircleNode *size)
{
- node->a(arg1);
- node->perm(arg2);
+ node->input(input);
+ node->size(size);
};
// arity 3
- void setInput(loco::Node *node, loco::Node *, loco::Node *, loco::Node *)
+ void setInput(luci::CircleNode *, luci::CircleNode *, luci::CircleNode *, luci::CircleNode *)
{
assert(false && "NYI");
};
- void setInput(luci::CircleConv2D *node, loco::Node *input, loco::Node *filter, loco::Node *bias)
- {
- node->input(input);
- node->filter(filter);
- node->bias(bias);
- }
-
private:
loco::Node *_next_input;
};
enum class ExampleGraphType
{
- FeatureBiasAdd,
- ConstGen_ReLU,
- FilterEncode_FilterDecode,
- Transpose,
-
CircleTranspose,
};
@@ -205,109 +172,42 @@ template <ExampleGraphType T> class ExampleGraph;
/**
* @brief Class to create the following:
*
- * Pull - FeatureEncoder - FeatureBiasAdd - FeatureDecode - Push
- * |
- * ConstGen - BiasEncode --+
+ * CircleInput -- CircleTranspose -- CircleOutput
*/
-template <> class ExampleGraph<ExampleGraphType::FeatureBiasAdd> : public TestGraph
+template <> class ExampleGraph<ExampleGraphType::CircleTranspose> : public TestGraph
{
public:
- loco::FeatureEncode *fea_enc = nullptr;
- loco::ConstGen *constgen = nullptr;
- loco::BiasEncode *bias_enc = nullptr;
- loco::FeatureBiasAdd *fea_bias_add = nullptr;
- loco::FeatureDecode *fea_dec = nullptr;
+ luci::CircleConst *const_perm = nullptr;
+ luci::CircleTranspose *transpose_node = nullptr;
public:
ExampleGraph()
{
- fea_enc = luci::make_feature_encode<luci::FeatureLayout::NHWC>(pull);
- constgen = append<loco::ConstGen>();
- bias_enc = append<loco::BiasEncode>(constgen);
- fea_bias_add = append<loco::FeatureBiasAdd>(fea_enc, bias_enc);
- fea_dec = luci::make_feature_decode<luci::FeatureLayout::NHWC>(fea_bias_add);
- complete(fea_dec);
+ const_perm = append<luci::CircleConst>();
+ transpose_node = append<luci::CircleTranspose>(input_node, const_perm);
+ complete(transpose_node);
}
};
-/**
- * @brief Class to creates the following:
- *
- * ConstGen -- ReLU -- Push
- */
-template <> class ExampleGraph<ExampleGraphType::ConstGen_ReLU> : public TestGraph
-{
-public:
- loco::ConstGen *constgen = nullptr;
- loco::ReLU *relu = nullptr;
-
-public:
- ExampleGraph()
- {
- constgen = append<loco::ConstGen>();
- relu = append<loco::ReLU>(constgen);
- complete(relu);
- }
-};
+} // namespace test
+} // namespace luci
-/**
- * @brief Class to creates the following:
- *
- * Pull -- Transpose -- Push
- */
-template <> class ExampleGraph<ExampleGraphType::Transpose> : public TestGraph
+namespace luci
{
-public:
- loco::TensorTranspose *transpose = nullptr;
-
-public:
- ExampleGraph()
- {
- transpose = append<loco::TensorTranspose>(pull);
- complete(transpose);
- }
-};
-
-/**
- * @brief Class to creates the following:
- *
- * Pull -- FilterEncode -- FilterDecode -- Push
- */
-template <> class ExampleGraph<ExampleGraphType::FilterEncode_FilterDecode> : public TestGraph
+namespace test
{
-public:
- loco::FilterEncode *filterEncode = nullptr;
- loco::FilterDecode *filterDecode = nullptr;
-public:
- ExampleGraph()
- {
- filterEncode = luci::make_filter_encode<luci::FilterLayout::HWIO>(pull); // from Tensorflow
- filterDecode =
- luci::make_filter_decode<luci::FilterLayout::OHWI>(filterEncode); // to Tensorflow Lite
- complete(filterDecode);
- }
-};
+/// @brief This will set GraphInput shape from CircleInput shape
+void graph_input_shape(luci::CircleInput *input);
-/**
- * @brief Class to create the following:
- *
- * Pull -- CircleTranspose -- Push
- */
-template <> class ExampleGraph<ExampleGraphType::CircleTranspose> : public TestGraph
-{
-public:
- loco::ConstGen *const_perm = nullptr;
- luci::CircleTranspose *transpose_node = nullptr;
+/// @brief This will set GraphOutput shape from CircleOutput shape
+void graph_output_shape(luci::CircleOutput *output);
-public:
- ExampleGraph()
- {
- const_perm = append<loco::ConstGen>();
- transpose_node = append<luci::CircleTranspose>(pull, const_perm);
- complete(transpose_node);
- }
-};
+/// @brief This will set GraphInput dtype from CircleInput dtype
+void graph_input_dtype(luci::CircleInput *input);
+
+/// @brief This will set GraphOutput dtype from CircleOutput dtype
+void graph_output_dtype(luci::CircleOutput *output);
} // namespace test
} // namespace luci
diff --git a/compiler/luci/service/src/TestGraph.test.cpp b/compiler/luci/service/src/TestGraph.test.cpp
new file mode 100644
index 000000000..9ede70370
--- /dev/null
+++ b/compiler/luci/service/src/TestGraph.test.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TestGraph.h"
+
+namespace luci
+{
+namespace test
+{
+
+void graph_input_shape(luci::CircleInput *input)
+{
+ auto index = input->index();
+ auto inputs = input->graph()->inputs();
+
+ for (uint32_t idx = 0; idx < inputs->size(); ++idx)
+ {
+ auto gi = inputs->at(idx);
+ if (gi->index() == index)
+ {
+ auto input_shape = std::make_unique<loco::TensorShape>();
+
+ input_shape->rank(input->rank());
+ for (uint32_t r = 0; r < input->rank(); ++r)
+ input_shape->dim(r) = loco::Dimension(input->dim(r));
+
+ gi->shape(std::move(input_shape));
+ break;
+ }
+ }
+}
+
+void graph_output_shape(luci::CircleOutput *output)
+{
+ auto index = output->index();
+ auto outputs = output->graph()->outputs();
+
+ for (uint32_t idx = 0; idx < outputs->size(); ++idx)
+ {
+ auto go = outputs->at(idx);
+ if (go->index() == index)
+ {
+ auto output_shape = std::make_unique<loco::TensorShape>();
+
+ output_shape->rank(output->rank());
+ for (uint32_t r = 0; r < output->rank(); ++r)
+ output_shape->dim(r) = loco::Dimension(output->dim(r));
+
+ go->shape(std::move(output_shape));
+ break;
+ }
+ }
+}
+
+void graph_input_dtype(luci::CircleInput *input)
+{
+ auto index = input->index();
+ auto inputs = input->graph()->inputs();
+
+ for (uint32_t idx = 0; idx < inputs->size(); ++idx)
+ {
+ auto gi = inputs->at(idx);
+ if (gi->index() == index)
+ {
+ gi->dtype(input->dtype());
+ break;
+ }
+ }
+}
+
+void graph_output_dtype(luci::CircleOutput *output)
+{
+ auto index = output->index();
+ auto outputs = output->graph()->outputs();
+
+ for (uint32_t idx = 0; idx < outputs->size(); ++idx)
+ {
+ auto go = outputs->at(idx);
+ if (go->index() == index)
+ {
+ go->dtype(output->dtype());
+ break;
+ }
+ }
+}
+
+} // namespace test
+} // namespace luci
diff --git a/compiler/luci/service/src/Validate.cpp b/compiler/luci/service/src/Validate.cpp
index 65b82c2b4..282a068e0 100644
--- a/compiler/luci/service/src/Validate.cpp
+++ b/compiler/luci/service/src/Validate.cpp
@@ -29,6 +29,19 @@
namespace
{
+std::ostream &operator<<(std::ostream &os, const loco::TensorShape &tensor_shape)
+{
+ os << "[";
+ for (uint32_t r = 0; r < tensor_shape.rank(); ++r)
+ {
+ if (r)
+ os << ",";
+ os << tensor_shape.dim(r).value();
+ }
+ os << "]";
+ return os;
+}
+
/**
* @brief returns a node that is CircleOutput with index is out_index in nodes
*/
@@ -46,7 +59,7 @@ luci::CircleOutput *find_node(std::vector<loco::Node *> nodes, loco::GraphOutput
return nullptr;
}
-bool validate_shape_type(loco::Graph *g)
+bool validate_shape_dtype(loco::Graph *g)
{
LOGGER(l);
@@ -61,18 +74,18 @@ bool validate_shape_type(loco::Graph *g)
auto circle_output = find_node(output_nodes, out_index);
assert(circle_output != nullptr);
assert(circle_output->from() != nullptr);
- auto circle_node = dynamic_cast<luci::CircleNode *>(circle_output->from());
- assert(circle_node != nullptr);
+ auto circle_node = loco::must_cast<luci::CircleNode *>(circle_output->from());
assert(loco::shape_known(circle_node));
// check if output node shape is same as graph output shape
- auto co_shape = loco::shape_get(circle_node);
+ auto co_tensor_shape = loco::shape_get(circle_node).as<loco::TensorShape>();
auto go_tensor_shape = graph_out->shape();
assert(go_tensor_shape);
- auto go_shape = loco::NodeShape(*go_tensor_shape);
- if (!(co_shape == go_shape))
+ if (!(co_tensor_shape == *go_tensor_shape))
{
- INFO(l) << "Shape for #" << out_index << " not same " << std::endl;
+ INFO(l) << "[luci] Shape for output #" << out_index << " not same " << std::endl;
+ INFO(l) << "[luci] " << circle_node->name() << " " << co_tensor_shape << " vs "
+ << *go_tensor_shape << std::endl;
return false;
}
@@ -80,7 +93,7 @@ bool validate_shape_type(loco::Graph *g)
assert(loco::dtype_known(circle_node));
if (graph_out->dtype() != loco::dtype_get(circle_node))
{
- INFO(l) << "Type for #" << out_index << " not same " << std::endl;
+ INFO(l) << "[luci] Type for output #" << out_index << " not same " << std::endl;
return false;
}
}
@@ -98,7 +111,7 @@ bool validate(loco::Graph *g)
if (!loco::valid(g))
return false;
- if (!validate_shape_type(g))
+ if (!validate_shape_dtype(g))
return false;
// TODO add more validation
diff --git a/compiler/luci/tester/CMakeLists.txt b/compiler/luci/tester/CMakeLists.txt
index bcb47183e..3ac06ef3a 100644
--- a/compiler/luci/tester/CMakeLists.txt
+++ b/compiler/luci/tester/CMakeLists.txt
@@ -1,17 +1,23 @@
+nnas_include(TargetRequire)
+
+unset(REQUIRED_TARGETS)
+list(APPEND REQUIRED_TARGETS safemain)
+TargetRequire_Return(${REQUIRED_TARGETS})
+
set(SRCS_READ_TESTER
src/ReadTester.cpp
- src/Model.cpp
)
add_executable(luci_readtester "${SRCS_READ_TESTER}")
target_link_libraries(luci_readtester PRIVATE luci_import)
target_link_libraries(luci_readtester PRIVATE luci_service)
target_link_libraries(luci_readtester PRIVATE luci_pass)
+target_link_libraries(luci_readtester PRIVATE foder)
target_link_libraries(luci_readtester PRIVATE oops)
+target_link_libraries(luci_readtester PRIVATE safemain)
set(SRCS_WRITE_TESTER
src/WriteTester.cpp
- src/Model.cpp
)
add_executable(luci_writetester "${SRCS_WRITE_TESTER}")
@@ -19,4 +25,6 @@ target_link_libraries(luci_writetester PRIVATE luci_import)
target_link_libraries(luci_writetester PRIVATE luci_service)
target_link_libraries(luci_writetester PRIVATE luci_pass)
target_link_libraries(luci_writetester PRIVATE luci_export)
+target_link_libraries(luci_writetester PRIVATE foder)
target_link_libraries(luci_writetester PRIVATE oops)
+target_link_libraries(luci_writetester PRIVATE safemain)
diff --git a/compiler/luci/tester/src/Model.cpp b/compiler/luci/tester/src/Model.cpp
deleted file mode 100644
index b02c19161..000000000
--- a/compiler/luci/tester/src/Model.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-#include "Model.h"
-
-#include <fstream>
-#include <vector>
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-
-namespace
-{
-
-class FileModel final : public luci::Model
-{
-public:
- explicit FileModel(const std::string &filename) : _filename(filename) {}
-
-public:
- FileModel(const FileModel &) = delete;
- FileModel(FileModel &&) = delete;
-
-public:
- const ::circle::Model *model(void) override
- {
- std::ifstream file(_filename, std::ios::binary | std::ios::in);
- if (!file.good())
- return nullptr;
-
- file.unsetf(std::ios::skipws);
-
- std::streampos fileSize;
- file.seekg(0, std::ios::end);
- fileSize = file.tellg();
- file.seekg(0, std::ios::beg);
-
- // reserve capacity
- _data.reserve(fileSize);
-
- // read the data
- file.read(_data.data(), fileSize);
- if (file.fail())
- return nullptr;
-
- return ::circle::GetModel(_data.data());
- }
-
-private:
- const std::string _filename;
- std::vector<char> _data;
-};
-
-} // namespace
-
-namespace luci
-{
-
-std::unique_ptr<Model> load_model(const std::string &path)
-{
- return std::make_unique<FileModel>(path);
-}
-
-} // namespace luci
diff --git a/compiler/luci/tester/src/Model.h b/compiler/luci/tester/src/Model.h
deleted file mode 100644
index e40faf33e..000000000
--- a/compiler/luci/tester/src/Model.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __TESTER_MODEL_H__
-#define __TESTER_MODEL_H__
-
-#include <mio/circle/schema_generated.h>
-
-#include <memory>
-
-namespace luci
-{
-
-struct Model
-{
- virtual ~Model() = default;
-
- virtual const ::circle::Model *model(void) = 0;
-};
-
-/**
- * @brief Load Circle model (as a raw Model) from a given path
- *
- * @note May return a nullptr
- */
-std::unique_ptr<Model> load_model(const std::string &path);
-
-} // namespace luci
-
-#endif // __TESTER_MODEL_H__
diff --git a/compiler/luci/tester/src/ReadTester.cpp b/compiler/luci/tester/src/ReadTester.cpp
index c105d6ce3..a1aead1bd 100644
--- a/compiler/luci/tester/src/ReadTester.cpp
+++ b/compiler/luci/tester/src/ReadTester.cpp
@@ -1,4 +1,20 @@
-#include "Model.h"
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <foder/FileLoader.h>
#include <luci/Importer.h>
#include <luci/Service/Validate.h>
@@ -37,7 +53,7 @@ void show_error_message(const char *progname, std::ostream &os, const std::strin
* dump graph to console if set.
* i.e. "LUCI_LOG=1 luci_readtester mymodel.circle"
*/
-int main(int argc, char **argv)
+int entry(int argc, char **argv)
{
if (argc != 2)
{
@@ -50,22 +66,17 @@ int main(int argc, char **argv)
std::cout << "[INFO] Circle is '" << input_path << "'" << std::endl;
// Load model from the file
- std::unique_ptr<luci::Model> model = luci::load_model(input_path);
- if (model == nullptr)
+ foder::FileLoader file_loader{input_path};
+ std::vector<char> model_data = file_loader.load();
+ const circle::Model *circle_model = circle::GetModel(model_data.data());
+ if (circle_model == nullptr)
{
- std::cerr << "ERROR: Failed to load '" << input_path << "'" << std::endl;
- return 255;
- }
-
- const circle::Model *input_model = model->model();
- if (input_model == nullptr)
- {
- std::cerr << "ERROR: Failed to read '" << input_path << "'" << std::endl;
- return 255;
+ std::cerr << "ERROR: Failed to load circle '" << input_path << "'" << std::endl;
+ return EXIT_FAILURE;
}
luci::Importer importer;
- auto module = importer.importModule(input_model);
+ auto module = importer.importModule(circle_model);
assert(module->size() > 0);
for (size_t g = 0; g < module->size(); ++g)
diff --git a/compiler/luci/tester/src/WriteTester.cpp b/compiler/luci/tester/src/WriteTester.cpp
index 80019d1b1..aa7085c77 100644
--- a/compiler/luci/tester/src/WriteTester.cpp
+++ b/compiler/luci/tester/src/WriteTester.cpp
@@ -1,4 +1,20 @@
-#include "Model.h"
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <foder/FileLoader.h>
#include <luci/Importer.h>
#include <luci/Pass/ShapeInferencePass.h>
@@ -52,8 +68,8 @@ public:
bool store(const char *ptr, const size_t size) const final;
private:
- loco::Graph *_graph;
- luci::Module *_module;
+ loco::Graph *_graph{nullptr};
+ luci::Module *_module{nullptr};
const std::string _filepath;
};
@@ -79,7 +95,7 @@ bool CircleExpContract::store(const char *ptr, const size_t size) const
* With the graph, this will use luci_export to write to the second file
* Like ReadTester, LUCI_LOG=1 environment variable is available to dump the graph
*/
-int main(int argc, char **argv)
+int entry(int argc, char **argv)
{
if (argc != 3)
{
@@ -93,23 +109,18 @@ int main(int argc, char **argv)
std::cout << "[INFO] Circle from '" << input_path << "' to '" << output_path << "'" << std::endl;
// Load model from the file
- std::unique_ptr<luci::Model> model = luci::load_model(input_path);
- if (model == nullptr)
+ foder::FileLoader file_loader{input_path};
+ std::vector<char> model_data = file_loader.load();
+ const circle::Model *circle_model = circle::GetModel(model_data.data());
+ if (circle_model == nullptr)
{
- std::cerr << "ERROR: Failed to load '" << input_path << "'" << std::endl;
- return 255;
- }
-
- const circle::Model *input_model = model->model();
- if (input_model == nullptr)
- {
- std::cerr << "ERROR: Failed to read '" << input_path << "'" << std::endl;
- return 255;
+ std::cerr << "ERROR: Failed to load circle '" << input_path << "'" << std::endl;
+ return EXIT_FAILURE;
}
// Import from input Circle file
luci::Importer importer;
- auto module = importer.importModule(input_model);
+ auto module = importer.importModule(circle_model);
assert(module->size() > 0);
for (size_t g = 0; g < module->size(); ++g)
diff --git a/compiler/luci/tests/CMakeLists.txt b/compiler/luci/tests/CMakeLists.txt
index 4e5639047..c03835823 100644
--- a/compiler/luci/tests/CMakeLists.txt
+++ b/compiler/luci/tests/CMakeLists.txt
@@ -65,6 +65,35 @@ foreach(RECIPE IN ITEMS ${RECIPES})
list(APPEND TESTFILES "${CIRCLE_OUTPUT_FILE}")
endforeach(RECIPE)
+# Generate from res/CircleRecipes
+# NOTE duplicate names should not exist or test may be incorrect
+nncc_find_resource(CircleRecipes)
+set(CIRCLERECIPES_DIR "${CircleRecipes_DIR}")
+
+file(GLOB RECIPES2 RELATIVE ${CIRCLERECIPES_DIR} "${CIRCLERECIPES_DIR}/*/test.recipe")
+
+foreach(RECIPE IN ITEMS ${RECIPES2})
+ get_filename_component(RECIPE_PREFIX ${RECIPE} DIRECTORY)
+
+ set(RECIPE_SOURCE_FILE "${RECIPE_PREFIX}.recipe")
+ set(CIRCLE_OUTPUT_FILE "${RECIPE_PREFIX}.circle")
+
+ # Copy .recipe
+ add_custom_command(OUTPUT "${RECIPE_SOURCE_FILE}"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ "${CIRCLERECIPES_DIR}/${RECIPE}" "${RECIPE_SOURCE_FILE}"
+ DEPENDS "${CIRCLERECIPES_DIR}/${RECIPE}"
+ COMMENT "Generating ${RECIPE_SOURCE_FILE}")
+
+ # Generate .circle
+ add_custom_command(OUTPUT "${CIRCLE_OUTPUT_FILE}"
+ COMMAND circlechef-file "${RECIPE_SOURCE_FILE}" "${CIRCLE_OUTPUT_FILE}"
+ DEPENDS circlechef-file "${RECIPE_SOURCE_FILE}"
+ COMMENT "Generating ${CIRCLE_OUTPUT_FILE}")
+
+ list(APPEND TESTFILES "${CIRCLE_OUTPUT_FILE}")
+endforeach(RECIPE)
+
# Add a dummy target to create a target-level dependency.
# TODO Find a way to create dependency between CTest tests (added below) and generated testfiles.
add_custom_target(luci_testfiles ALL DEPENDS ${TESTFILES})
diff --git a/compiler/luci/tests/readverify.sh b/compiler/luci/tests/readverify.sh
index 3403e9c19..6d6753d39 100755
--- a/compiler/luci/tests/readverify.sh
+++ b/compiler/luci/tests/readverify.sh
@@ -7,6 +7,9 @@
# ./readverify.sh <path/to/luci_readtester> <TEST 1> <TEST 2> ...
VERIFY_SOURCE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# set LOG enable to execute/test luci/logex codes
+export LUCI_LOG=100
+
WORKDIR="$1"; shift
VERIFY_BINARY_PATH="$1"; shift
diff --git a/compiler/luci/tests/test.lst b/compiler/luci/tests/test.lst
index 08cbd6b1a..188e29828 100644
--- a/compiler/luci/tests/test.lst
+++ b/compiler/luci/tests/test.lst
@@ -1,5 +1,8 @@
+addread(Abs_000)
addread(Add_000)
+addread(Add_001)
addread(Add_U8_000)
+addread(AddN_000)
addread(ArgMax_000)
addread(ArgMax_001)
addread(ArgMax_002)
@@ -8,44 +11,190 @@ addread(ArgMax_U8_000)
addread(ArgMax_U8_001)
addread(ArgMax_U8_002)
addread(ArgMax_U8_003)
+addread(ArgMin_000)
+addread(ArgMin_001)
+addread(ArgMin_002)
+addread(ArgMin_003)
+addread(ArgMin_U8_000)
+addread(ArgMin_U8_001)
+addread(ArgMin_U8_002)
+addread(ArgMin_U8_003)
+addread(AveragePool2D_000)
+addread(BatchMatMul_000)
+addread(BatchMatMulV2_000)
+addread(BatchMatMulV2_001)
addread(BatchToSpaceND_000)
+addread(Cast_000)
+addread(Cast_001)
+addread(Ceil_000)
addread(Concatenation_000)
addread(Concatenation_U8_000)
addread(Conv2D_000)
-addread(Conv2D_U8_000)
addread(Conv2D_002)
+addread(Conv2D_003)
+addread(Conv2D_U8_000)
addread(Cos_000)
+addread(DepthToSpace_000)
addread(DepthwiseConv2D_000)
addread(DepthwiseConv2D_U8_000)
+addread(DepthwiseConv2D_001)
addread(Div_000)
+addread(ELU_000)
addread(Equal_000)
addread(Exp_000)
+addread(ExpandDims_000)
+addread(ExpandDims_001)
+addread(ExpandDims_002)
+addread(ExpandDims_003)
+addread(Fill_000)
+addread(Fill_001)
+addread(Floor_000)
+addread(FloorDiv_000)
+addread(FloorDiv_001)
+addread(FloorMod_000)
+addread(FloorMod_001)
addread(FullyConnected_000)
addread(FullyConnected_001)
+addread(FullyConnected_002)
addread(FullyConnected_U8_000)
+addread(Gather_000)
+addread(GatherNd_000)
+addread(Greater_000)
+addread(GreaterEqual_000)
+addread(If_000)
+addread(If_001)
+addread(L2Normalize_000)
+addread(L2Pool2D_000)
+addread(L2Pool2D_U8_000)
+addread(LeakyRelu_000)
+addread(Less_000)
+addread(LessEqual_000)
+addread(LocalResponseNormalization_000)
+addread(Log_000)
+addread(LogicalAnd_000)
addread(LogicalNot_000)
addread(LogicalOr_000)
+addread(Logistic_000)
+addread(LogSoftmax_000)
+addread(MatMul_000)
+addread(MatrixDiag_000)
+addread(MatrixSetDiag_000)
+addread(Maximum_000)
addread(MaxPool2D_000)
addread(MaxPool2D_U8_000)
addread(Mean_000)
+addread(Mean_001)
+addread(Minimum_000)
+addread(MirrorPad_000)
addread(Mul_000)
addread(Mul_U8_000)
+addread(Neg_000)
+addread(NotEqual_000)
+addread(OneHot_000)
+addread(OneHot_001)
+addread(OneHot_002)
+addread(OneHot_003)
addread(Pack_000)
addread(Pack_U8_000)
addread(Pad_000)
+addread(Pow_000)
+addread(PRelu_000)
+addread(Range_000)
+addread(Rank_000)
+addread(ReduceAny_000)
+addread(ReduceAny_001)
+addread(ReduceAny_002)
+addread(ReduceAny_003)
+addread(ReduceMax_000)
+addread(ReduceMin_000)
+addread(ReduceProd_000)
+addread(ReduceProd_001)
+addread(ReduceProd_002)
+addread(ReduceProd_003)
addread(ReLU_000)
+addread(ReLU6_000)
+addread(ReLUN1To1_000)
addread(Reshape_000)
addread(Reshape_001)
+addread(Reshape_002)
+addread(Reshape_003)
addread(Reshape_U8_000)
+addread(ResizeBilinear_000)
+addread(ResizeNearestNeighbor_000)
+addread(ReverseSequence_000)
+addread(ReverseV2_000)
+addread(Round_000)
addread(Rsqrt_000)
+addread(ScatterNd_000)
+addread(SegmentSum_000)
+addread(Select_000)
+addread(Select_001)
+addread(Select_002)
+addread(SelectV2_000)
+addread(SelectV2_001)
+addread(SelectV2_002)
+addread(Shape_000)
+addread(Sin_000)
+addread(Slice_000)
addread(Softmax_000)
addread(Softmax_U8_000)
+addread(SpaceToBatchND_000)
+addread(SpaceToBatchND_001)
+addread(SpaceToBatchND_002)
+addread(SpaceToBatchND_003)
+addread(SpaceToDepth_000)
+addread(SparseToDense_000)
+addread(Split_000)
+addread(SplitV_000)
+addread(Sqrt_000)
+addread(Square_000)
+addread(SquaredDifference_000)
+addread(Squeeze_000)
+addread(StridedSlice_000)
+addread(StridedSlice_001)
+addread(StridedSlice_002)
addread(Sub_000)
addread(Sub_U8_000)
+addread(Sum_000)
+addread(Sum_001)
+addread(Tanh_000)
+addread(Tile_000)
+addread(Tile_U8_000)
+addread(TopKV2_000)
+addread(TopKV2_001)
addread(Transpose_000)
+addread(TransposeConv_000)
+addread(Unpack_000)
+addread(Unpack_001)
+addread(Unpack_002)
+addread(Unpack_003)
+addread(Where_000)
+addread(Where_001)
+addread(While_000)
+addread(While_001)
+addread(While_002)
+addread(While_003)
+addread(YUV_TO_RGB_U8_000)
+addread(ZerosLike_000)
+
+addread(Net_Dangle_001)
+addread(Net_InstanceNorm_001)
+addread(Net_InstanceNorm_002)
+addread(Net_UnpackAdd_001)
+addread(Net_ZeroDim_001)
+
+# from res/CircleRecipes
+addread(BCQFullyConnected_000)
+addread(BCQFullyConnected_001)
+addread(BCQGather_000)
+addread(CircleBatchMatMul_000)
+addread(InstanceNorm_000)
+addwrite(Abs_000)
addwrite(Add_000)
+addwrite(Add_001)
addwrite(Add_U8_000)
+addwrite(AddN_000)
addwrite(ArgMax_000)
addwrite(ArgMax_001)
addwrite(ArgMax_002)
@@ -54,38 +203,181 @@ addwrite(ArgMax_U8_000)
addwrite(ArgMax_U8_001)
addwrite(ArgMax_U8_002)
addwrite(ArgMax_U8_003)
+addwrite(ArgMin_000)
+addwrite(ArgMin_001)
+addwrite(ArgMin_002)
+addwrite(ArgMin_003)
+addwrite(ArgMin_U8_000)
+addwrite(ArgMin_U8_001)
+addwrite(ArgMin_U8_002)
+addwrite(ArgMin_U8_003)
+addwrite(AveragePool2D_000)
+addwrite(BatchMatMul_000)
+addwrite(BatchMatMulV2_000)
+addwrite(BatchMatMulV2_001)
addwrite(BatchToSpaceND_000)
+addwrite(Cast_000)
+addwrite(Cast_001)
+addwrite(Ceil_000)
addwrite(Concatenation_000)
addwrite(Concatenation_U8_000)
addwrite(Conv2D_000)
-addwrite(Conv2D_U8_000)
addwrite(Conv2D_002)
+addwrite(Conv2D_003)
+addwrite(Conv2D_U8_000)
addwrite(Cos_000)
+addwrite(DepthToSpace_000)
addwrite(DepthwiseConv2D_000)
addwrite(DepthwiseConv2D_U8_000)
+addwrite(DepthwiseConv2D_001)
addwrite(Div_000)
+addwrite(ELU_000)
addwrite(Equal_000)
addwrite(Exp_000)
+addwrite(ExpandDims_000)
+addwrite(ExpandDims_001)
+addwrite(ExpandDims_002)
+addwrite(ExpandDims_003)
+addwrite(Fill_000)
+addwrite(Fill_001)
+addwrite(Floor_000)
+addwrite(FloorDiv_000)
+addwrite(FloorDiv_001)
+addwrite(FloorMod_000)
+addwrite(FloorMod_001)
addwrite(FullyConnected_000)
addwrite(FullyConnected_001)
+addwrite(FullyConnected_002)
addwrite(FullyConnected_U8_000)
+addwrite(Gather_000)
+addwrite(GatherNd_000)
+addwrite(Greater_000)
+addwrite(GreaterEqual_000)
+addwrite(If_000)
+addwrite(If_001)
+addwrite(L2Normalize_000)
+addwrite(L2Pool2D_000)
+addwrite(L2Pool2D_U8_000)
+addwrite(LeakyRelu_000)
+addwrite(Less_000)
+addwrite(LessEqual_000)
+addwrite(LocalResponseNormalization_000)
+addwrite(Log_000)
+addwrite(LogicalAnd_000)
addwrite(LogicalNot_000)
addwrite(LogicalOr_000)
+addwrite(Logistic_000)
+addwrite(LogSoftmax_000)
+addwrite(MatMul_000)
+addwrite(MatrixDiag_000)
+addwrite(MatrixSetDiag_000)
+addwrite(Maximum_000)
addwrite(MaxPool2D_000)
addwrite(MaxPool2D_U8_000)
addwrite(Mean_000)
+addwrite(Mean_001)
+addwrite(Minimum_000)
+addwrite(MirrorPad_000)
addwrite(Mul_000)
addwrite(Mul_U8_000)
+addwrite(Neg_000)
+addwrite(NotEqual_000)
+addwrite(OneHot_000)
+addwrite(OneHot_001)
+addwrite(OneHot_002)
+addwrite(OneHot_003)
addwrite(Pack_000)
addwrite(Pack_U8_000)
addwrite(Pad_000)
+addwrite(Pow_000)
+addwrite(PRelu_000)
+addwrite(Range_000)
+addwrite(Rank_000)
+addwrite(ReduceAny_000)
+addwrite(ReduceAny_001)
+addwrite(ReduceAny_002)
+addwrite(ReduceAny_003)
+addwrite(ReduceMax_000)
+addwrite(ReduceMin_000)
+addwrite(ReduceProd_000)
+addwrite(ReduceProd_001)
+addwrite(ReduceProd_002)
+addwrite(ReduceProd_003)
addwrite(ReLU_000)
+addwrite(ReLU6_000)
+addwrite(ReLUN1To1_000)
addwrite(Reshape_000)
addwrite(Reshape_001)
+addwrite(Reshape_002)
+addwrite(Reshape_003)
addwrite(Reshape_U8_000)
+addwrite(ResizeBilinear_000)
+addwrite(ResizeNearestNeighbor_000)
+addwrite(ReverseSequence_000)
+addwrite(ReverseV2_000)
+addwrite(Round_000)
addwrite(Rsqrt_000)
+addwrite(ScatterNd_000)
+addwrite(SegmentSum_000)
+addwrite(Select_000)
+addwrite(Select_001)
+addwrite(Select_002)
+addwrite(SelectV2_000)
+addwrite(SelectV2_001)
+addwrite(SelectV2_002)
+addwrite(Shape_000)
+addwrite(Sin_000)
+addwrite(Slice_000)
addwrite(Softmax_000)
addwrite(Softmax_U8_000)
+addwrite(SpaceToBatchND_000)
+addwrite(SpaceToBatchND_001)
+addwrite(SpaceToBatchND_002)
+addwrite(SpaceToBatchND_003)
+addwrite(SpaceToDepth_000)
+addwrite(SparseToDense_000)
+addwrite(Split_000)
+addwrite(SplitV_000)
+addwrite(Sqrt_000)
+addwrite(Square_000)
+addwrite(SquaredDifference_000)
+addwrite(Squeeze_000)
+addwrite(StridedSlice_000)
+addwrite(StridedSlice_001)
+addwrite(StridedSlice_002)
addwrite(Sub_000)
addwrite(Sub_U8_000)
+addwrite(Sum_000)
+addwrite(Sum_001)
+addwrite(Tanh_000)
+addwrite(Tile_000)
+addwrite(Tile_U8_000)
+addwrite(TopKV2_000)
+addwrite(TopKV2_001)
addwrite(Transpose_000)
+addwrite(TransposeConv_000)
+addwrite(Unpack_000)
+addwrite(Unpack_001)
+addwrite(Unpack_002)
+addwrite(Unpack_003)
+addwrite(Where_000)
+addwrite(Where_001)
+addwrite(While_000)
+addwrite(While_001)
+addwrite(While_002)
+addwrite(While_003)
+addwrite(YUV_TO_RGB_U8_000)
+addwrite(ZerosLike_000)
+
+addwrite(Net_Dangle_001)
+addwrite(Net_InstanceNorm_001)
+addwrite(Net_InstanceNorm_002)
+addwrite(Net_UnpackAdd_001)
+addwrite(Net_ZeroDim_001)
+
+# from res/CircleRecipes
+addwrite(BCQFullyConnected_000)
+addwrite(BCQFullyConnected_001)
+addwrite(BCQGather_000)
+addwrite(CircleBatchMatMul_000)
+addwrite(InstanceNorm_000)
diff --git a/compiler/mio-circle/CMakeLists.txt b/compiler/mio-circle/CMakeLists.txt
index f97ec2b99..9c1126d6f 100644
--- a/compiler/mio-circle/CMakeLists.txt
+++ b/compiler/mio-circle/CMakeLists.txt
@@ -13,7 +13,7 @@ set(SCHEMA_FILE "${NNAS_PROJECT_SOURCE_DIR}/nnpackage/schema/circle_schema.fbs")
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/schema.fbs"
COMMAND ${CMAKE_COMMAND} -E copy "${SCHEMA_FILE}" schema.fbs
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
- DEPENDS "${NNAS_PROJECT_SOURCE_DIR}/nnpackage/schema/circle_schema.fbs"
+ DEPENDS "${SCHEMA_FILE}"
)
FlatBuffers_Target(mio_circle
diff --git a/compiler/mio-tflite/CMakeLists.txt b/compiler/mio-tflite/CMakeLists.txt
index cb0795a08..2cfed1449 100644
--- a/compiler/mio-tflite/CMakeLists.txt
+++ b/compiler/mio-tflite/CMakeLists.txt
@@ -1,10 +1,15 @@
nnas_find_package(FlatBuffers QUIET)
if(NOT FlatBuffers_FOUND)
+ message(STATUS "Build mio-tflite: FAILED (missing Flatbuffers)")
return()
endif(NOT FlatBuffers_FOUND)
-nnas_find_package(TensorFlowSource EXACT 2.1.0 QUIET)
+# TODO recover official release version
+# NOTE we cannot use version number like "2.3.0-rc0" for find_package()
+# use TensorFlowSource-2.3.0-rc0 as config itself
+# nnas_find_package(TensorFlowSource EXACT 2.3.0 QUIET)
+nnas_find_package(TensorFlowSource-2.3.0-rc0 QUIET)
if(NOT TensorFlowSource_FOUND)
return()
diff --git a/compiler/mir-caffe-importer/CMakeLists.txt b/compiler/mir-caffe-importer/CMakeLists.txt
deleted file mode 100644
index 83176510e..000000000
--- a/compiler/mir-caffe-importer/CMakeLists.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-nnas_find_package(CaffeProto QUIET)
-
-if (NOT CaffeProto_FOUND)
- return()
-endif ()
-
-set(MIR_CAFFE_IMPORTER_SOURCES
- caffe_importer.cpp
- caffe_importer.h
- caffe_op_creator.cpp
- caffe_op_creator.h
- caffe_op_types.h)
-
-add_library(mir_caffe_importer STATIC ${MIR_CAFFE_IMPORTER_SOURCES})
-set_target_properties(mir_caffe_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(mir_caffe_importer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
-target_link_libraries(mir_caffe_importer PUBLIC mir caffeproto PRIVATE stdex)
diff --git a/compiler/mir-caffe-importer/caffe_importer.cpp b/compiler/mir-caffe-importer/caffe_importer.cpp
deleted file mode 100644
index 8e5ebda15..000000000
--- a/compiler/mir-caffe-importer/caffe_importer.cpp
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "caffe_importer.h"
-#include "caffe/proto/caffe.pb.h"
-#include "caffe_op_creator.h"
-#include "caffe_op_types.h"
-
-#include "mir/ops/OutputOp.h"
-
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <google/protobuf/io/coded_stream.h>
-#include <google/protobuf/text_format.h>
-
-#include <fcntl.h>
-
-#include <cassert>
-#include <cerrno>
-#include <cstring>
-#include <stdex/Memory.h>
-#include <stdexcept>
-#include <utility>
-#include <vector>
-#include <set>
-
-namespace mir_caffe
-{
-
-namespace
-{
-
-class CaffeImporter
-{
-public:
- /// @brief Load the model and convert it into a MIR Graph.
- std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename);
- std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename);
-
-private:
- std::unique_ptr<mir::Graph> importModel();
-
- std::unique_ptr<caffe::NetParameter> _net;
- std::unique_ptr<CaffeOpCreator> _opCreator;
-
- // Maps Caffe blob names to corresponding MIR operation outputs.
- std::map<std::string, mir::Operation::Output *> _blobNameToOpOutput;
-
- static const std::map<std::string, CaffeOpType> _operatorTypes;
-
- /**
- * @brief Mark output MIR nodes
- */
- void setGraphOutputs(mir::Graph *graph);
-
- /**
- * @brief Pass through caffe graph and collect unsupported by NNC layers
- * @throw PassException with message, containing detected problems
- */
- void collectUnsupportedLayers();
-
- /**
- * @brief Create MIR node from single caffe layer
- */
- void createMIRNodesFromLayer(const caffe::LayerParameter &layer);
-
- mir::Operation::Output *getOutputForBlob(const std::string &blob_name) const;
- void setOutputForBlob(const std::string &blob_name, mir::Operation::Output *output);
-
- /**
- * @brief Collect unsupported parts of caffe layer
- */
- void collectUnsupportedOp(const caffe::LayerParameter &layer, std::set<std::string> &problems);
-
- /**
- * @brief Returns MIR operation outputs corresponding to the inputs of the given layer.
- */
- std::vector<mir::Operation::Output *> getMIRInputsForLayer(const caffe::LayerParameter &layer);
-
- void processDeprecatedInput();
-};
-
-void loadModelFromBinaryFile(const std::string &filename, caffe::NetParameter *net)
-{
- GOOGLE_PROTOBUF_VERIFY_VERSION;
-
- int file_handle = open(filename.c_str(), O_RDONLY);
-
- if (file_handle == -1)
- throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
- ".");
-
- google::protobuf::io::FileInputStream file_stream(file_handle);
- file_stream.SetCloseOnDelete(true);
-
- google::protobuf::io::CodedInputStream coded_stream(&file_stream);
- coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);
-
- if (!net->ParseFromCodedStream(&coded_stream))
- throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
-
- // If the file has not been consumed entirely, assume that the file is in the wrong format.
- if (!coded_stream.ConsumedEntireMessage())
- throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
-}
-
-void loadModelFromTextFile(const std::string &filename, caffe::NetParameter *net)
-{
- GOOGLE_PROTOBUF_VERIFY_VERSION;
-
- int file_handle = open(filename.c_str(), O_RDONLY);
-
- if (file_handle == -1)
- throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
- ".");
-
- google::protobuf::io::FileInputStream file_stream(file_handle);
- file_stream.SetCloseOnDelete(true);
-
- if (!google::protobuf::TextFormat::Parse(&file_stream, net))
- throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
-}
-
-std::unique_ptr<mir::Graph> CaffeImporter::importModel()
-{
- auto graph = stdex::make_unique<mir::Graph>();
- _opCreator = stdex::make_unique<CaffeOpCreator>(graph.get());
-
- collectUnsupportedLayers();
-
- for (int i = 0; i < _net->layer_size(); ++i)
- createMIRNodesFromLayer(_net->layer(i));
-
- setGraphOutputs(graph.get());
-
- return std::move(graph);
-}
-
-std::unique_ptr<mir::Graph> CaffeImporter::importModelFromBinaryFile(const std::string &filename)
-{
- _net = stdex::make_unique<caffe::NetParameter>();
- loadModelFromBinaryFile(filename, _net.get());
-
- return importModel();
-}
-
-std::unique_ptr<mir::Graph> CaffeImporter::importModelFromTextFile(const std::string &filename)
-{
- _net = stdex::make_unique<caffe::NetParameter>();
- loadModelFromTextFile(filename, _net.get());
-
- return importModel();
-}
-
-void CaffeImporter::collectUnsupportedLayers()
-{
- processDeprecatedInput();
-
- std::set<std::string> problems;
-
- for (const caffe::LayerParameter &layer : _net->layer())
- collectUnsupportedOp(layer, problems);
-
- if (!problems.empty())
- {
- std::string msg("NNC can't load model. Detected problems:");
- for (const auto &problemStr : problems)
- msg.append("\n * " + problemStr);
- throw std::runtime_error(msg);
- }
-}
-
-void CaffeImporter::createMIRNodesFromLayer(const caffe::LayerParameter &layer)
-{
- std::vector<mir::Operation::Output *> inputs = getMIRInputsForLayer(layer);
- std::vector<mir::Operation::Output *> outputs;
-
- switch (_operatorTypes.at(layer.type()))
- {
- case CaffeOpType::input:
- outputs = _opCreator->convertInput(layer);
- break;
- case CaffeOpType::convolution:
- outputs = _opCreator->convertConvolution(layer, inputs);
- break;
- case CaffeOpType::innerProduct:
- outputs = _opCreator->convertInnerProduct(layer, inputs);
- break;
- case CaffeOpType::pooling:
- outputs = _opCreator->convertPooling(layer, inputs);
- break;
- case CaffeOpType::concat:
- outputs = _opCreator->convertConcat(layer, inputs);
- break;
- case CaffeOpType::reshape:
- outputs = _opCreator->convertReshape(layer, inputs);
- break;
- case CaffeOpType::ReLU:
- outputs = _opCreator->convertReLU(layer, inputs);
- break;
- case CaffeOpType::softmax:
- outputs = _opCreator->convertSoftmax(layer, inputs);
- break;
- case CaffeOpType::scale:
- outputs = _opCreator->convertScale(layer, inputs);
- break;
- case CaffeOpType::batchNorm:
- outputs = _opCreator->convertBatchNorm(layer, inputs);
- break;
- case CaffeOpType::dropout:
- outputs = _opCreator->convertDropout(layer, inputs);
- break;
- case CaffeOpType::tanh:
- outputs = _opCreator->convertTanH(layer, inputs);
- break;
- case CaffeOpType::ELU:
- outputs = _opCreator->convertELU(layer, inputs);
- break;
- case CaffeOpType::eltwise:
- outputs = _opCreator->convertEltwise(layer, inputs);
- break;
- case CaffeOpType::embed:
- outputs = _opCreator->convertEmbed(layer, inputs);
- break;
- case CaffeOpType::deconvolution:
- outputs = _opCreator->convertDeconvolution(layer, inputs);
- break;
- case CaffeOpType::split:
- outputs = _opCreator->convertSplit(layer, inputs);
- break;
- case CaffeOpType::sigmoid:
- outputs = _opCreator->convertSigmoid(layer, inputs);
- break;
- case CaffeOpType::LSTM:
- outputs = _opCreator->convertLSTM(layer, inputs);
- break;
- default:
- assert(false && "All unsupported types should have been found before this pass.");
- }
-
- assert(static_cast<int>(outputs.size()) == layer.top_size() && "Number of outputs differs.");
- for (int i = 0; i < layer.top_size(); ++i)
- setOutputForBlob(layer.top(i), outputs[i]);
-}
-
-void CaffeImporter::collectUnsupportedOp(const caffe::LayerParameter &layer,
- std::set<std::string> &problems)
-{
- auto it = _operatorTypes.find(layer.type());
- if (it == _operatorTypes.end())
- {
- problems.insert(layer.type() + ": unknown layer");
- return;
- }
-
- CaffeOpType op_type = it->second;
-
- switch (op_type)
- {
- case CaffeOpType::concat:
- case CaffeOpType::input:
- case CaffeOpType::softmax:
- case CaffeOpType::scale:
- case CaffeOpType::dropout:
- case CaffeOpType::split:
- case CaffeOpType::eltwise:
- case CaffeOpType::ELU:
- case CaffeOpType::ReLU:
- case CaffeOpType::embed:
- case CaffeOpType::sigmoid:
- case CaffeOpType::tanh:
- case CaffeOpType::innerProduct:
- // No checks
- break;
- case CaffeOpType::deconvolution:
- case CaffeOpType::convolution:
- _opCreator->checkConvolution(layer, problems);
- break;
- case CaffeOpType::pooling:
- _opCreator->checkPooling(layer, problems);
- break;
- case CaffeOpType::reshape:
- _opCreator->checkReshape(layer, problems);
- break;
- case CaffeOpType::batchNorm:
- _opCreator->checkBatchNorm(layer, problems);
- break;
- case CaffeOpType::LSTM:
- _opCreator->checkLSTM(layer, problems);
- break;
- default:
- problems.insert(layer.type() + ": unsupported layer");
- break;
- }
-}
-
-void CaffeImporter::processDeprecatedInput()
-{
- if (_net->input_dim_size() != 0 || _net->input_shape_size() != 0)
- throw std::runtime_error("Deprecated Caffe input types are not supported");
-}
-
-std::vector<mir::Operation::Output *>
-CaffeImporter::getMIRInputsForLayer(const caffe::LayerParameter &layer)
-{
- std::vector<mir::Operation::Output *> inputs;
-
- for (const auto &input_name : layer.bottom())
- inputs.push_back(getOutputForBlob(input_name));
-
- return inputs;
-}
-
-mir::Operation::Output *CaffeImporter::getOutputForBlob(const std::string &blob_name) const
-{
- return _blobNameToOpOutput.at(blob_name);
-}
-
-void CaffeImporter::setOutputForBlob(const std::string &blob_name, mir::Operation::Output *output)
-{
- const auto it = _blobNameToOpOutput.find(blob_name);
- if (it != _blobNameToOpOutput.cend())
- {
- // caffe input blob name could be same as output blob name, and next line will overwrite
- // '_blobNameToOpOutput' element, but in all networks that I saw it was not a problem
- it->second->setName("");
- }
-
- // Do not overwrite the name in case of fall-through layers (ex. Dropout, Split).
- // TODO Find a way to handle it properly.
- if (output->getName().empty())
- output->setName(blob_name);
-
- _blobNameToOpOutput[blob_name] = output;
-}
-
-void CaffeImporter::setGraphOutputs(mir::Graph *graph)
-{
- // TODO For now, we assume that:
- // - there is exactly one output;
- // - the output is from the last layer.
- const auto &last_layer = *_net->layer().rbegin();
- auto output = getOutputForBlob(last_layer.top(0));
- graph->create<mir::ops::OutputOp>(output);
-}
-
-const std::map<std::string, CaffeOpType> CaffeImporter::_operatorTypes = {
- {"AbsVal", CaffeOpType::absVal},
- {"Accuracy", CaffeOpType::accuracy},
- {"ArgMax", CaffeOpType::argMax},
- {"BatchNorm", CaffeOpType::batchNorm},
- {"BatchReindex", CaffeOpType::batchReindex},
- {"Bias", CaffeOpType::bias},
- {"BNLL", CaffeOpType::BNLL},
- {"Clip", CaffeOpType::clip},
- {"Concat", CaffeOpType::concat},
- {"ContrastiveLoss", CaffeOpType::contrastiveLoss},
- {"Convolution", CaffeOpType::convolution},
- {"Crop", CaffeOpType::crop},
- {"Data", CaffeOpType::data},
- {"Deconvolution", CaffeOpType::deconvolution},
- {"Dropout", CaffeOpType::dropout},
- {"DummyData", CaffeOpType::dummyData},
- {"Eltwise", CaffeOpType::eltwise},
- {"ELU", CaffeOpType::ELU},
- {"Embed", CaffeOpType::embed},
- {"EuclidianLoss", CaffeOpType::euclidianLoss},
- {"Exp", CaffeOpType::exp},
- {"Filter", CaffeOpType::filter},
- {"Flatten", CaffeOpType::flatten},
- {"HDF5Data", CaffeOpType::HDF5Data},
- {"HDF5Output", CaffeOpType::HDF5Output},
- {"HingeLoss", CaffeOpType::hingeLoss},
- {"Im2Col", CaffeOpType::im2Col},
- {"ImageData", CaffeOpType::imageData},
- {"InfogainLoss", CaffeOpType::infogainLoss},
- {"InnerProduct", CaffeOpType::innerProduct},
- {"Input", CaffeOpType::input},
- {"Log", CaffeOpType::log},
- {"LRN", CaffeOpType::LRN},
- {"LSTM", CaffeOpType::LSTM},
- {"MemoryData", CaffeOpType::memoryData},
- {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
- {"MVN", CaffeOpType::MVN},
- {"Parameter", CaffeOpType::parameter},
- {"Pooling", CaffeOpType::pooling},
- {"Power", CaffeOpType::power},
- {"PReLU", CaffeOpType::PReLU},
- {"Python", CaffeOpType::python},
- {"Recurrent", CaffeOpType::recurrent},
- {"Reduction", CaffeOpType::reduction},
- {"ReLU", CaffeOpType::ReLU},
- {"Reshape", CaffeOpType::reshape},
- {"RNN", CaffeOpType::RNN},
- {"Scale", CaffeOpType::scale},
- {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
- {"Sigmoid", CaffeOpType::sigmoid},
- {"Silence", CaffeOpType::silence},
- {"Softmax", CaffeOpType::softmax},
- {"SoftmaxWithLoss", CaffeOpType::softmaxWithLoss},
- {"SPP", CaffeOpType::SPP},
- {"Split", CaffeOpType::split},
- {"Slice", CaffeOpType::slice},
- {"TanH", CaffeOpType::tanh},
- {"Threshold", CaffeOpType::threshold},
- {"Tile", CaffeOpType::tile},
- {"WindowData", CaffeOpType::windowData}};
-} // namespace
-
-std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename)
-{
- CaffeImporter importer;
- return importer.importModelFromBinaryFile(filename);
-}
-
-std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename)
-{
- CaffeImporter importer;
- return importer.importModelFromTextFile(filename);
-}
-
-std::unique_ptr<mir::Graph> loadModel(const std::string &filename)
-{
- return importModelFromBinaryFile(filename);
-}
-
-} // namespace mir_caffe
diff --git a/compiler/mir-caffe-importer/caffe_op_creator.cpp b/compiler/mir-caffe-importer/caffe_op_creator.cpp
deleted file mode 100644
index 5d43d248e..000000000
--- a/compiler/mir-caffe-importer/caffe_op_creator.cpp
+++ /dev/null
@@ -1,834 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "caffe_op_creator.h"
-
-#include "mir/ops/AddOp.h"
-#include "mir/ops/AvgPool2DOp.h"
-#include "mir/ops/ConcatOp.h"
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/Conv2DOp.h"
-#include "mir/ops/Deconv2DOp.h"
-#include "mir/ops/EluOp.h"
-#include "mir/ops/FullyConnectedOp.h"
-#include "mir/ops/GatherOp.h"
-#include "mir/ops/LeakyReluOp.h"
-#include "mir/ops/MaxOp.h"
-#include "mir/ops/MaxPool2DOp.h"
-#include "mir/ops/MulOp.h"
-#include "mir/ops/ReluOp.h"
-#include "mir/ops/ReshapeOp.h"
-#include "mir/ops/SigmoidOp.h"
-#include "mir/ops/SliceOp.h"
-#include "mir/ops/SoftmaxOp.h"
-#include "mir/ops/TanhOp.h"
-#include "mir/ops/TransposeOp.h"
-#include "mir/Index.h"
-#include "mir/ShapeRange.h"
-#include "mir/Tensor.h"
-
-#include <cmath>
-#include <iostream>
-#include <set>
-
-namespace mir_caffe
-{
-
-static mir::Shape convertBlobShape(const caffe::BlobShape &shape)
-{
- mir::Shape mir_shape(shape.dim_size());
-
- for (int i = 0; i < shape.dim_size(); ++i)
- {
- mir_shape.dim(i) = shape.dim(i);
- }
-
- return mir_shape;
-}
-
-using namespace mir;
-
-/// @brief Split arg into @p num_parts equal parts along @p axis axis.
-std::vector<mir::Operation::Output *> CaffeOpCreator::createSplit(mir::Operation::Output *arg,
- int32_t num_parts, int32_t axis)
-{
- const auto &arg_shape = arg->getShape();
-
- assert(axis >= 0 && axis < arg_shape.rank());
- int32_t part_size = arg_shape.dim(axis) / num_parts;
- assert(part_size * num_parts == arg_shape.dim(axis));
-
- Shape starts(arg_shape.rank());
- Shape sizes(arg_shape);
- sizes.dim(axis) = part_size;
-
- std::vector<mir::Operation::Output *> outputs(num_parts);
- for (int32_t i = 0; i < num_parts; ++i)
- {
- outputs[i] = createOp<ops::SliceOp>(arg, starts, sizes)->getOutput(0);
- starts.dim(axis) += part_size;
- }
-
- return outputs;
-}
-
-/// @brief Helper function for creating FullyConnected operation with non-square input.
-mir::Operation::Output *CaffeOpCreator::createFullyConnected(mir::Operation::Output *input,
- mir::Operation::Output *weights,
- int32_t axis)
-{
- const auto &input_shape = input->getShape();
- const auto &weights_shape = weights->getShape();
-
- assert(axis >= 0 && axis < input_shape.rank());
- assert(weights_shape.rank() == 2);
-
- // Result shape is: input.shape[0:axis] + weights.shape[1].
- Shape result_shape = input_shape;
- result_shape.resize(axis + 1);
- result_shape.dim(axis) = weights_shape.dim(1);
-
- // Flatten input to 2-D shape.
- int32_t outer_size = 1;
- for (int32_t i = 0; i < axis; ++i)
- outer_size *= input_shape.dim(i);
- int32_t inner_size = 1;
- for (int32_t i = axis; i < input_shape.rank(); ++i)
- inner_size *= input_shape.dim(i);
-
- auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size})->getOutput(0);
- auto fc = createOp<ops::FullyConnectedOp>(flatten, weights)->getOutput(0);
- return createOp<ops::ReshapeOp>(fc, result_shape)->getOutput(0);
-}
-
-TensorVariant CaffeOpCreator::convertBlob(const caffe::BlobProto &blob)
-{
- const void *src_data;
-
- mir::DataType dtype;
- if (blob.data_size() != 0)
- {
- assert(blob.double_data_size() == 0);
- dtype = mir::DataType::FLOAT32;
- src_data = blob.data().data();
- }
- else if (blob.double_data_size() != 0)
- {
- dtype = mir::DataType::FLOAT64;
- src_data = blob.double_data().data();
- }
- else
- {
- throw std::runtime_error("No data in Caffe BlobProto, investigate");
- }
-
- const mir::Shape shape = convertBlobShape(blob.shape());
- return TensorVariant({dtype, shape}, src_data);
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertInput(const caffe::LayerParameter &layer)
-{
- const auto &params = layer.input_param();
- const auto num_inputs = layer.top_size();
- const auto num_shapes = params.shape_size();
- std::vector<mir::Operation::Output *> outputs;
-
- assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes.");
-
- for (int i = 0; i < num_inputs; ++i)
- {
- const auto &blob_shape = params.shape(num_shapes == 1 ? 0 : i);
- mir::TensorType input_type(DataType::FLOAT32, convertBlobShape(blob_shape));
- auto input = createOp<ops::InputOp>(input_type)->getOutput(0);
- outputs.push_back(input);
- }
-
- return outputs;
-}
-
-template <class OperationAttributes>
-static void convertConvolutionParam(const caffe::ConvolutionParameter &conv_param,
- OperationAttributes &attributes)
-{
- std::int32_t stride_h, stride_w;
- if (conv_param.has_stride_h() || conv_param.has_stride_w())
- {
- // If stride_h or stride_w are set, they take precedence.
- stride_h = conv_param.stride_h();
- stride_w = conv_param.stride_w();
- }
- else if (conv_param.stride_size() == 0)
- {
- // If no strides specified, they defaults to 1.
- stride_h = stride_w = 1;
- }
- else if (conv_param.stride_size() == 1)
- {
- // If only one stride specified, all strides take the same value.
- stride_h = stride_w = conv_param.stride(0);
- }
- else
- {
- // Otherwise, there must be a stride for each dimension.
- assert(conv_param.stride_size() == 2);
- stride_h = conv_param.stride(0);
- stride_w = conv_param.stride(1);
- }
- attributes.strides = {stride_h, stride_w};
-
- std::int32_t pad_h, pad_w;
- if (conv_param.has_pad_h() || conv_param.has_pad_w())
- {
- // If pad_h or pad_w are set, they take precedence.
- pad_h = conv_param.pad_h();
- pad_w = conv_param.pad_w();
- }
- else if (conv_param.pad_size() == 0)
- {
- // If no pads specified, they defaults to 0.
- pad_h = pad_w = 0;
- }
- else if (conv_param.pad_size() == 1)
- {
- // If only one pad specified, all pads take the same value.
- pad_h = pad_w = conv_param.pad(0);
- }
- else
- {
- // Otherwise, there must be a pad for each dimension.
- assert(conv_param.pad_size() == 2);
- pad_h = conv_param.pad(0);
- pad_w = conv_param.pad(1);
- }
- attributes.padding_after = attributes.padding_before = {pad_h, pad_w};
-}
-
-void CaffeOpCreator::checkConvolution(const caffe::LayerParameter &layer,
- std::set<std::string> &problems_ops_set)
-{
- const caffe::ConvolutionParameter &params = layer.convolution_param();
-
- assert(params.stride_size() <= 2);
-
- if (params.axis() != 1)
- problems_ops_set.insert("Conv2D: Unsupported axis");
-
- if (params.pad_size() != 0 && (params.has_pad_h() || params.has_pad_w()))
- problems_ops_set.insert("Conv2D: Conflicting padding properties");
-
- if (params.pad_size() > 2)
- problems_ops_set.insert("Conv2D: Unsupported number of pads");
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertConvolution(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.convolution_param();
- Conv2DOpAttributes attributes;
-
- convertConvolutionParam(params, attributes);
- attributes.num_groups = params.group();
- attributes.data_format = DataFormat::NCHW;
-
- assert(layer.blobs(0).shape().dim_size() == 4);
- auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
- std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
- kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
- auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
-
- // Add the bias, if any.
- if (params.bias_term())
- {
- auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
- bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const caffe::ConvolutionParameter &params = layer.convolution_param();
- Deconv2DOpAttributes attributes;
-
- convertConvolutionParam(params, attributes);
- attributes.data_format = DataFormat::NCHW;
-
- if (params.group() != 1)
- {
- throw std::runtime_error("Deconvolution: 'group' != 1 is not supported.");
- }
-
- auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
- std::vector<std::size_t> perm{2, 3, 1, 0}; // IOHW -> HWOI
- kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
- auto result = createOp<ops::DeConv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
-
- // bias_term is optional (so might not be present) and defaults to true
- if (params.bias_term())
- {
- auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
- bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertInnerProduct(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.inner_product_param();
- auto weights = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
-
- if (!params.transpose())
- weights = createOp<ops::TransposeOp>(weights, std::vector<std::size_t>{1, 0})->getOutput(0);
-
- auto result = createFullyConnected(inputs[0], weights, params.axis());
-
- // Add the bias, if any.
- if (params.bias_term())
- {
- auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertConcat(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.concat_param();
- auto concat = createOp<ops::ConcatOp>(inputs, params.axis());
- return {concat->getOutput(0)};
-}
-
-template <class PoolingAttributes>
-static void convertPoolingParam(const caffe::PoolingParameter &params,
- const mir::Shape &input_shape, PoolingAttributes &attributes)
-{
- std::int32_t kernel_h, kernel_w;
- assert(!params.global_pooling());
- if (params.has_kernel_size())
- {
- kernel_h = kernel_w = params.kernel_size();
- }
- else
- {
- kernel_h = params.kernel_h();
- kernel_w = params.kernel_w();
- }
- attributes.window = {kernel_h, kernel_w};
-
- std::int32_t stride_h, stride_w;
- if (params.has_stride_h() || params.has_stride_w())
- {
- stride_h = params.stride_h();
- stride_w = params.stride_w();
- }
- else
- {
- stride_h = stride_w = params.stride();
- }
- attributes.strides = {stride_h, stride_w};
-
- std::int32_t pad_h, pad_w;
- if (params.has_pad_h() || params.has_pad_w())
- {
- pad_h = params.pad_h();
- pad_w = params.pad_w();
- }
- else
- {
- pad_h = pad_w = params.pad();
- }
-
- attributes.padding_before = attributes.padding_after = {pad_h, pad_w};
-
- // Caffe uses different formula for computing output shape than MIR. Adjust padding so that
- // the output shape stays the same.
- constexpr int num_spatial_dims = 2;
- for (int i = 0; i < num_spatial_dims; ++i)
- {
- // Assuming NCHW format.
- const std::int32_t padded_input =
- input_shape.dim(2 + i) + attributes.padding_before[i] + attributes.padding_after[i];
- if ((padded_input - attributes.window[i]) % attributes.strides[i] != 0)
- ++attributes.padding_after[i];
- }
-}
-
-void CaffeOpCreator::checkPooling(const caffe::LayerParameter &layer,
- std::set<std::string> &problems_ops_set)
-{
- const caffe::PoolingParameter &params = layer.pooling_param();
-
- if (params.has_global_pooling() && params.global_pooling())
- problems_ops_set.insert("Pooling: pooling layer global_pooling param is not supported yet");
-
- if (params.pool() != caffe::PoolingParameter::AVE &&
- params.pool() != caffe::PoolingParameter::MAX)
- problems_ops_set.insert("Pooling: unsupported pooling type");
-
- if (params.has_pad() && (params.has_pad_h() || params.has_pad_w()))
- problems_ops_set.insert("Pooling: conflicting padding properties in pooling");
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertPooling(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.pooling_param();
-
- assert(inputs.size() == 1);
- auto input = inputs[0];
-
- mir::Operation::Output *result;
-
- switch (params.pool())
- {
- case caffe::PoolingParameter::AVE:
- {
- AvgPool2DOpAttributes attributes_avg;
- attributes_avg.data_format = DataFormat::NCHW;
- convertPoolingParam(params, input->getShape(), attributes_avg);
- result = createOp<ops::AvgPool2DOp>(input, attributes_avg)->getOutput(0);
- break;
- }
- case caffe::PoolingParameter::MAX:
- {
- MaxPool2DOpAttributes attributes_max;
- attributes_max.data_format = DataFormat::NCHW;
- convertPoolingParam(params, input->getShape(), attributes_max);
- result = createOp<ops::MaxPool2DOp>(input, attributes_max)->getOutput(0);
- break;
- }
- default:
- assert(false);
- }
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertSoftmax(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.softmax_param();
-
- // CPP and ACL backends are able to perform Softmax only along the last axis.
- // FIXME Do it in backends.
- if (inputs[0]->getShape().rank() == 4)
- {
- // For now, we only account for the most common case.
- if (params.axis() != 1)
- throw std::runtime_error("Softmax: unsupported axis");
- int32_t axis = 3;
- auto input = createOp<ops::TransposeOp>(inputs[0], std::vector<std::size_t>{0, 2, 3, 1});
- auto softmax = createOp<ops::SoftmaxOp>(input->getOutput(0), axis);
- auto result =
- createOp<ops::TransposeOp>(softmax->getOutput(0), std::vector<std::size_t>{0, 3, 1, 2});
- return {result->getOutput(0)};
- }
-
- auto softmax = createOp<ops::SoftmaxOp>(inputs[0], params.axis());
- return {softmax->getOutput(0)};
-}
-
-void CaffeOpCreator::checkReshape(const caffe::LayerParameter &layer,
- std::set<std::string> &problems_ops_set)
-{
- const caffe::ReshapeParameter &params = layer.reshape_param();
-
- if (params.has_axis() || params.has_num_axes())
- problems_ops_set.insert("Reshape layer axis and num_axes params are not supported yet");
-
- if (!params.has_shape())
- problems_ops_set.insert("Reshape layer doesn't have shape parameter");
-
- const mir::Shape newShape = convertBlobShape(params.shape());
-
- for (int32_t i = 0; i < newShape.rank(); ++i)
- if (newShape.dim(i) == 0)
- problems_ops_set.insert("Reshape layer zero shape values are not supported yet");
-}
-
-/**
- * @brief Converts Caffe Reshape layer to Model IR Reshape operation.
- * @todo Support "axis" and "num_axes" parameters as needed.
- * @todo Decide how to react to the absence of "shape" parameter.
- * @todo Support zero values in "shape" parameter.
- */
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertReshape(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const caffe::ReshapeParameter &params = layer.reshape_param();
-
- const mir::Shape new_shape = convertBlobShape(params.shape());
- auto reshape = createOp<ops::ReshapeOp>(inputs[0], new_shape);
- return {reshape->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertReLU(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- mir::Operation *relu;
- if (layer.relu_param().has_negative_slope())
- {
- float alpha = layer.relu_param().negative_slope();
- relu = createOp<ops::LeakyReluOp>(inputs[0], alpha);
- }
- else
- {
- relu = createOp<ops::ReluOp>(inputs[0]);
- }
-
- return {relu->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertScale(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.scale_param();
- auto scale = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
- scale = createOp<ops::ReshapeOp>(scale, Shape{1, scale->getShape().dim(0), 1, 1})->getOutput(0);
- auto result = createOp<ops::MulOp>(inputs[0], scale)->getOutput(0);
-
- // Add the bias, if any.
- if (params.bias_term())
- {
- auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
- bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
-
- return {result};
-}
-
-void CaffeOpCreator::checkBatchNorm(const caffe::LayerParameter &layer,
- std::set<std::string> &problems_ops_set)
-{
- const auto &scale_shape = layer.blobs(2).shape();
-
- // Check that last blob(with scaleFactor) containing only one number
- if (scale_shape.dim_size() != 1 || scale_shape.dim(0) != 1)
- problems_ops_set.insert("Unexpected shape of scale parameter in batch norm");
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const caffe::BatchNormParameter &params = layer.batch_norm_param();
-
- auto input = inputs[0];
- auto mean_tensor = convertBlob(layer.blobs(0));
- auto var_tensor = convertBlob(layer.blobs(1));
- auto scale_tensor = convertBlob(layer.blobs(2));
- const float eps = params.eps();
-
- float scale_factor = *reinterpret_cast<float *>(scale_tensor.at(mir::Index{0}));
-
- // See https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100
- // Y = (X - mean / scale_factor) / sqrt(var / scale_factor + epsilon) =
- // = (X + C1) * C2
- if (scale_factor != 0.0f)
- scale_factor = 1.0f / scale_factor;
-
- // C1 = -mean / scale_factor
- Tensor<float> mean_accessor(mean_tensor);
- for (const auto &idx : ShapeRange(mean_accessor.getShape()))
- mean_accessor.at(idx) *= -scale_factor;
- auto c1 = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);
-
- // C2 = 1 / sqrt(var / scale_factor + epsilon)
- Tensor<float> var_accessor(var_tensor);
- for (const auto &idx : ShapeRange(var_accessor.getShape()))
- var_accessor.at(idx) = 1.0f / std::sqrt(var_accessor.at(idx) * scale_factor + eps);
- auto c2 = createOp<ops::ConstantOp>(var_tensor)->getOutput(0);
-
- c1 = createOp<ops::ReshapeOp>(c1, Shape{1, c1->getShape().dim(0), 1, 1})->getOutput(0);
- c2 = createOp<ops::ReshapeOp>(c2, Shape{1, c2->getShape().dim(0), 1, 1})->getOutput(0);
-
- // Y = (X + C1) * C2
- auto result = createOp<ops::AddOp>(input, c1)->getOutput(0);
- result = createOp<ops::MulOp>(result, c2)->getOutput(0);
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertDropout(const caffe::LayerParameter &,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- // This is a no-op in inference mode.
- return {inputs[0]};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertELU(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const caffe::ELUParameter &params = layer.elu_param();
-
- auto elu = createOp<ops::EluOp>(inputs[0], params.alpha());
- return {elu->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertEmbed(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.embed_param();
- auto data = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)));
- auto result = createOp<ops::GatherOp>(data->getOutput(0), inputs[0], 0)->getOutput(0);
-
- // Add the bias, if any.
- if (params.bias_term())
- {
- auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertSigmoid(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto result = createOp<ops::SigmoidOp>(inputs[0]);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertTanH(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto tanh = createOp<ops::TanhOp>(inputs[0]);
- return {tanh->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertEltwise(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto &params = layer.eltwise_param();
-
- mir::Operation::Output *result;
- switch (params.operation())
- {
- case caffe::EltwiseParameter::PROD:
- {
- result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
- for (int i = 2; i < layer.bottom_size(); ++i)
- {
- result = createOp<ops::MulOp>(result, inputs[i])->getOutput(0);
- }
- break;
- }
- case caffe::EltwiseParameter::SUM:
- {
- std::vector<mir::Operation::Output *> scaled_inputs = inputs;
- if (params.coeff_size() > 0)
- {
- assert(params.coeff_size() == layer.bottom_size());
- for (int i = 0; i < layer.bottom_size(); i++)
- {
- if (params.coeff(i) != 1.0f)
- {
- const float coeff_val = params.coeff(i);
- TensorVariant coeff_tensor({DataType::FLOAT32, {}}, &coeff_val);
- auto coeff_const = createOp<ops::ConstantOp>(coeff_tensor)->getOutput(0);
- scaled_inputs[i] = createOp<ops::MulOp>(coeff_const, inputs[i])->getOutput(0);
- }
- }
- }
- result = createOp<ops::AddOp>(scaled_inputs[0], scaled_inputs[1])->getOutput(0);
- for (int i = 2; i < layer.bottom_size(); ++i)
- {
- result = createOp<ops::AddOp>(result, scaled_inputs[i])->getOutput(0);
- }
- break;
- }
- case caffe::EltwiseParameter::MAX:
- {
- result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
- for (int i = 2; i < layer.bottom_size(); ++i)
- {
- result = createOp<ops::MaxOp>(result, inputs[i])->getOutput(0);
- }
- break;
- }
- default:
- throw std::runtime_error("Unknown element-wise operation.");
- }
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertSplit(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- std::vector<mir::Operation::Output *> outputs(layer.top_size(), inputs.at(0));
- return outputs;
-}
-
-void CaffeOpCreator::checkLSTM(const caffe::LayerParameter &layer,
- std::set<std::string> &problems_ops_set)
-{
- const auto &params = layer.recurrent_param();
- if (params.expose_hidden())
- problems_ops_set.insert("LSTM: parameter 'expose_hidden' has unsupported value: " +
- std::to_string(params.expose_hidden()));
-}
-
-static TensorVariant createZeroedTensor(const mir::Shape &shape)
-{
- // TODO For now it is hardcoded float32.
- auto elem_type = mir::DataType::FLOAT32;
- std::vector<float> zeros(static_cast<std::size_t>(shape.numElements()), 0.0f);
- return TensorVariant({elem_type, shape}, zeros.data());
-}
-
-/* See the following links for details on implementation:
- * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/recurrent_layer.cpp
- * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lstm_layer.cpp
- * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lstm_unit_layer.cpp
- *
- * Inputs:
- * x -- The time-varying input. Shape: [T, N, d0, d1, ..., dn].
- * cont -- The sequence continuation indicators. Shape: [T, N].
- * x_static -- The static (non-time-varying) input. Shape: [N, ...].
- * This parameter is optional and not currently supported.
- *
- * Additional inputs when parameter "expose_hidden" is true (not currently supported):
- * h_0 -- The initial value of the hidden state. Shape: [1, N, D].
- * c_0 -- The initial value of the cell state. Shape: [1, N, D].
- *
- * Learned parameters:
- * xw -- x weights for input, output, forget and cell gates concatenated.
- * Shape: [4 * D, d0 * d1 * ... * dn].
- * xb -- x biases for input, output, forget and cell gates concatenated. Shape: [4 * D].
- * hw -- h weights for input, output, forget and cell gates concatenated. Shape: [4 * D, D].
- *
- * Outputs:
- * h -- The time-varying output. Shape: [T, N, D].
- *
- * Additional outputs when parameter "expose_hidden" is true (not currently supported):
- * h_T -- The value of the hidden state at the last timestep. Shape: [1, N, D].
- * c_T -- The value of the cell state at the last timestep. Shape: [1, N, D].
- *
- * Here:
- * T - the number of timesteps,
- * N - the number of independent streams.
- * D - the number of hidden parameters.
- *
- * Formulas:
- * c_cont = c[t-1] * cont[t]
- * h_cont = h[t-1] * cont[t]
- * i[t] = Sigmoid(x[t] . xw_i + xb_i + h_cont . hw_i)
- * f[t] = Sigmoid(x[t] . xw_f + xb_f + h_cont . hw_f)
- * o[t] = Sigmoid(x[t] . xw_o + xb_o + h_cont . hw_o)
- * g[t] = Tanh(x[t] . xw_g + xb_g + h_cont . hw_g)
- * c[t] = c_cont * f[t] + i[t] * g[t]
- * h[t] = o[t] * Tanh(c[t])
- *
- * Here:
- * t -- the timestep (ranges from 1 to T),
- * * -- the inner product,
- * . -- the Hadamard product (elementwise product).
- *
- * In this implementation the inner products for all gates are performed as single inner product for
- * efficiency.
- */
-std::vector<mir::Operation::Output *>
-CaffeOpCreator::convertLSTM(const caffe::LayerParameter &layer,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &params = layer.recurrent_param();
-
- // Inputs to the layer.
- auto x = inputs[0];
- auto cont = inputs[1];
- assert(inputs.size() == 2);
-
- const auto &x_shape = x->getShape();
- const int32_t seq_length = x_shape.dim(0);
- const int32_t batch_size = x_shape.dim(1);
- const int32_t hidden_size = params.num_output();
-
- // Learned parameters of the layer. Tensors are transposed to match the ModelIR.
- auto xw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
- auto xb = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
- auto hw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(2)))->getOutput(0);
- xw = createOp<ops::TransposeOp>(xw, std::vector<std::size_t>{1, 0})->getOutput(0);
- hw = createOp<ops::TransposeOp>(hw, std::vector<std::size_t>{1, 0})->getOutput(0);
-
- // Add a dummy dimension so that element-wise operations perform properly.
- cont = createOp<ops::ReshapeOp>(cont, Shape{seq_length, batch_size, 1})->getOutput(0);
-
- // Initialize cell and hidden states with zeros.
- auto zero_tensor = createZeroedTensor(Shape{1, batch_size, hidden_size});
- auto c_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);
- auto h_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);
-
- auto x_xw = createFullyConnected(x, xw, 2);
- auto x_xw_b = createOp<ops::AddOp>(x_xw, xb)->getOutput(0);
-
- // Split input and continuation tensors into seq_length slices.
- std::vector<mir::Operation::Output *> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
- std::vector<mir::Operation::Output *> cont_slices = createSplit(cont, seq_length, 0);
- std::vector<mir::Operation::Output *> h_slices(seq_length);
-
- for (int32_t t = 0; t < seq_length; t++)
- {
- auto c_cont_t = createOp<ops::MulOp>(c_t, cont_slices[t])->getOutput(0);
- auto h_cont_t = createOp<ops::MulOp>(h_t, cont_slices[t])->getOutput(0);
-
- auto x_xw_b_t = x_xw_b_slices[t];
- auto h_hw_t = createFullyConnected(h_cont_t, hw, 2);
- auto activation_inputs_concat = createOp<ops::AddOp>(x_xw_b_t, h_hw_t)->getOutput(0);
- auto activation_inputs = createSplit(activation_inputs_concat, 4, 2);
-
- auto i_t = createOp<ops::SigmoidOp>(activation_inputs[0])->getOutput(0);
- auto f_t = createOp<ops::SigmoidOp>(activation_inputs[1])->getOutput(0);
- auto o_t = createOp<ops::SigmoidOp>(activation_inputs[2])->getOutput(0);
- auto g_t = createOp<ops::TanhOp>(activation_inputs[3])->getOutput(0);
-
- c_t = createOp<ops::AddOp>(createOp<ops::MulOp>(c_cont_t, f_t)->getOutput(0),
- createOp<ops::MulOp>(i_t, g_t)->getOutput(0))
- ->getOutput(0);
- h_t = createOp<ops::MulOp>(createOp<ops::TanhOp>(c_t)->getOutput(0), o_t)->getOutput(0);
-
- h_slices[t] = h_t;
- }
-
- return {createOp<ops::ConcatOp>(h_slices, 0)->getOutput(0)};
-}
-
-} // namespace mir_caffe
diff --git a/compiler/mir-caffe-importer/requires.cmake b/compiler/mir-caffe-importer/requires.cmake
deleted file mode 100644
index 1059c50d3..000000000
--- a/compiler/mir-caffe-importer/requires.cmake
+++ /dev/null
@@ -1 +0,0 @@
-require("mir")
diff --git a/compiler/mir-caffe2-importer/CMakeLists.txt b/compiler/mir-caffe2-importer/CMakeLists.txt
deleted file mode 100644
index da55839a7..000000000
--- a/compiler/mir-caffe2-importer/CMakeLists.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-nnas_find_package(PytorchSource QUIET)
-nnas_find_package(Protobuf QUIET)
-
-if (NOT PytorchSource_FOUND OR NOT Protobuf_FOUND)
- return()
-endif()
-
-Protobuf_Generate(CAFFE2_PROTO "${CMAKE_CURRENT_BINARY_DIR}/generated/caffe2"
- "${PytorchSource_DIR}" "caffe2/proto/caffe2.proto")
-
-add_library(caffe2proto STATIC ${CAFFE2_PROTO_SOURCES})
-set_target_properties(caffe2proto PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(caffe2proto PUBLIC ${CAFFE2_PROTO_INCLUDE_DIRS})
-target_link_libraries(caffe2proto PUBLIC libprotobuf)
-
-
-set(MIR_CAFFE2_IMPORTER_SOURCES
- caffe2_importer.cpp
- caffe2_importer.h
- caffe2_op_creator.cpp
- caffe2_op_creator.h
- caffe2_op_types.h
- caffe2_proto_helper.cpp
- caffe2_proto_helper.h)
-
-add_library(mir_caffe2_importer STATIC ${MIR_CAFFE2_IMPORTER_SOURCES})
-set_target_properties(mir_caffe2_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(mir_caffe2_importer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
-target_link_libraries(mir_caffe2_importer PUBLIC mir caffe2proto PRIVATE stdex)
diff --git a/compiler/mir-caffe2-importer/caffe2_importer.cpp b/compiler/mir-caffe2-importer/caffe2_importer.cpp
deleted file mode 100644
index 5a6eef0aa..000000000
--- a/compiler/mir-caffe2-importer/caffe2_importer.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "caffe2_importer.h"
-#include "caffe2/proto/caffe2.pb.h"
-#include "caffe2_op_types.h"
-#include "caffe2_op_creator.h"
-#include "caffe2_proto_helper.h"
-
-#include "mir/ops/InputOp.h"
-#include "mir/ops/OutputOp.h"
-
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <google/protobuf/io/coded_stream.h>
-
-#include <fcntl.h>
-
-#include <cassert>
-#include <cerrno>
-#include <cstring>
-#include <stdex/Memory.h>
-#include <stdexcept>
-#include <utility>
-#include <set>
-
-namespace
-{
-
-using namespace mir_caffe2;
-
-class Caffe2Importer
-{
-public:
- explicit Caffe2Importer(std::string predict_net, std::string init_net,
- const std::vector<std::vector<int>> &input_shapes);
-
- /// @brief Load the model and convert it into a MIR Graph.
- std::unique_ptr<mir::Graph> importModel();
-
- ~Caffe2Importer();
-
-private:
- std::string _predictNet;
- std::string _initNet;
- std::unique_ptr<mir::Graph> _graph;
- std::unique_ptr<caffe2::NetDef> _predict_net;
- std::unique_ptr<caffe2::NetDef> _init_net;
- std::unique_ptr<Caffe2OpCreator> _opCreator;
- std::vector<mir::Shape> _inputShapes;
-
- static const std::map<std::string, SupportedCaffe2OpType> _operatorTypes;
-
- // Maps Caffe2 operator input names to corresponding MIR operation outputs.
- std::unordered_map<std::string, mir::Operation::Output *> _blobNameToOutput;
-
- void import();
- std::unique_ptr<mir::Graph> createIR();
-
- /**
- * @brief Pass through caffe2 graph and collect ops unsupported by NNC
- * @throw PassException with message, containing detected problems
- */
- void collectUnsupportedOps();
-
- /**
- * @brief Creating MIR node from single caffe2 operator
- */
- void createMIRNodesFromOp(const ::caffe2::OperatorDef &op);
-
- /**
- * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
- */
- std::vector<mir::Operation::Output *> getInputMIROps(const ::caffe2::OperatorDef &op);
-
- void setOutputForTensor(const std::string &tensor_name, Operation::Output *output);
- mir::Operation::Output *getOutputForTensor(const std::string &name) const;
-
- /**
- * @brief Mark output MIR nodes
- */
- void setGraphOutputs();
-};
-
-using namespace ::caffe2;
-using mir::Shape;
-
-Caffe2Importer::Caffe2Importer(std::string predict_net, std::string init_net,
- const std::vector<std::vector<int>> &input_shapes)
- : _predictNet(std::move(predict_net)), _initNet(std::move(init_net))
-{
- for (auto &shape : input_shapes)
- _inputShapes.emplace_back(shape);
-
- _graph = stdex::make_unique<mir::Graph>();
- _opCreator = stdex::make_unique<Caffe2OpCreator>(_graph.get());
-}
-
-Caffe2Importer::~Caffe2Importer() = default;
-
-static void loadModelFile(const std::string &filename, caffe2::NetDef *net)
-{
- GOOGLE_PROTOBUF_VERIFY_VERSION;
-
- int file_handle = open(filename.c_str(), O_RDONLY);
-
- if (file_handle == -1)
- throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
- ".");
-
- google::protobuf::io::FileInputStream file_stream(file_handle);
- file_stream.SetCloseOnDelete(true);
-
- google::protobuf::io::CodedInputStream coded_stream(&file_stream);
- coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);
-
- if (!net->ParseFromCodedStream(&coded_stream))
- throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
-
- // If the file has not been consumed entirely, assume that the file is in the wrong format.
- if (!coded_stream.ConsumedEntireMessage())
- throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
-}
-
-void Caffe2Importer::import()
-{
- _predict_net = stdex::make_unique<NetDef>();
- loadModelFile(_predictNet, _predict_net.get());
-
- _init_net = stdex::make_unique<NetDef>();
- loadModelFile(_initNet, _init_net.get());
-
- collectUnsupportedOps();
-}
-
-std::unique_ptr<mir::Graph> Caffe2Importer::createIR()
-{
- // Load initializers.
- for (const auto &op : _init_net->op())
- createMIRNodesFromOp(op);
-
- // Create inputs. This has to be done after processing initializers, because they may contain
- // fake inputs.
- // TODO Caffe2 does not provide a way to detect model inputs and outputs. For now assume that:
- // - there is exactly one input;
- // - the input is for the first layer;
- // - the input has 'float' element type.
- const auto &input_name = _predict_net->op(0).input(0);
- mir::TensorType input_type(mir::DataType::FLOAT32, _inputShapes[0]);
- auto input = _graph->create<mir::ops::InputOp>(input_type)->getOutput(0);
- setOutputForTensor(input_name, input);
-
- for (const auto &op : _predict_net->op())
- createMIRNodesFromOp(op);
-
- setGraphOutputs();
-
- return std::move(_graph);
-}
-
-std::unique_ptr<mir::Graph> Caffe2Importer::importModel()
-{
- import();
- return createIR();
-}
-
-void Caffe2Importer::collectUnsupportedOps()
-{
- std::set<std::string> unsupportedOps;
- for (const auto &op : _predict_net->op())
- {
- if (_operatorTypes.find(op.type()) == _operatorTypes.end())
- unsupportedOps.insert(op.type());
- }
-
- if (!unsupportedOps.empty())
- {
- std::string exceptionMsg("Can't load model, unsupported operators:");
- for (const auto &op : unsupportedOps)
- exceptionMsg.append("\n * " + op);
- throw std::runtime_error(exceptionMsg);
- }
-}
-
-void Caffe2Importer::createMIRNodesFromOp(const OperatorDef &op)
-{
- std::vector<mir::Operation::Output *> outputs;
-
- auto inputs = getInputMIROps(op);
-
- SupportedCaffe2OpType opType = _operatorTypes.at(op.type());
- switch (opType)
- {
- case SupportedCaffe2OpType::constantFill:
- case SupportedCaffe2OpType::givenTensorFill:
- case SupportedCaffe2OpType::givenTensorInt64Fill:
- outputs = _opCreator->convertConstant(inputs, op);
- break;
- case SupportedCaffe2OpType::add:
- outputs = _opCreator->convertAdd(inputs, op);
- break;
- case SupportedCaffe2OpType::averagePool:
- outputs = _opCreator->convertAveragePool(inputs, op);
- break;
- case SupportedCaffe2OpType::conv:
- outputs = _opCreator->convertConv(inputs, op);
- break;
- case SupportedCaffe2OpType::concat:
- outputs = _opCreator->convertConcat(inputs, op);
- break;
- case SupportedCaffe2OpType::dropout:
- outputs = _opCreator->convertDropout(inputs, op);
- break;
- case SupportedCaffe2OpType::FC:
- outputs = _opCreator->convertFC(inputs, op);
- break;
- case SupportedCaffe2OpType::maxPool:
- outputs = _opCreator->convertMaxPool(inputs, op);
- break;
- case SupportedCaffe2OpType::mul:
- outputs = _opCreator->convertMul(inputs, op);
- break;
- case SupportedCaffe2OpType::relu:
- outputs = _opCreator->convertRelu(inputs);
- break;
- case SupportedCaffe2OpType::resizeNearest:
- outputs = _opCreator->convertResizeNearest(inputs, op);
- break;
- case SupportedCaffe2OpType::sigmoid:
- outputs = _opCreator->convertSigmoid(inputs);
- break;
- case SupportedCaffe2OpType::softmax:
- outputs = _opCreator->convertSoftmax(inputs, op);
- break;
- case SupportedCaffe2OpType::spatialBN:
- outputs = _opCreator->convertSpatialBN(inputs, op);
- break;
- case SupportedCaffe2OpType::sum:
- outputs = _opCreator->convertSum(inputs);
- break;
- case SupportedCaffe2OpType::clip:
- outputs = _opCreator->convertClip(inputs, op);
- break;
- case SupportedCaffe2OpType::reshape:
- outputs = _opCreator->convertReshape(inputs, op);
- break;
- default:
- assert(false && "All unsupported types should have been found before this pass.");
- }
-
- for (size_t i = 0; i < outputs.size(); ++i)
- {
- setOutputForTensor(op.output(i), outputs[i]);
- }
-}
-
-std::vector<mir::Operation::Output *> Caffe2Importer::getInputMIROps(const OperatorDef &op)
-{
- std::vector<mir::Operation::Output *> inputs;
-
- for (const auto &input_name : op.input())
- {
- inputs.push_back(getOutputForTensor(input_name));
- }
-
- return inputs;
-}
-
-void Caffe2Importer::setOutputForTensor(const std::string &tensor_name, Operation::Output *output)
-{
- auto it = _blobNameToOutput.find(tensor_name);
- if (it != _blobNameToOutput.cend())
- {
- // caffe2 input blob name could be same as output blob name, and next line will overwrite
- // '_blobNameToOpOutput' element, but in all networks that I saw it was not a problem
- it->second->setName("");
- }
- output->setName(tensor_name);
- _blobNameToOutput[tensor_name] = output;
-}
-
-mir::Operation::Output *Caffe2Importer::getOutputForTensor(const std::string &name) const
-{
- return _blobNameToOutput.at(name);
-}
-
-void Caffe2Importer::setGraphOutputs()
-{
- // Create outputs.
- // TODO Caffe2 does not provide a way to detect model inputs and outputs. For now assume that:
- // - there is exactly one output;
- // - the output is from the last layer.
- const auto &output_name = _predict_net->op().rbegin()->output(0);
- auto output = getOutputForTensor(output_name);
- _graph->create<mir::ops::OutputOp>(output);
-}
-
-const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
- {"Add", SupportedCaffe2OpType::add},
- {"AveragePool", SupportedCaffe2OpType::averagePool},
- {"Conv", SupportedCaffe2OpType::conv},
- {"Concat", SupportedCaffe2OpType::concat},
- {"ConstantFill", SupportedCaffe2OpType::constantFill},
- {"Dropout", SupportedCaffe2OpType::dropout},
- {"FC", SupportedCaffe2OpType::FC},
- {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
- {"MaxPool", SupportedCaffe2OpType::maxPool},
- {"Mul", SupportedCaffe2OpType::mul},
- {"Relu", SupportedCaffe2OpType::relu},
- {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
- {"Sigmoid", SupportedCaffe2OpType::sigmoid},
- {"Softmax", SupportedCaffe2OpType::softmax},
- {"SpatialBN", SupportedCaffe2OpType::spatialBN},
- {"Sum", SupportedCaffe2OpType::sum},
- {"Clip", SupportedCaffe2OpType::clip},
- {"Reshape", SupportedCaffe2OpType::reshape},
- {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
-};
-}
-
-namespace mir_caffe2
-{
-
-std::unique_ptr<mir::Graph> loadModel(std::string predict_net, std::string init_net,
- const std::vector<std::vector<int>> &input_shapes)
-{
- Caffe2Importer importer(std::move(predict_net), std::move(init_net), input_shapes);
- return importer.importModel();
-}
-
-} // namespace mir_caffe2
diff --git a/compiler/mir-caffe2-importer/caffe2_op_creator.cpp b/compiler/mir-caffe2-importer/caffe2_op_creator.cpp
deleted file mode 100644
index d279fb1ed..000000000
--- a/compiler/mir-caffe2-importer/caffe2_op_creator.cpp
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "caffe2_op_creator.h"
-#include "caffe2_proto_helper.h"
-
-#include "mir/ops/AddOp.h"
-#include "mir/ops/AvgPool2DOp.h"
-#include "mir/ops/CappedReluOp.h"
-#include "mir/ops/ConcatOp.h"
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/Conv2DOp.h"
-#include "mir/ops/FullyConnectedOp.h"
-#include "mir/ops/MaxPool2DOp.h"
-#include "mir/ops/MulOp.h"
-#include "mir/ops/ReluOp.h"
-#include "mir/ops/ReshapeOp.h"
-#include "mir/ops/ResizeOp.h"
-#include "mir/ops/SigmoidOp.h"
-#include "mir/ops/SoftmaxOp.h"
-#include "mir/ops/TransposeOp.h"
-
-#include "mir/Index.h"
-#include "mir/Shape.h"
-#include "mir/ShapeRange.h"
-#include "mir/Tensor.h"
-#include "mir/TensorUtil.h"
-
-#include <cmath>
-#include <vector>
-
-namespace mir_caffe2
-{
-
-using namespace ::caffe2;
-using namespace mir;
-
-//
-// Helper functions
-//
-
-static std::pair<std::vector<int32_t>, std::vector<int32_t>>
-getPadding(const ::caffe2::OperatorDef &op)
-{
-
- if (hasArgument(op.arg(), "pads"))
- {
- // pads order: t l b r
- auto pads_arg = findArgumentByName(op.arg(), "pads");
-
- std::vector<int32_t> paddings;
- for (const auto &pad : pads_arg.ints())
- paddings.push_back(static_cast<int32_t>(pad));
-
- assert(paddings.size() == 4);
-
- int32_t pad_t = paddings[0];
- int32_t pad_l = paddings[1];
- int32_t pad_b = paddings[2];
- int32_t pad_r = paddings[3];
-
- std::vector<int32_t> padding_before{pad_t, pad_l};
- std::vector<int32_t> padding_after{pad_b, pad_r};
- return {padding_before, padding_after};
- }
-
- bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
- hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
-
- if (has_custom_pad)
- {
- int32_t pad_l = getSingleArgument(op, "pad_l", 0);
- int32_t pad_t = getSingleArgument(op, "pad_t", 0);
- int32_t pad_r = getSingleArgument(op, "pad_r", 0);
- int32_t pad_b = getSingleArgument(op, "pad_b", 0);
-
- std::vector<int32_t> padding_before{pad_t, pad_l};
- std::vector<int32_t> padding_after{pad_b, pad_r};
- return {padding_before, padding_after};
- }
-
- int32_t pad = getSingleArgument(op, "pad", 0);
- return {{pad, pad}, {pad, pad}};
-}
-
-static std::vector<std::int32_t> getStrides(const ::caffe2::OperatorDef &op)
-{
- std::vector<std::int32_t> strides;
-
- if (hasArgument(op.arg(), "stride"))
- {
- std::int32_t stride = getSingleArgument(op, "stride", 1);
- strides = {stride, stride};
- }
-
- if (hasArgument(op.arg(), "strides"))
- {
- // strides order: h w
- auto strides_arg = findArgumentByName(op.arg(), "strides");
- for (const auto &s : strides_arg.ints())
- strides.push_back(s);
- }
-
- assert(!strides.empty() && "Strides not found");
-
- return strides;
-}
-
-static std::vector<std::int32_t> getWindowSize(const ::caffe2::OperatorDef &op,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- int is_global_pooling = getSingleArgument(op, "global_pooling", 0);
- bool has_custom_kernel_size =
- hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
- bool has_custom_kernels_size = hasArgument(op.arg(), "kernels");
-
- int kernel_h(0), kernel_w(0);
- if (is_global_pooling)
- {
- const auto &input_shape = inputs[0]->getShape();
- assert(input_shape.rank() == 4 && "getWindowSize() inputs must be of rank 4");
- kernel_h = input_shape.dim(2);
- kernel_w = input_shape.dim(3);
- }
- else
- {
- if (has_custom_kernel_size)
- {
- kernel_h = getSingleArgument(op, "kernel_h", 0);
- kernel_w = getSingleArgument(op, "kernel_w", 0);
- }
- else
- {
- if (has_custom_kernels_size)
- {
- // kernels order: h w
- std::vector<int32_t> kernels;
- auto kernels_arg = findArgumentByName(op.arg(), "kernels");
- for (const auto &ker : kernels_arg.ints())
- kernels.push_back(static_cast<int32_t>(ker));
- assert(kernels.size() == 2);
- kernel_h = kernels[0];
- kernel_w = kernels[1];
- }
- else
- {
- kernel_h = kernel_w = getSingleArgument(op, "kernel", 0);
- }
- }
- }
- return {kernel_h, kernel_w};
-}
-
-//
-// Check functions
-//
-
-static void checkLayout(const OperatorDef &op)
-{
- if (getSingleArgument(op, "order", "NCHW") != "NCHW")
- throw std::runtime_error(op.type() + ": only 'NCHW' axis order is supported");
-}
-
-static void checkConvLikeOp(const ::caffe2::OperatorDef &op)
-{
- checkLayout(op);
-
- // Padding
- bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
- hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
-
- if (has_custom_pad && hasArgument(op.arg(), "pad"))
- throw std::runtime_error("Custom pad can't be combined with overall pad");
-
- if (has_custom_pad &&
- !(hasArgument(op.arg(), "pad_l") && hasArgument(op.arg(), "pad_r") &&
- hasArgument(op.arg(), "pad_t") && hasArgument(op.arg(), "pad_b")))
- throw std::runtime_error("If one custom pad specified - all custom pads must be specified");
-
- // Kernel size
- bool has_custom_kernel_size =
- hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
-
- if (has_custom_kernel_size && hasArgument(op.arg(), "kernel"))
- throw std::runtime_error("Custom kernel size can't be combined with overall kernel size");
-
- if (has_custom_kernel_size &&
- !(hasArgument(op.arg(), "kernel_h") && hasArgument(op.arg(), "kernel_w")))
- throw std::runtime_error(
- "If one custom kernel size specified - all custom kernel sizes must be specified");
-}
-
-static mir::TensorVariant createTensor(const OperatorDef &op)
-{
- assert(hasArgument(op.arg(), "shape") && hasArgument(op.arg(), "values"));
-
- const auto &shape = findArgumentByName(op.arg(), "shape");
- const auto &values = findArgumentByName(op.arg(), "values");
-
- mir::DataType element_type;
- const void *src_data;
- // if values on floats
- if (!values.floats().empty())
- {
- element_type = mir::DataType::FLOAT32;
- src_data = values.floats().data();
- }
- else
- {
- assert(!values.ints().empty());
- if (op.type() == "GivenTensorInt64Fill")
- {
- element_type = mir::DataType::INT64;
- }
- else
- {
- element_type = mir::DataType::INT32;
- }
- src_data = values.ints().data();
- }
-
- mir::Shape tensor_shape(shape.ints_size());
-
- for (int i = 0; i < shape.ints_size(); ++i)
- {
- tensor_shape.dim(i) = shape.ints(i);
- }
-
- return mir::TensorVariant({element_type, tensor_shape}, src_data);
-}
-
-//
-// Convert functions
-//
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertConstant(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- // Constant may not contain any data if it is a fake input.
- if (!hasArgument(op.arg(), "values"))
- return {};
-
- return {createOp<ops::ConstantOp>(createTensor(op))->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertAdd(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- assert(inputs.size() == 2);
- auto lhs = inputs[0];
- auto rhs = inputs[1];
-
- if (getSingleArgument(op, "broadcast", 0) != 0)
- {
- // FIXME This only works when 'axis' == 1 and the second input is 1-D.
- rhs = createOp<ops::ReshapeOp>(rhs, Shape{1, rhs->getShape().dim(0), 1, 1})->getOutput(0);
- auto result = createOp<ops::AddOp>(lhs, rhs)->getOutput(0);
- return {result};
- }
-
- auto result = createOp<ops::AddOp>(lhs, rhs)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertAveragePool(const std::vector<mir::Operation::Output *> &inputs,
- const OperatorDef &op)
-{
- checkConvLikeOp(op);
-
- assert(inputs.size() == 1);
- auto input = inputs[0];
-
- AvgPool2DOpAttributes attributes;
- std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
- attributes.window = getWindowSize(op, inputs);
- attributes.strides = getStrides(op);
- attributes.include_pad = false;
- attributes.data_format = DataFormat::NCHW;
- auto result = createOp<ops::AvgPool2DOp>(input, attributes)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertConv(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- // dilation order: h w (not used)
- mir::Conv2DOpAttributes attributes;
- attributes.strides = getStrides(op);
- std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
- attributes.num_groups = getSingleArgument(op, "group", 1);
- attributes.data_format = DataFormat::NCHW;
-
- std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
- auto kernel = createOp<ops::TransposeOp>(inputs[1], perm)->getOutput(0);
- auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
-
- if (op.input_size() > 2)
- {
- auto bias = inputs[2];
- bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertConcat(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- checkLayout(op);
-
- // `1` corresponds to the default (channels) axis.
- int axis = getSingleArgument(op, "axis", 1);
- auto result = createOp<ops::ConcatOp>(inputs, axis);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertDropout(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &)
-{
- // This is a no-op in inference mode.
- return {inputs[0]};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertFC(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- for (auto &s : {"axis", "axis_w", "float16_compute"})
- if (hasArgument(op.arg(), s))
- throw std::runtime_error(std::string("FC: only default '") + s + "' value is supported");
-
- const auto &input_shape = inputs[0]->getShape();
- // Transform input into 2-D tensor by flattening axes
- Shape shape{input_shape.dim(0), input_shape.numElements() / input_shape.dim(0)};
-
- auto reshape = createOp<ops::ReshapeOp>(inputs[0], shape)->getOutput(0);
- auto weights =
- createOp<ops::TransposeOp>(inputs[1], std::vector<std::size_t>{1, 0})->getOutput(0);
- auto result = createOp<ops::FullyConnectedOp>(reshape, weights)->getOutput(0);
- result = createOp<ops::AddOp>(result, inputs[2])->getOutput(0);
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertMaxPool(const std::vector<mir::Operation::Output *> &inputs,
- const OperatorDef &op)
-{
- checkConvLikeOp(op);
-
- assert(inputs.size() == 1);
- auto input = inputs[0];
-
- MaxPool2DOpAttributes attributes;
- std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
- attributes.window = getWindowSize(op, inputs);
- attributes.strides = getStrides(op);
- attributes.data_format = DataFormat::NCHW;
- auto result = createOp<ops::MaxPool2DOp>(input, attributes)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertMul(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- assert(inputs.size() == 2);
- auto lhs = inputs[0];
- auto rhs = inputs[1];
-
- if (getSingleArgument(op, "broadcast", 0) != 0)
- {
- // FIXME This only works when `axis` == 1 and the second input is 1-D.
- rhs = createOp<ops::ReshapeOp>(rhs, Shape{1, rhs->getShape().dim(0), 1, 1})->getOutput(0);
- auto result = createOp<ops::MulOp>(lhs, rhs)->getOutput(0);
- return {result};
- }
-
- auto result = createOp<ops::MulOp>(lhs, rhs)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertRelu(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto relu = createOp<ops::ReluOp>(inputs[0]);
- return {relu->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertResizeNearest(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- std::vector<float> scales(4);
- assert(inputs[0]->getShape().rank() == 4 && "only 4d tensors is supported");
- // Assuming NCHW format.
- scales[0] = 1.0f;
- scales[1] = 1.0f;
- scales[2] = getSingleArgument(op, "height_scale", 1.0f);
- scales[3] = getSingleArgument(op, "width_scale", 1.0f);
- auto result =
- createOp<ops::ResizeOp>(inputs[0], ops::ResizeOp::ResizeMethod::nearestNeighbor, scales)
- ->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertSigmoid(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto result = createOp<ops::SigmoidOp>(inputs[0]);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- int axis = getSingleArgument(op, "axis", 1);
- auto softmax = createOp<ops::SoftmaxOp>(inputs[0], axis);
- return {softmax->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- checkLayout(op);
-
- // Sanity checks
- if (op.input_size() != 5)
- throw std::runtime_error(
- "SpatialBN must have exactly 5 inputs ('sums' and 'sumsq' are not supported yet)");
- if (getSingleArgument(op, "is_test", 1) != 1)
- throw std::runtime_error("SpatialBN: only test mode supported");
-
- // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
-
- auto scale_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
- auto bias_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[2]->getNode());
- auto mean_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[3]->getNode());
- auto var_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[4]->getNode());
- if (scale_op == nullptr || bias_op == nullptr || mean_op == nullptr || var_op == nullptr)
- throw std::runtime_error(
- "SpatialBN: non-constant 'scale', 'bias', 'mean' and 'var' inputs are not supported yet.");
-
- const auto &scale_tensor = scale_op->getValue();
- const auto &bias_tensor = bias_op->getValue();
- const auto &mean_tensor = mean_op->getValue();
- const auto &var_tensor = var_op->getValue();
- float eps = getSingleArgument(op, "epsilon", 1e-5f);
-
- // res1 = X - mean
- Tensor<float> bias_data(mean_tensor);
- for (auto &idx : ShapeRange(bias_data.getShape()))
- bias_data.at(idx) *= -1;
-
- auto mean = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);
- mean = createOp<ops::ReshapeOp>(mean, Shape{1, mean->getShape().dim(0), 1, 1})->getOutput(0);
- auto result = createOp<ops::AddOp>(inputs[0], mean)->getOutput(0);
-
- // res2 = res1 * scale / (var + epsilon)
- Tensor<float> multiplier(scale_tensor);
- for (auto &idx : ShapeRange(scale_tensor.getShape()))
- multiplier.at(idx) /= std::sqrt(*reinterpret_cast<float *>(var_tensor.at(idx)) + eps);
- auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
- scale = createOp<ops::ReshapeOp>(scale, Shape{1, scale->getShape().dim(0), 1, 1})->getOutput(0);
- result = createOp<ops::MulOp>(result, scale)->getOutput(0);
-
- // overall_res = res2 + bias
- auto bias = createOp<ops::ConstantOp>(bias_tensor)->getOutput(0);
- bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
-
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertSum(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
- for (int i = 2; i < static_cast<int>(inputs.size()); ++i)
- {
- result = createOp<ops::AddOp>(result, inputs[i])->getOutput(0);
- }
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertClip(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
-
- float max = getSingleArgument(op, "max", float(0));
- float min = getSingleArgument(op, "min", float(0));
-
- assert(max > 0.0 && min == 0.0 && "Support only if clip is CappedRelu");
- auto cap_relu = createOp<ops::CappedReluOp>(inputs[0], max);
-
- return {cap_relu->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-Caffe2OpCreator::convertReshape(const std::vector<mir::Operation::Output *> &inputs,
- const ::caffe2::OperatorDef &op)
-{
- auto shape_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
- if (shape_op == nullptr)
- throw std::runtime_error("Reshape: non-constant shape is not supported yet.");
-
- const auto &shape_tensor = shape_op->getValue();
-
- Tensor<int64_t> out_shape_tensor(shape_tensor);
-
- ShapeRange range(out_shape_tensor.getShape());
- std::vector<int32_t> shape_vec;
- for (const auto &index : range)
- {
- shape_vec.push_back(static_cast<int32_t>(out_shape_tensor.at(index)));
- }
- Shape out_shape(shape_vec);
-
- auto reshape = createOp<ops::ReshapeOp>(inputs[0], out_shape);
-
- return {reshape->getOutput(0)};
-}
-
-} // namespace mir_caffe2
diff --git a/compiler/mir-caffe2-importer/caffe2_proto_helper.cpp b/compiler/mir-caffe2-importer/caffe2_proto_helper.cpp
deleted file mode 100644
index a7cde64cf..000000000
--- a/compiler/mir-caffe2-importer/caffe2_proto_helper.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "caffe2_proto_helper.h"
-
-namespace mir_caffe2
-{
-
-const ::caffe2::Argument &findArgumentByName(RepArgument args, const std::string &name)
-{
- for (auto &arg : args)
- if (arg.name() == name)
- return arg;
- throw std::runtime_error("Can't find argument with name: " + name);
-}
-
-const bool hasArgument(RepArgument args, const std::string &name)
-{
- for (auto &arg : args)
- if (arg.name() == name)
- return true;
- return false;
-}
-
-int getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
- const int default_value)
-{
- if (hasArgument(op.arg(), argument_name))
- return static_cast<int>(findArgumentByName(op.arg(), argument_name).i());
- return default_value;
-}
-
-float getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
- const float default_value)
-{
- if (hasArgument(op.arg(), argument_name))
- return findArgumentByName(op.arg(), argument_name).f();
- return default_value;
-}
-
-std::string getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
- const std::string &default_value)
-{
- if (hasArgument(op.arg(), argument_name))
- return findArgumentByName(op.arg(), argument_name).s();
- return default_value;
-}
-
-} // namespace mir_caffe2
diff --git a/compiler/mir-caffe2-importer/caffe2_proto_helper.h b/compiler/mir-caffe2-importer/caffe2_proto_helper.h
deleted file mode 100644
index 4c47edec8..000000000
--- a/compiler/mir-caffe2-importer/caffe2_proto_helper.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MIR_CAFFE2_PROTO_HELPER_H
-#define MIR_CAFFE2_PROTO_HELPER_H
-
-#include "caffe2/proto/caffe2.pb.h"
-
-namespace mir_caffe2
-{
-
-using RepArgument = const ::google::protobuf::RepeatedPtrField<::caffe2::Argument> &;
-
-const ::caffe2::Argument &findArgumentByName(RepArgument args, const std::string &name);
-
-const bool hasArgument(RepArgument args, const std::string &name);
-
-int getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
- int default_value);
-float getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
- float default_value);
-std::string getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
- const std::string &default_value);
-
-} // namespace mir_caffe2
-
-#endif // MIR_CAFFE2_PROTO_HELPER_H
diff --git a/compiler/mir-caffe2-importer/requires.cmake b/compiler/mir-caffe2-importer/requires.cmake
deleted file mode 100644
index 1059c50d3..000000000
--- a/compiler/mir-caffe2-importer/requires.cmake
+++ /dev/null
@@ -1 +0,0 @@
-require("mir")
diff --git a/compiler/mir-interpreter/README.md b/compiler/mir-interpreter/README.md
new file mode 100644
index 000000000..4ed0c7350
--- /dev/null
+++ b/compiler/mir-interpreter/README.md
@@ -0,0 +1 @@
+# mir-interpreter
diff --git a/compiler/mir-onnx-importer/AttributeHelpers.h b/compiler/mir-onnx-importer/AttributeHelpers.h
deleted file mode 100644
index d5cc1501a..000000000
--- a/compiler/mir-onnx-importer/AttributeHelpers.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MIR_ONNX_ATTRIBUTE_HELPERS_H
-#define MIR_ONNX_ATTRIBUTE_HELPERS_H
-
-#include "onnx/onnx.pb.h"
-
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <string>
-#include <utility>
-#include <vector>
-
-namespace mir_onnx
-{
-
-template <typename T> T getAttributeValue(const onnx::AttributeProto &attribute) = delete;
-
-template <> inline float getAttributeValue(const onnx::AttributeProto &attribute)
-{
- assert(attribute.type() == onnx::AttributeProto::FLOAT);
- return attribute.f();
-}
-
-template <> inline std::int64_t getAttributeValue(const onnx::AttributeProto &attribute)
-{
- assert(attribute.type() == onnx::AttributeProto::INT);
- return attribute.i();
-}
-
-template <> inline std::string getAttributeValue(const onnx::AttributeProto &attribute)
-{
- assert(attribute.type() == onnx::AttributeProto::STRING);
- return attribute.s();
-}
-
-template <> inline onnx::TensorProto getAttributeValue(const onnx::AttributeProto &attribute)
-{
- assert(attribute.type() == onnx::AttributeProto::TENSOR);
- return attribute.t();
-}
-
-template <>
-inline std::vector<std::int32_t> getAttributeValue(const onnx::AttributeProto &attribute)
-{
- assert(attribute.type() == onnx::AttributeProto::INTS);
- // TODO Check that values fit.
- return {attribute.ints().cbegin(), attribute.ints().cend()};
-}
-
-template <>
-inline std::vector<std::int64_t> getAttributeValue(const onnx::AttributeProto &attribute)
-{
- assert(attribute.type() == onnx::AttributeProto::INTS);
- return {attribute.ints().cbegin(), attribute.ints().cend()};
-}
-
-inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &node,
- const std::string &name)
-{
- const auto &attributes = node.attribute();
- const auto it = std::find_if(
- attributes.cbegin(), attributes.cend(),
- [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
- if (it == attributes.cend())
- return nullptr;
- return &*it;
-}
-
-template <typename T> T getAttributeValue(const onnx::NodeProto &node, const std::string &name)
-{
- const auto *attribute = findAttribute(node, name);
- if (attribute == nullptr)
- throw std::runtime_error("Cannot find attribute '" + name + "' in node '" + node.name() + "'.");
- return getAttributeValue<T>(*attribute);
-}
-
-template <typename T>
-T getAttributeValue(const onnx::NodeProto &node, const std::string &name, T default_value)
-{
- const auto *attribute = findAttribute(node, name);
- if (attribute == nullptr)
- return std::move(default_value);
- return getAttributeValue<T>(*attribute);
-}
-
-} // namespace mir_onnx
-
-#endif // MIR_ONNX_ATTRIBUTE_HELPERS_H
diff --git a/compiler/mir-onnx-importer/CMakeLists.txt b/compiler/mir-onnx-importer/CMakeLists.txt
deleted file mode 100644
index 5f27bc041..000000000
--- a/compiler/mir-onnx-importer/CMakeLists.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-nnas_find_package(ONNXSource EXACT 1.6.0 QUIET)
-nnas_find_package(Protobuf QUIET)
-
-if (NOT ONNXSource_FOUND)
- return()
-endif ()
-
-if (NOT Protobuf_FOUND)
- return()
-endif ()
-
-Protobuf_Generate(MIR_ONNX_PROTO
- ${CMAKE_CURRENT_BINARY_DIR}/generated
- ${ONNXSource_DIR}
- onnx/onnx.proto)
-
-add_library(mir_onnx_proto STATIC ${MIR_ONNX_PROTO_SOURCES})
-set_target_properties(mir_onnx_proto PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(mir_onnx_proto PUBLIC ${MIR_ONNX_PROTO_INCLUDE_DIRS})
-target_link_libraries(mir_onnx_proto PUBLIC libprotobuf)
-
-set(MIR_ONNX_IMPORTER_SOURCES
- AttributeHelpers.h
- ConvPoolHelpers.cpp
- ConvPoolHelpers.h
- ONNXHelpers.cpp
- ONNXHelpers.h
- ONNXImporterImpl.cpp
- ONNXImporterImpl.h
- ONNXNodeConverterRegistry.h
- ONNXNodeConverterRegistry.cpp
- ONNXOpRegistration.h
- Op/Abs.cpp
- Op/Abs.h
- Op/Add.cpp
- Op/Add.h
- Op/AveragePool.cpp
- Op/AveragePool.h
- Op/BatchNormalization.cpp
- Op/BatchNormalization.h
- Op/Concat.cpp
- Op/Concat.h
- Op/Constant.cpp
- Op/Constant.h
- Op/Conv.cpp
- Op/Conv.h
- Op/ConvTranspose.cpp
- Op/ConvTranspose.h
- Op/Div.cpp
- Op/Div.h
- Op/Dropout.cpp
- Op/Dropout.h
- Op/Equal.cpp
- Op/Equal.h
- Op/Expand.cpp
- Op/Expand.h
- Op/Flatten.cpp
- Op/Flatten.h
- Op/Gather.cpp
- Op/Gather.h
- Op/Greater.cpp
- Op/Greater.h
- Op/Gemm.cpp
- Op/Gemm.h
- Op/Identity.cpp
- Op/Identity.h
- Op/Less.cpp
- Op/Less.h
- Op/MatMul.cpp
- Op/MatMul.h
- Op/GlobalAveragePool.cpp
- Op/GlobalAveragePool.h
- Op/Max.cpp
- Op/Max.h
- Op/MaxPool.cpp
- Op/MaxPool.h
- Op/Mul.cpp
- Op/Mul.h
- Op/Pad.cpp
- Op/Pad.h
- Op/Reciprocal.cpp
- Op/Reciprocal.h
- Op/ReduceMean.cpp
- Op/ReduceMean.h
- Op/Relu.cpp
- Op/Relu.h
- Op/Reshape.cpp
- Op/Reshape.h
- Op/Shape.cpp
- Op/Shape.h
- Op/Sigmoid.cpp
- Op/Sigmoid.h
- Op/Softmax.cpp
- Op/Softmax.h
- Op/Sqrt.cpp
- Op/Sqrt.h
- Op/Sub.cpp
- Op/Sub.h
- Op/Sum.cpp
- Op/Sum.h
- Op/Tanh.cpp
- Op/Tanh.h
- Op/Transpose.cpp
- Op/Transpose.h
- Op/Unsqueeze.cpp
- Op/Unsqueeze.h
- Op/Upsample.cpp
- Op/Upsample.h)
-
-add_library(mir_onnx_importer STATIC ${MIR_ONNX_IMPORTER_SOURCES})
-set_target_properties(mir_onnx_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(mir_onnx_importer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
-target_link_libraries(mir_onnx_importer PUBLIC mir_onnx_proto mir PRIVATE stdex mir_interpreter)
-
-nnas_find_package(GTest REQUIRED)
-
-file(GLOB_RECURSE TEST_SOURCES "*.test.cpp")
-GTest_AddTest(mir_onnx_importer_test ${TEST_SOURCES})
-target_link_libraries(mir_onnx_importer_test mir_onnx_importer)
diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.cpp b/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
deleted file mode 100644
index c33104198..000000000
--- a/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ONNXImporterImpl.h"
-#include "ONNXHelpers.h"
-#include "ONNXOpRegistration.h"
-#include "onnx/onnx.pb.h"
-
-#include "mir/Shape.h"
-#include "mir/TensorUtil.h"
-
-#include "mir/ops/ConstantOp.h"
-
-#include <fcntl.h>
-
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <google/protobuf/io/coded_stream.h>
-#include <google/protobuf/text_format.h>
-#include <functional>
-#include <iostream>
-#include <stdex/Memory.h>
-#include <utility>
-
-namespace mir_onnx
-{
-
-namespace
-{
-
-class ONNXImporterImpl final
-{
-public:
- ONNXImporterImpl();
- ~ONNXImporterImpl();
- /// @brief Load the model and convert it into a MIR Graph.
- std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename);
- std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename);
-
-private:
- std::unique_ptr<mir::Graph> createIR();
- void createGraphInputs();
- void collectUnsupportedOps();
- std::unique_ptr<onnx::ModelProto> _model;
- std::unique_ptr<ConverterContext> _converterCtx;
- std::unique_ptr<ModelContext> _modelCtx;
- std::unique_ptr<mir::Graph> _graph;
-};
-
-ONNXImporterImpl::ONNXImporterImpl() { registerSupportedOps(); }
-
-ONNXImporterImpl::~ONNXImporterImpl() = default;
-
-void loadModelFromBinaryFile(const std::string &filename, onnx::ModelProto *model)
-{
- GOOGLE_PROTOBUF_VERIFY_VERSION;
-
- int file_handle = open(filename.c_str(), O_RDONLY);
-
- if (file_handle == -1)
- throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
- ".");
-
- google::protobuf::io::FileInputStream file_stream(file_handle);
- file_stream.SetCloseOnDelete(true);
-
- google::protobuf::io::CodedInputStream coded_stream(&file_stream);
- coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);
-
- if (!model->ParseFromCodedStream(&coded_stream))
- throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
-
- // If the file has not been consumed entirely, assume that the file is in the wrong format.
- if (!coded_stream.ConsumedEntireMessage())
- throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
-}
-
-void loadModelFromTextFile(const std::string &filename, onnx::ModelProto *model)
-{
- GOOGLE_PROTOBUF_VERIFY_VERSION;
-
- int file_handle = open(filename.c_str(), O_RDONLY);
-
- if (file_handle == -1)
- throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
- ".");
-
- google::protobuf::io::FileInputStream file_stream(file_handle);
- file_stream.SetCloseOnDelete(true);
-
- if (!google::protobuf::TextFormat::Parse(&file_stream, model))
- throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
-}
-
-std::unique_ptr<mir::Graph> ONNXImporterImpl::importModelFromBinaryFile(const std::string &filename)
-{
- _model = stdex::make_unique<onnx::ModelProto>();
- loadModelFromBinaryFile(filename, _model.get());
- _modelCtx = stdex::make_unique<ModelContext>(_model.get());
- collectUnsupportedOps();
- return createIR();
-}
-
-std::unique_ptr<mir::Graph> ONNXImporterImpl::importModelFromTextFile(const std::string &filename)
-{
- _model = stdex::make_unique<onnx::ModelProto>();
- loadModelFromTextFile(filename, _model.get());
- _modelCtx = stdex::make_unique<ModelContext>(_model.get());
- collectUnsupportedOps();
- return createIR();
-}
-
-void ONNXImporterImpl::collectUnsupportedOps()
-{
- std::set<std::pair<std::string, int64_t>> problems_op_set;
-
- for (int i = 0; i < _model->graph().node_size(); i++)
- {
- const auto &onnx_node = _model->graph().node(i);
- assert(onnx_node.has_op_type());
- const auto &op_type = onnx_node.op_type();
- auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
-
- NodeConverterRegistry::ConverterFunc converter =
- NodeConverterRegistry::getInstance().lookup(op_type, opset);
-
- if (converter == nullptr)
- problems_op_set.emplace(op_type, opset);
- }
- if (!problems_op_set.empty())
- {
- std::cerr << "The following operators are not supported:\n";
- for (const auto &op : problems_op_set)
- std::cerr << op.first << " opset " << op.second << std::endl;
- throw std::runtime_error("Unsupported operators found");
- }
-}
-
-void ONNXImporterImpl::createGraphInputs()
-{
- const auto &graph = _model->graph();
- const auto &initializer = graph.initializer();
- const auto &value_info = graph.value_info();
-
- // Create all initializer Tensors
- for (const auto &tensor : initializer)
- {
- const auto mir_tensor = createTensor(&tensor);
- auto *op = _graph->create<mir::ops::ConstantOp>(mir_tensor);
- _converterCtx->setOutput(tensor.name(), op->getOutput(0));
- }
-
- for (const auto &input : graph.input())
- {
- assert(input.has_name());
-
- if (_converterCtx->getOutput(input.name()) == nullptr)
- {
- const auto &onnx_input_shape = input.type().tensor_type().shape();
- mir::Shape shape(onnx_input_shape.dim_size());
- for (int i = 0; i < onnx_input_shape.dim_size(); i++)
- {
- assert(onnx_input_shape.dim(i).has_dim_value());
- shape.dim(i) = static_cast<int32_t>(onnx_input_shape.dim(i).dim_value());
- }
-
- auto elem_type = onnxDataTypeToMirDataType(
- (onnx::TensorProto_DataType)input.type().tensor_type().elem_type());
- mir::TensorType type{elem_type, shape};
- auto *op = _graph->create<mir::ops::InputOp>(type);
- _converterCtx->setOutput(input.name(), op->getOutput(0));
- }
- }
-}
-
-std::unique_ptr<mir::Graph> ONNXImporterImpl::createIR()
-{
- _graph = stdex::make_unique<mir::Graph>();
- _converterCtx = stdex::make_unique<ConverterContext>(_graph.get());
-
- createGraphInputs();
-
- // Forming partially ordered computation graph
- for (const auto &onnx_node : _model->graph().node())
- {
- assert(onnx_node.has_op_type());
- auto &op_type = onnx_node.op_type();
- auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
- // Get converter
- NodeConverterRegistry::ConverterFunc converter =
- NodeConverterRegistry::getInstance().lookup(op_type, opset);
- assert(converter != nullptr);
- converter(onnx_node, _converterCtx.get());
- }
- // Set graph outputs
- const auto &outputs = _model->graph().output();
- for (const auto &output : outputs)
- {
- assert(output.has_name());
- auto mir_output = _converterCtx->getOutput(output.name());
- if (mir_output == nullptr)
- throw std::runtime_error("Bad output name!");
-
- _graph->create<mir::ops::OutputOp>(mir_output);
- }
-
- return std::move(_graph);
-}
-
-} // namespace
-
-std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename)
-{
- ONNXImporterImpl importer;
- return importer.importModelFromBinaryFile(filename);
-}
-
-std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename)
-{
- ONNXImporterImpl importer;
- return importer.importModelFromTextFile(filename);
-}
-
-std::unique_ptr<mir::Graph> loadModel(const std::string &filename)
-{
- return importModelFromBinaryFile(filename);
-}
-
-} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp
deleted file mode 100644
index 504a32bb8..000000000
--- a/compiler/mir-onnx-importer/Op/Pad.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Pad.h"
-
-#include "ONNXHelpers.h"
-#include "AttributeHelpers.h"
-
-#include "mir/ops/PadOp.h"
-
-namespace mir_onnx
-{
-
-void convertPadAttrName(const std::string &pad_attr_name, const onnx::NodeProto &onnx_node,
- ConverterContext *context)
-{
- std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
- mir::Graph *graph = context->getGraph();
-
- assert(inputs.size() == 1);
- auto input = inputs[0];
-
- // 0.0f is the default value to be filled into padded cells.
- const auto value = getAttributeValue<float>(onnx_node, "value", 0.0f);
- const auto pads = getAttributeValue<std::vector<std::int64_t>>(onnx_node, pad_attr_name);
- // "constant" is the default mode.
- const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "constant");
- if (mode != "constant")
- throw std::runtime_error("Not supported Pad mode attribute!");
-
- const int num_dims = input->getShape().rank();
- assert(pads.size() == num_dims * 2);
- mir::PadOpAttributes attributes(num_dims);
- for (int i = 0; i < num_dims; i++)
- {
- attributes.padding_before[i] = pads[i];
- attributes.padding_after[i] = pads[num_dims + i];
- }
-
- attributes.padding_value = value;
-
- auto result = createOp<mir::ops::PadOp>(graph, input, attributes)->getOutput(0);
-
- context->setNodeOutputs(onnx_node, {result});
-}
-
-void convertPadV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
-{
- convertPadAttrName("paddings", onnx_node, context);
-}
-
-void convertPadV2(const onnx::NodeProto &onnx_node, ConverterContext *context)
-{
- convertPadAttrName("pads", onnx_node, context);
-}
-
-} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Transpose.cpp b/compiler/mir-onnx-importer/Op/Transpose.cpp
deleted file mode 100644
index 82bb2f122..000000000
--- a/compiler/mir-onnx-importer/Op/Transpose.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Transpose.h"
-#include "ONNXHelpers.h"
-#include "AttributeHelpers.h"
-
-#include "mir/ops/TransposeOp.h"
-
-#include <numeric>
-
-namespace mir_onnx
-{
-
-void convertTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
-{
- const auto inputs = context->getNodeInputs(onnx_node);
- mir::Graph *graph = context->getGraph();
-
- assert(inputs.size() == 1);
- auto input = inputs[0];
-
- const auto num_axes = input->getShape().rank();
- std::vector<std::size_t> axis_order(num_axes);
- const auto *perm_attr = findAttribute(onnx_node, "perm");
-
- if (perm_attr == nullptr)
- {
- // Reverse the dimensions.
- std::iota(axis_order.rbegin(), axis_order.rend(), 0);
- }
- else
- {
- const auto perm = getAttributeValue<std::vector<std::int64_t>>(*perm_attr);
- assert(perm.size() == num_axes);
- std::copy(perm.cbegin(), perm.cend(), axis_order.begin());
- }
-
- auto result = createOp<mir::ops::TransposeOp>(graph, input, axis_order)->getOutput(0);
-
- context->setNodeOutputs(onnx_node, {result});
-}
-
-} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Upsample.cpp b/compiler/mir-onnx-importer/Op/Upsample.cpp
deleted file mode 100644
index 49a555647..000000000
--- a/compiler/mir-onnx-importer/Op/Upsample.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Upsample.h"
-
-#include "ONNXHelpers.h"
-#include "AttributeHelpers.h"
-
-#include "mir/Tensor.h"
-
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/ResizeOp.h"
-
-namespace mir_onnx
-{
-
-void convertUpsampleV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
-{
- std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
- mir::Graph *graph = context->getGraph();
-
- // "nearest" is the default mode.
- std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
- assert(mode == "nearest" && "Unsupported upscale mode!");
-
- const float h_scale = getAttributeValue<float>(onnx_node, "height_scale", 0.0f); // required
- const float w_scale = getAttributeValue<float>(onnx_node, "width_scale", 0.0f); // required
- if (h_scale < 1.0f || w_scale < 1.0f)
- throw std::runtime_error("Wrong scale attributes!");
-
- assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
- std::vector<float> scales_vector(4);
- // NCHW
- scales_vector.at(0) = 1.0f;
- scales_vector.at(1) = 1.0f;
- scales_vector.at(2) = h_scale;
- scales_vector.at(3) = w_scale;
-
- auto result =
- createOp<mir::ops::ResizeOp>(graph, inputs[0],
- mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
- ->getOutput(0);
-
- context->setNodeOutputs(onnx_node, {result});
-}
-
-void convertUpsampleV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
-{
- std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
- mir::Graph *graph = context->getGraph();
-
- // "nearest" is the default mode.
- std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
- assert(mode == "nearest" && "Unsupported upscale mode!");
-
- const auto *scales_attr = findAttribute(onnx_node, "scales");
- if (!scales_attr)
- throw std::runtime_error("Not enough required scales attribute!");
-
- if (scales_attr->floats_size() != inputs[0]->getShape().rank())
- throw std::runtime_error(
- "Number of elements of scales should be the same as the rank of input");
-
- assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
- std::vector<float> scales_vector(4);
- // NCHW
- scales_vector.at(0) = scales_attr->floats(0);
- scales_vector.at(1) = scales_attr->floats(1);
- scales_vector.at(2) = scales_attr->floats(2);
- scales_vector.at(3) = scales_attr->floats(3);
-
- auto result =
- createOp<mir::ops::ResizeOp>(graph, inputs[0],
- mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
- ->getOutput(0);
-
- context->setNodeOutputs(onnx_node, {result});
-}
-
-void convertUpsampleV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
-{
- std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
- mir::Graph *graph = context->getGraph();
-
- // "nearest" is the default mode.
- const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
- assert(mode == "nearest" && "Unsupported upscale mode!");
-
- // relies on attributes being lifted to constants (ONNX optimization pass)
- assert(inputs.size() > 1);
- auto *scales = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
- assert(scales && "Weights could be a constant tensor only");
- auto scales_tensor = mir::Tensor<float>(scales->getValue());
- int rank = inputs[0]->getShape().rank();
- assert(scales_tensor.getShape().numElements() == rank &&
- "The number of elements of 'scales' should be the same as the rank of input 'X'");
- assert(rank == 4 && "Only rank 4 is supported");
- std::vector<float> scales_vector(4);
- assert(scales_tensor.getShape().rank() == 1 && "Scales are a 1d tensor");
- for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
- scales_vector[i] = scales_tensor.atOffset(i);
-
- auto result =
- createOp<mir::ops::ResizeOp>(graph, inputs[0],
- mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
- ->getOutput(0);
-
- context->setNodeOutputs(onnx_node, {result});
-}
-
-} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/requires.cmake b/compiler/mir-onnx-importer/requires.cmake
deleted file mode 100644
index 52a7837df..000000000
--- a/compiler/mir-onnx-importer/requires.cmake
+++ /dev/null
@@ -1,2 +0,0 @@
-require("mir")
-require("mir-interpreter")
diff --git a/compiler/mir-tflite-importer/CMakeLists.txt b/compiler/mir-tflite-importer/CMakeLists.txt
deleted file mode 100644
index 4a06d51b8..000000000
--- a/compiler/mir-tflite-importer/CMakeLists.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-nnas_find_package(FlatBuffers REQUIRED)
-
-if (NOT FlatBuffers_FOUND)
- return()
-endif ()
-
-FlatBuffers_Target(mir_tflite_schema
- OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated/schema"
- SCHEMA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/schema"
- SCHEMA_FILES schema.fbs)
-
-
-set(MIR_TFLITE_IMPORTER_SOURCES
- tflite_importer.cpp
- tflite_importer.h
- tflite_op_creator.cpp
- tflite_op_creator.h)
-
-add_library(mir_tflite_importer STATIC ${MIR_TFLITE_IMPORTER_SOURCES})
-set_target_properties(mir_tflite_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(mir_tflite_importer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
-target_link_libraries(mir_tflite_importer PUBLIC mir mir_tflite_schema PRIVATE stdex)
diff --git a/compiler/mir-tflite-importer/requires.cmake b/compiler/mir-tflite-importer/requires.cmake
deleted file mode 100644
index 1059c50d3..000000000
--- a/compiler/mir-tflite-importer/requires.cmake
+++ /dev/null
@@ -1 +0,0 @@
-require("mir")
diff --git a/compiler/mir-tflite-importer/tflite_importer.cpp b/compiler/mir-tflite-importer/tflite_importer.cpp
deleted file mode 100644
index e3001d33d..000000000
--- a/compiler/mir-tflite-importer/tflite_importer.cpp
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite_importer.h"
-#include "tflite_op_creator.h"
-#include "schema_generated.h"
-
-#include "mir/TensorVariant.h"
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/OutputOp.h"
-
-#include <fstream>
-#include <stdex/Memory.h>
-#include <utility>
-#include <vector>
-#include <set>
-
-namespace mir_tflite
-{
-
-namespace
-{
-
-class TfliteImporter
-{
-public:
- explicit TfliteImporter(std::string filename);
-
- /// @brief Load the model and convert it into a MIR Graph.
- std::unique_ptr<mir::Graph> importModel();
-
- ~TfliteImporter();
-
-private:
- std::string _filename;
- std::unique_ptr<tflite::ModelT> _model;
-
- std::unique_ptr<mir::Graph> _graph;
- std::unique_ptr<TFLiteOpCreator> _opCreator;
-
- // Maps TFLite tensors indices to corresponding MIR operation outputs.
- std::vector<mir::Operation::Output *> _tensorMap;
-
- void import();
-
- void walkModel(const tflite::ModelT *model);
-
- void walkSubgraph(const tflite::SubGraphT *subgraph);
-
- void walkOperator(const tflite::SubGraphT *subgraph, const tflite::OperatorT *op);
-
- /**
- * @brief Pass through tflite graph and collect operators unsupported by NNC
- * @throw PassException with message, containing detected problems
- */
- void collectUnsupportedOps();
-
- /**
- * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
- */
- std::vector<mir::Operation::Output *> getMIRInputsForOperator(const tflite::SubGraphT *subgraph,
- const tflite::OperatorT *op);
-};
-
-TfliteImporter::TfliteImporter(std::string filename) : _filename(std::move(filename))
-{
- _graph = stdex::make_unique<mir::Graph>();
- _opCreator = stdex::make_unique<TFLiteOpCreator>(_graph.get());
-}
-
-TfliteImporter::~TfliteImporter() = default;
-
-void TfliteImporter::import()
-{
- std::ifstream stream(_filename, std::ios::in | std::ios::binary);
- if (stream.fail())
- throw std::runtime_error("Couldn't open file \"" + _filename + "\".");
-
- std::vector<char> model_buffer((std::istreambuf_iterator<char>(stream)),
- std::istreambuf_iterator<char>());
-
- if (stream.fail())
- throw std::runtime_error("Couldn't read file \"" + _filename + "\".");
-
- flatbuffers::Verifier verifier(reinterpret_cast<const std::uint8_t *>(model_buffer.data()),
- model_buffer.size());
-
- if (!tflite::VerifyModelBuffer(verifier))
- throw std::runtime_error("Could not load model: " + _filename + "\n");
-
- _model = tflite::UnPackModel(model_buffer.data());
-}
-
-static const std::set<tflite::BuiltinOperator> supportedOperators = {
- tflite::BuiltinOperator_ADD,
- tflite::BuiltinOperator_AVERAGE_POOL_2D,
- tflite::BuiltinOperator_CONCATENATION,
- tflite::BuiltinOperator_CONV_2D,
- tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
- tflite::BuiltinOperator_DIV,
- tflite::BuiltinOperator_FULLY_CONNECTED,
- tflite::BuiltinOperator_HARD_SWISH,
- tflite::BuiltinOperator_LEAKY_RELU,
- tflite::BuiltinOperator_LOGISTIC,
- tflite::BuiltinOperator_MAX_POOL_2D,
- tflite::BuiltinOperator_MAXIMUM,
- tflite::BuiltinOperator_MEAN,
- tflite::BuiltinOperator_MUL,
- tflite::BuiltinOperator_PAD,
- tflite::BuiltinOperator_RELU,
- tflite::BuiltinOperator_RELU6,
- tflite::BuiltinOperator_RESHAPE,
- tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- tflite::BuiltinOperator_RSQRT,
- tflite::BuiltinOperator_SHAPE,
- tflite::BuiltinOperator_SLICE,
- tflite::BuiltinOperator_SOFTMAX,
- tflite::BuiltinOperator_SQRT,
- tflite::BuiltinOperator_SQUARED_DIFFERENCE,
- tflite::BuiltinOperator_SQUEEZE,
- tflite::BuiltinOperator_STRIDED_SLICE,
- tflite::BuiltinOperator_SUB,
- tflite::BuiltinOperator_TANH,
- tflite::BuiltinOperator_TRANSPOSE,
- tflite::BuiltinOperator_TRANSPOSE_CONV,
-};
-
-void TfliteImporter::collectUnsupportedOps()
-{
- std::set<std::string> errors;
- for (const auto &subgraph : _model->subgraphs)
- for (const auto &op : subgraph->operators)
- {
- tflite::BuiltinOperator opcode = _model->operator_codes[op->opcode_index]->builtin_code;
- if (supportedOperators.find(opcode) == supportedOperators.end())
- {
- if (opcode <= tflite::BuiltinOperator_MAX)
- errors.insert(std::string(EnumNameBuiltinOperator(opcode)) + ": unsupported operator");
- else
- errors.insert(std::to_string(opcode) + ": unsuppored in tflite custom opcode");
- }
- }
-
- if (!errors.empty())
- {
- std::string msg("NNC can't load model. Detected problems:");
- for (const auto &e : errors)
- msg.append("\n * " + e);
- throw std::runtime_error(msg);
- }
-}
-
-std::unique_ptr<mir::Graph> TfliteImporter::importModel()
-{
- import();
- collectUnsupportedOps();
- walkModel(_model.get());
- return std::move(_graph);
-}
-
-void TfliteImporter::walkModel(const tflite::ModelT *model)
-{
- for (const auto &subgraph : model->subgraphs)
- walkSubgraph(subgraph.get());
-}
-
-mir::DataType convertElementType(tflite::TensorType type)
-{
- switch (type)
- {
- case tflite::TensorType_INT32:
- return mir::DataType::INT32;
- case tflite::TensorType_FLOAT32:
- return mir::DataType::FLOAT32;
- case tflite::TensorType_INT64:
- return mir::DataType::INT64;
- case tflite::TensorType_UINT8:
- return mir::DataType::UINT8;
- default:
- throw std::runtime_error(std::string("Unsupported tensor type: ") + EnumNameTensorType(type));
- }
-}
-
-mir::TensorType getMirTensorType(const tflite::TensorT &tensor)
-{
- mir::DataType element_type = convertElementType(tensor.type);
-
- mir::Shape shape(tensor.shape.size());
- for (std::size_t i = 0; i < tensor.shape.size(); ++i)
- {
- shape.dim(i) = tensor.shape[i];
- }
-
- if (tensor.quantization != nullptr)
- {
- const tflite::QuantizationParametersT &params = *tensor.quantization;
-
- if (params.details.type != tflite::QuantizationDetails_NONE)
- throw std::runtime_error("Custom quantization is not supported.");
-
- // Empty parameters mean no quantization at all.
- if (params.scale.empty() && params.zero_point.empty())
- return mir::TensorType{element_type, shape};
-
- if (params.scale.size() != 1 || params.zero_point.size() != 1)
- throw std::runtime_error("Non-scalar quantization is not supported.");
-
- mir::AffineQuantization quantization{params.scale[0], static_cast<int>(params.zero_point[0])};
-
- return mir::TensorType{element_type, shape, quantization};
- }
- else
- {
- return mir::TensorType{element_type, shape};
- }
-}
-
-void TfliteImporter::walkSubgraph(const tflite::SubGraphT *subgraph)
-{
- _tensorMap.assign(subgraph->tensors.size(), nullptr);
-
- for (const auto input_tensor_index : subgraph->inputs)
- {
- const tflite::TensorT &tensor = *subgraph->tensors[input_tensor_index];
-
- mir::TensorType input_type = getMirTensorType(tensor);
- auto input = _graph->create<mir::ops::InputOp>(input_type)->getOutput(0);
- input->setName(tensor.name);
-
- assert(_tensorMap[input_tensor_index] == nullptr);
- _tensorMap[input_tensor_index] = input;
- }
-
- for (const auto &op : subgraph->operators)
- {
- walkOperator(subgraph, op.get());
- }
-
- for (const auto output_tensor_index : subgraph->outputs)
- {
- auto output = _tensorMap[output_tensor_index];
- _graph->create<mir::ops::OutputOp>(output);
- }
-}
-
-void TfliteImporter::walkOperator(const tflite::SubGraphT *subgraph, const tflite::OperatorT *op)
-{
- std::vector<mir::Operation::Output *> inputs = getMIRInputsForOperator(subgraph, op);
- std::vector<mir::Operation::Output *> outputs;
-
- tflite::BuiltinOperator opcode = _model->operator_codes[op->opcode_index]->builtin_code;
- switch (opcode)
- {
- case tflite::BuiltinOperator_CONV_2D:
- outputs = _opCreator->convertConv2D(op->builtin_options.AsConv2DOptions(), inputs);
- break;
- case tflite::BuiltinOperator_DEPTHWISE_CONV_2D:
- outputs = _opCreator->convertDepthwiseConv2D(op->builtin_options.AsDepthwiseConv2DOptions(),
- inputs);
- break;
- case tflite::BuiltinOperator_MAX_POOL_2D:
- outputs = _opCreator->convertMaxPool2D(op->builtin_options.AsPool2DOptions(), inputs);
- break;
- case tflite::BuiltinOperator_AVERAGE_POOL_2D:
- outputs = _opCreator->convertAveragePool2D(op->builtin_options.AsPool2DOptions(), inputs);
- break;
- case tflite::BuiltinOperator_CONCATENATION:
- outputs =
- _opCreator->convertConcatenation(op->builtin_options.AsConcatenationOptions(), inputs);
- break;
- case tflite::BuiltinOperator_RESHAPE:
- outputs = _opCreator->convertReshape(op->builtin_options.AsReshapeOptions(), inputs);
- break;
- case tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
- outputs = _opCreator->convertResizeNearestNeighbor(
- op->builtin_options.AsResizeNearestNeighborOptions(), inputs);
- break;
- case tflite::BuiltinOperator_MEAN:
- outputs = _opCreator->convertMean(op->builtin_options.AsReducerOptions(), inputs);
- break;
- case tflite::BuiltinOperator_FULLY_CONNECTED:
- outputs =
- _opCreator->convertFullyConnected(op->builtin_options.AsFullyConnectedOptions(), inputs);
- break;
- case tflite::BuiltinOperator_SOFTMAX:
- outputs = _opCreator->convertSoftmax(op->builtin_options.AsSoftmaxOptions(), inputs);
- break;
- case tflite::BuiltinOperator_SLICE:
- outputs = _opCreator->convertSlice(op->builtin_options.AsSliceOptions(), inputs);
- break;
- case tflite::BuiltinOperator_SQUEEZE:
- outputs = _opCreator->convertSqueeze(op->builtin_options.AsSqueezeOptions(), inputs);
- break;
- case tflite::BuiltinOperator_LOGISTIC:
- outputs = _opCreator->convertLogistic(inputs);
- break;
- case tflite::BuiltinOperator_RSQRT:
- outputs = _opCreator->convertRsqrt(inputs);
- break;
- case tflite::BuiltinOperator_SQRT:
- outputs = _opCreator->convertSqrt(inputs);
- break;
- case tflite::BuiltinOperator_ADD:
- outputs = _opCreator->convertAdd(op->builtin_options.AsAddOptions(), inputs);
- break;
- case tflite::BuiltinOperator_SUB:
- outputs = _opCreator->convertSub(op->builtin_options.AsSubOptions(), inputs);
- break;
- case tflite::BuiltinOperator_MUL:
- outputs = _opCreator->convertMul(op->builtin_options.AsMulOptions(), inputs);
- break;
- case tflite::BuiltinOperator_DIV:
- outputs = _opCreator->convertDiv(op->builtin_options.AsDivOptions(), inputs);
- break;
- case tflite::BuiltinOperator_MAXIMUM:
- outputs = _opCreator->convertMax(inputs);
- break;
- case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
- outputs = _opCreator->convertSquaredDifference(inputs);
- break;
- case tflite::BuiltinOperator_TRANSPOSE_CONV:
- outputs =
- _opCreator->convertTransposeConv(op->builtin_options.AsTransposeConvOptions(), inputs);
- break;
- case tflite::BuiltinOperator_PAD:
- outputs = _opCreator->convertPad(op->builtin_options.AsPadOptions(), inputs);
- break;
- case tflite::BuiltinOperator_TANH:
- outputs = _opCreator->convertTanh(inputs);
- break;
- case tflite::BuiltinOperator_RELU:
- outputs = _opCreator->convertReLU(inputs);
- break;
- case tflite::BuiltinOperator_RELU6:
- outputs = _opCreator->convertReLU6(inputs);
- break;
- case tflite::BuiltinOperator_TRANSPOSE:
- outputs = _opCreator->convertTranspose(op->builtin_options.AsTransposeOptions(), inputs);
- break;
- case tflite::BuiltinOperator_STRIDED_SLICE:
- outputs =
- _opCreator->convertStridedSlice(op->builtin_options.AsStridedSliceOptions(), inputs);
- break;
- case tflite::BuiltinOperator_LEAKY_RELU:
- outputs = _opCreator->convertLeakyReLU(op->builtin_options.AsLeakyReluOptions(), inputs);
- break;
- case tflite::BuiltinOperator_SHAPE:
- outputs = _opCreator->convertShape(op->builtin_options.AsShapeOptions(), inputs);
- break;
- case tflite::BuiltinOperator_HARD_SWISH:
- outputs = _opCreator->convertHardSwish(op->builtin_options.AsHardSwishOptions(), inputs);
- break;
- default:
- assert(false && "All unsupported types should have been found before this pass.");
- }
-
- assert(outputs.size() == op->outputs.size());
- for (std::size_t i = 0; i < op->outputs.size(); ++i)
- {
- const auto tensor_index = op->outputs[i];
- const tflite::TensorT &tensor = *subgraph->tensors[tensor_index];
-
- mir::TensorType output_type = getMirTensorType(tensor);
-
- // The type should have been inferred correctly, except for quantization information.
- assert(outputs[i]->getType().getElementType() == output_type.getElementType() &&
- outputs[i]->getType().getShape() == output_type.getShape());
-
- outputs[i]->setName(tensor.name);
- outputs[i]->setType(output_type);
-
- assert(_tensorMap[tensor_index] == nullptr);
- _tensorMap[tensor_index] = outputs[i];
- }
-}
-
-std::vector<mir::Operation::Output *>
-TfliteImporter::getMIRInputsForOperator(const tflite::SubGraphT *subgraph,
- const tflite::OperatorT *op)
-{
- std::vector<mir::Operation::Output *> inputs;
-
- for (const auto tensor_index : op->inputs)
- {
- const tflite::TensorT &tensor = *subgraph->tensors[tensor_index];
- const tflite::BufferT &buffer = *_model->buffers[tensor.buffer];
- if (!buffer.data.empty())
- {
- assert(_tensorMap[tensor_index] == nullptr);
- mir::TensorType type = getMirTensorType(tensor);
- mir::TensorVariant mir_tensor{type, buffer.data.data()};
- inputs.emplace_back(_graph->create<mir::ops::ConstantOp>(mir_tensor)->getOutput(0));
- }
- else
- {
- assert(_tensorMap[tensor_index] != nullptr);
- // By this point every input for the operation "op" should have corresponding
- // Model IR operations that output its inputs. This assumption is provided by the fact
- // that TFLite format specifies all operations in the execution order.
- inputs.emplace_back(_tensorMap[tensor_index]);
- }
- }
-
- return inputs;
-}
-
-} // namespace
-
-std::unique_ptr<mir::Graph> loadModel(std::string filename)
-{
- TfliteImporter importer(std::move(filename));
- return importer.importModel();
-}
-
-} // namespace mir_tflite
diff --git a/compiler/mir-tflite-importer/tflite_op_creator.cpp b/compiler/mir-tflite-importer/tflite_op_creator.cpp
deleted file mode 100644
index 5f4279f55..000000000
--- a/compiler/mir-tflite-importer/tflite_op_creator.cpp
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite_op_creator.h"
-#include "schema_generated.h"
-
-#include "mir/ops/AddOp.h"
-#include "mir/ops/AvgPool2DOp.h"
-#include "mir/ops/CappedReluOp.h"
-#include "mir/ops/ConcatOp.h"
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/Conv2DOp.h"
-#include "mir/ops/Deconv2DOp.h"
-#include "mir/ops/DepthwiseConv2DOp.h"
-#include "mir/ops/DivOp.h"
-#include "mir/ops/FullyConnectedOp.h"
-#include "mir/ops/HardSwishOp.h"
-#include "mir/ops/LeakyReluOp.h"
-#include "mir/ops/MaxOp.h"
-#include "mir/ops/MaxPool2DOp.h"
-#include "mir/ops/MulOp.h"
-#include "mir/ops/PadOp.h"
-#include "mir/ops/ReduceMeanOp.h"
-#include "mir/ops/ReluOp.h"
-#include "mir/ops/ReshapeOp.h"
-#include "mir/ops/ResizeOp.h"
-#include "mir/ops/SigmoidOp.h"
-#include "mir/ops/SliceOp.h"
-#include "mir/ops/SoftmaxOp.h"
-#include "mir/ops/SqrtOp.h"
-#include "mir/ops/SqueezeOp.h"
-#include "mir/ops/SubOp.h"
-#include "mir/ops/TanhOp.h"
-#include "mir/ops/TransposeOp.h"
-
-#include "mir/Shape.h"
-#include "mir/ShapeRange.h"
-#include "mir/Tensor.h"
-
-namespace mir_tflite
-{
-
-namespace ops = mir::ops;
-using mir::Shape;
-
-static mir::ops::PaddingType convertPadding(tflite::Padding padding)
-{
- switch (padding)
- {
- case tflite::Padding_VALID:
- return mir::ops::PaddingType::Valid;
- case tflite::Padding_SAME:
- return mir::ops::PaddingType::SameUpper;
- default:
- assert(false);
- }
-}
-
-// TODO Move this to MIR?
-static void calculatePadding(mir::ops::PaddingType padding_type, const mir::Shape &input_shape,
- const std::vector<std::int32_t> &window_size,
- const std::vector<std::int32_t> &strides,
- std::vector<std::int32_t> &padding_before,
- std::vector<std::int32_t> &padding_after)
-{
- constexpr int num_spatial_dims = 2;
- assert(window_size.size() == num_spatial_dims);
- assert(strides.size() == num_spatial_dims);
- assert(padding_before.size() == num_spatial_dims);
- assert(padding_after.size() == num_spatial_dims);
-
- switch (padding_type)
- {
- case mir::ops::PaddingType::SameUpper:
- for (int i = 0; i < num_spatial_dims; ++i)
- {
- // Assuming NHWC format.
- const std::int32_t total_padding =
- (input_shape.dim(1 + i) % strides[i] == 0)
- ? std::max(0, window_size[i] - strides[i])
- : std::max(0, window_size[i] - input_shape.dim(1 + i) % strides[i]);
- padding_before[i] = total_padding / 2;
- padding_after[i] = total_padding - padding_before[i];
- }
- break;
- case mir::ops::PaddingType::Valid:
- for (int i = 0; i < num_spatial_dims; ++i)
- {
- padding_before[i] = 0;
- padding_after[i] = 0;
- }
- break;
- default:
- assert(false);
- }
-}
-
-template <typename VectorT>
-static std::vector<VectorT> convertIntTensorToVector(const mir::Tensor<int32_t> &tensor)
-{
- std::vector<VectorT> v;
- for (const auto &i : mir::ShapeRange(tensor.getShape()))
- v.emplace_back(static_cast<VectorT>(tensor.at(i)));
- return v;
-}
-
-static const mir::TensorVariant &extractTensor(const mir::Operation::Output *output)
-{
- auto constant_op = dynamic_cast<const ops::ConstantOp *>(output->getNode());
- if (constant_op == nullptr)
- throw std::runtime_error("Non-constant input is not supported.");
- return constant_op->getValue();
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertConv2D(const tflite::Conv2DOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
- auto kernel = inputs.at(1);
- auto bias = inputs.at(2);
-
- mir::Conv2DOpAttributes attributes;
- attributes.strides = {opts->stride_h, opts->stride_w};
-
- const auto padding_type = convertPadding(opts->padding);
- const auto &input_shape = input->getShape();
- const auto &kernel_shape = kernel->getShape();
- const auto &strides = attributes.strides;
- auto &pad_before = attributes.padding_before;
- auto &pad_after = attributes.padding_after;
- std::vector<std::int32_t> kernel_size{kernel_shape.dim(1), kernel_shape.dim(2)};
- calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
-
- mir::Operation::Output *result;
- if (input->getType().isQuantized())
- {
- result = createOp<ops::Conv2DOp>(input, kernel, bias, attributes)->getOutput(0);
- }
- else // TODO Fuse bias to other backends
- {
- result = createOp<ops::Conv2DOp>(input, kernel, attributes)->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
- return {addFusedActivation(result, opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertDepthwiseConv2D(const tflite::DepthwiseConv2DOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
- auto kernel = inputs.at(1);
- auto bias = inputs.at(2);
-
- // OHWI -> HWIO
- const std::vector<std::size_t> axis_order{1, 2, 3, 0};
- kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
-
- mir::Conv2DOpAttributes attributes;
- attributes.strides = {opts->stride_h, opts->stride_w};
-
- const auto padding_type = convertPadding(opts->padding);
- const auto &input_shape = input->getShape();
- const auto &kernel_shape = kernel->getShape();
- std::vector<std::int32_t> kernel_size{kernel_shape.dim(0), kernel_shape.dim(1)};
- const auto &strides = attributes.strides;
- auto &pad_before = attributes.padding_before;
- auto &pad_after = attributes.padding_after;
- calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
-
- mir::Operation::Output *result;
- if (input->getType().isQuantized())
- {
- result = createOp<ops::DepthwiseConv2DOp>(input, kernel, bias, attributes)->getOutput(0);
- }
- else // TODO Fuse bias to other backends
- {
- result = createOp<ops::DepthwiseConv2DOp>(input, kernel, attributes)->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
- return {addFusedActivation(result, opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertConcatenation(const tflite::ConcatenationOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto result = createOp<ops::ConcatOp>(inputs, opts->axis);
- return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertMaxPool2D(const tflite::Pool2DOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- const auto &input_shape = input->getShape();
-
- mir::MaxPool2DOpAttributes attributes;
- attributes.window = {opts->filter_height, opts->filter_width};
- attributes.strides = {opts->stride_h, opts->stride_w};
-
- const auto padding_type = convertPadding(opts->padding);
- const auto &window_size = attributes.window;
- const auto &strides = attributes.strides;
- auto &pad_before = attributes.padding_before;
- auto &pad_after = attributes.padding_after;
- calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
-
- auto result = createOp<ops::MaxPool2DOp>(input, attributes);
- return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertAveragePool2D(const tflite::Pool2DOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- const auto &input_shape = input->getShape();
-
- mir::AvgPool2DOpAttributes attributes;
- attributes.window = {opts->filter_height, opts->filter_width};
- attributes.strides = {opts->stride_h, opts->stride_w};
- attributes.include_pad = false;
-
- const auto padding_type = convertPadding(opts->padding);
- const auto &window_size = attributes.window;
- const auto &strides = attributes.strides;
- auto &pad_before = attributes.padding_before;
- auto &pad_after = attributes.padding_after;
- calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
-
- auto result = createOp<ops::AvgPool2DOp>(input, attributes);
- return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertSoftmax(const tflite::SoftmaxOptionsT * /*opts*/,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- // Softmax in TFLite is always 2-D.
- assert(input->getShape().rank() == 2);
- const int32_t axis = 1;
- auto result = createOp<ops::SoftmaxOp>(input, axis);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertSlice(const tflite::SliceOptionsT * /*opts*/,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
- mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
- mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(2)));
-
- Shape starts(convertIntTensorToVector<int32_t>(begin_tensor));
- Shape sizes(convertIntTensorToVector<int32_t>(size_tensor));
- auto result = createOp<ops::SliceOp>(input, starts, sizes);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertReshape(const tflite::ReshapeOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- // TODO: we should also support "-1" values in new_shape, which means that correct
- // shape values must be calculated. Better do it in the shape inference module.
- Shape new_shape(opts->new_shape.size());
- for (int i = 0; i < opts->new_shape.size(); ++i)
- {
- new_shape.dim(i) = opts->new_shape[i];
- }
- auto result = createOp<ops::ReshapeOp>(input, new_shape);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertTransposeConv(const tflite::TransposeConvOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- mir::Tensor<int32_t> output_shape_tensor(extractTensor(inputs.at(0)));
- auto kernel = inputs.at(1);
- auto input = inputs.at(2);
-
- mir::Deconv2DOpAttributes attributes;
- attributes.strides = {opts->stride_h, opts->stride_w};
- Shape output_shape(convertIntTensorToVector<int32_t>(output_shape_tensor));
-
- // OHWI -> HWOI
- const std::vector<std::size_t> axis_order{1, 2, 0, 3};
- kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
-
- attributes.padding_type = convertPadding(opts->padding);
- auto result = createOp<ops::DeConv2DOp>(input, kernel, attributes, output_shape)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertResizeNearestNeighbor(const tflite::ResizeNearestNeighborOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- if (opts->align_corners)
- throw std::runtime_error("'align_corners' is not currently supported");
-
- auto input = inputs.at(0);
- mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(1)));
-
- const auto &input_shape = input->getShape();
- Shape res_shape{input_shape.dim(0), size_tensor.at(mir::Index{0}), size_tensor.at(mir::Index{1}),
- input_shape.dim(3)};
- auto result =
- createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertAdd(const tflite::AddOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 2);
- auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
- return {addFusedActivation(result, opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertSub(const tflite::SubOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 2);
- auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
- return {addFusedActivation(result, opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertMul(const tflite::MulOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 2);
- auto result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
- return {addFusedActivation(result, opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertDiv(const tflite::DivOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 2);
- auto result = createOp<ops::DivOp>(inputs[0], inputs[1])->getOutput(0);
- return {addFusedActivation(result, opts->fused_activation_function)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertMax(const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 2);
- auto result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 2);
- auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
- result = createOp<ops::MulOp>(result, result)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertMean(const tflite::ReducerOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
- mir::Tensor<int32_t> axes_tensor(extractTensor(inputs.at(1)));
-
- std::vector<int32_t> axes = convertIntTensorToVector<int32_t>(axes_tensor);
- auto result = createOp<ops::ReduceMeanOp>(input, axes, opts->keep_dims);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertFullyConnected(const tflite::FullyConnectedOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
- auto weights = inputs.at(1);
- auto bias = inputs.at(2);
-
- // Flatten input to 2-D shape.
- const auto &input_shape = input->getShape();
- int32_t outer_size = input_shape.dim(0);
- int32_t inner_size = input_shape.numElements() / outer_size;
- auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size})->getOutput(0);
-
- // Transpose the weights.
- const std::vector<std::size_t> axis_order{1, 0};
- weights = createOp<ops::TransposeOp>(weights, axis_order)->getOutput(0);
-
- mir::Operation::Output *result;
- if (input->getType().isQuantized())
- {
- result = createOp<ops::FullyConnectedOp>(flatten, weights, bias)->getOutput(0);
- }
- else // TODO Fuse bias to other backends
- {
- result = createOp<ops::FullyConnectedOp>(flatten, weights)->getOutput(0);
- result = createOp<ops::AddOp>(result, bias)->getOutput(0);
- }
- return {addFusedActivation(result, opts->fused_activation_function)};
-}
-
-mir::Operation::Output *
-TFLiteOpCreator::addFusedActivation(mir::Operation::Output *input,
- tflite::ActivationFunctionType activation_type)
-{
- switch (activation_type)
- {
- case tflite::ActivationFunctionType_NONE:
- return input;
- case tflite::ActivationFunctionType_RELU:
- return createOp<ops::ReluOp>(input)->getOutput(0);
- case tflite::ActivationFunctionType_RELU6:
- return createOp<ops::CappedReluOp>(input, 6)->getOutput(0);
- case tflite::ActivationFunctionType_TANH:
- return createOp<ops::TanhOp>(input)->getOutput(0);
- default:
- throw std::runtime_error(std::string("Unsupported activation type: ") +
- tflite::EnumNameActivationFunctionType(activation_type));
- }
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertSqueeze(const tflite::SqueezeOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- std::vector<int32_t> squeeze_dims(opts->squeeze_dims.begin(), opts->squeeze_dims.end());
- auto result = createOp<ops::SqueezeOp>(input, squeeze_dims);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertPad(const tflite::PadOptionsT * /*opts*/,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
- mir::Tensor<int32_t> paddings_tensor(extractTensor(inputs.at(1)));
-
- const auto &input_shape = input->getShape();
- const int num_dims = input_shape.rank();
-
- mir::PadOpAttributes attributes(num_dims);
- for (int i = 0; i < num_dims; i++)
- {
- attributes.padding_before[i] = paddings_tensor.at(mir::Index({i, 0}));
- attributes.padding_after[i] = paddings_tensor.at(mir::Index({i, 1}));
- }
-
- auto result = createOp<ops::PadOp>(input, attributes)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertTanh(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- auto result = createOp<ops::TanhOp>(input);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertReLU(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- auto result = createOp<ops::ReluOp>(input);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertReLU6(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- auto result = createOp<ops::CappedReluOp>(input, 6);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertRsqrt(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- const float one_value = 1.0f;
- mir::TensorVariant one_tensor({mir::DataType::FLOAT32, {}}, &one_value);
- auto one = createOp<ops::ConstantOp>(one_tensor)->getOutput(0);
- auto sqrt = createOp<ops::SqrtOp>(input)->getOutput(0);
- auto result = createOp<ops::DivOp>(one, sqrt)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertSqrt(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- auto result = createOp<ops::SqrtOp>(input)->getOutput(0);
- return {result};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertLogistic(const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- auto result = createOp<ops::SigmoidOp>(input);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertTranspose(const tflite::TransposeOptionsT * /*opts*/,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
- mir::Tensor<int32_t> perm_tensor(extractTensor(inputs.at(1)));
-
- std::vector<std::size_t> axis_order = convertIntTensorToVector<std::size_t>(perm_tensor);
- auto result = createOp<ops::TransposeOp>(input, axis_order);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertStridedSlice(const tflite::StridedSliceOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- if (opts->ellipsis_mask != 0)
- throw std::runtime_error("StridedSlice: parameter 'ellipsis_mask' is not supported.");
-
- if (opts->new_axis_mask != 0)
- throw std::runtime_error("StridedSlice: parameter 'new_axis_mask' is not supported.");
-
- auto input = inputs.at(0);
- mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
- mir::Tensor<int32_t> end_tensor(extractTensor(inputs.at(2)));
- mir::Tensor<int32_t> strides_tensor(extractTensor(inputs.at(3)));
-
- std::vector<int32_t> begin = convertIntTensorToVector<int32_t>(begin_tensor);
- std::vector<int32_t> end = convertIntTensorToVector<int32_t>(end_tensor);
- std::vector<int32_t> strides = convertIntTensorToVector<int32_t>(strides_tensor);
-
- int32_t begin_mask = opts->begin_mask;
- int32_t end_mask = opts->end_mask;
- int32_t shrink_axis_mask = opts->shrink_axis_mask;
-
- const auto &input_shape = input->getShape();
- int32_t num_dims = input_shape.rank();
-
- for (int32_t stride : strides)
- {
- if (stride != 1)
- throw std::runtime_error("StridedSlice: parameter 'strides' is not supported");
- }
-
- Shape start(num_dims);
- Shape size(num_dims);
- std::vector<int32_t> squeeze_dims;
- for (int axis = 0; axis < num_dims; axis++)
- {
- if (static_cast<uint32_t>(begin_mask) & (1u << static_cast<uint32_t>(axis)))
- start.dim(axis) = 0;
- else
- start.dim(axis) = begin.at(static_cast<uint64_t>(axis));
-
- if (static_cast<uint32_t>(end_mask) & (1u << static_cast<uint32_t>(axis)))
- size.dim(axis) = input_shape.dim(axis) - start.dim(axis);
- else
- size.dim(axis) = end.at(static_cast<uint64_t>(axis)) - start.dim(axis);
-
- if (static_cast<uint32_t>(shrink_axis_mask) & (1u << static_cast<uint32_t>(axis)))
- squeeze_dims.push_back(axis);
- }
-
- auto result = createOp<ops::SliceOp>(input, start, size);
- result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertLeakyReLU(const tflite::LeakyReluOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto input = inputs.at(0);
-
- auto result = createOp<ops::LeakyReluOp>(input, opts->alpha);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertShape(const tflite::ShapeOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- if (opts->out_type != tflite::TensorType_INT32)
- {
- throw std::runtime_error(std::string("SHAPE: Unsupported tensor type: ") +
- EnumNameTensorType(opts->out_type));
- }
-
- const auto &input_shape = inputs[0]->getShape();
- int32_t rank = input_shape.rank();
- std::vector<int32_t> data;
- data.reserve(static_cast<uint64_t>(rank));
- for (int32_t i = 0; i < rank; i++)
- data.emplace_back(input_shape.dim(i));
- mir::TensorVariant tensor({mir::DataType::INT32, {rank}}, data.data());
- auto result = createOp<ops::ConstantOp>(tensor);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-TFLiteOpCreator::convertHardSwish(const tflite::HardSwishOptionsT *opts,
- const std::vector<mir::Operation::Output *> &inputs)
-{
- auto result = createOp<ops::HardSwishOp>(inputs[0])->getOutput(0);
- return {result};
-}
-
-} // namespace mir_tflite
diff --git a/compiler/mir/CMakeLists.txt b/compiler/mir/CMakeLists.txt
index ecb1de2db..4c89893cc 100644
--- a/compiler/mir/CMakeLists.txt
+++ b/compiler/mir/CMakeLists.txt
@@ -36,3 +36,8 @@ set_target_properties(mir PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(mir PROPERTIES LINKER_LANGUAGE CXX)
add_subdirectory(unittests)
+
+add_subdirectory(src/mir_caffe_importer)
+add_subdirectory(src/mir_caffe2_importer)
+add_subdirectory(src/mir_tflite_importer)
+add_subdirectory(src/mir_onnx_importer)
diff --git a/compiler/mir-caffe2-importer/caffe2_importer.h b/compiler/mir/include/mir_caffe2_importer/caffe2_importer.h
index 213fbe98d..213fbe98d 100644
--- a/compiler/mir-caffe2-importer/caffe2_importer.h
+++ b/compiler/mir/include/mir_caffe2_importer/caffe2_importer.h
diff --git a/compiler/mir-caffe-importer/caffe_importer.h b/compiler/mir/include/mir_caffe_importer/caffe_importer.h
index cf2c055bc..cf2c055bc 100644
--- a/compiler/mir-caffe-importer/caffe_importer.h
+++ b/compiler/mir/include/mir_caffe_importer/caffe_importer.h
diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.h b/compiler/mir/include/mir_onnx_importer/ONNXImporterImpl.h
index 02a49b330..02a49b330 100644
--- a/compiler/mir-onnx-importer/ONNXImporterImpl.h
+++ b/compiler/mir/include/mir_onnx_importer/ONNXImporterImpl.h
diff --git a/compiler/mir-tflite-importer/tflite_importer.h b/compiler/mir/include/mir_tflite_importer/tflite_importer.h
index 85cd01ee9..85cd01ee9 100644
--- a/compiler/mir-tflite-importer/tflite_importer.h
+++ b/compiler/mir/include/mir_tflite_importer/tflite_importer.h
diff --git a/compiler/mir/src/mir_caffe2_importer/CMakeLists.txt b/compiler/mir/src/mir_caffe2_importer/CMakeLists.txt
new file mode 100644
index 000000000..a5537815d
--- /dev/null
+++ b/compiler/mir/src/mir_caffe2_importer/CMakeLists.txt
@@ -0,0 +1,28 @@
+nnas_find_package(PytorchSource QUIET)
+nnas_find_package(Protobuf QUIET)
+
+if (NOT PytorchSource_FOUND OR NOT Protobuf_FOUND)
+ return()
+endif()
+
+Protobuf_Generate(CAFFE2_PROTO "${CMAKE_CURRENT_BINARY_DIR}/generated/caffe2"
+ "${PytorchSource_DIR}" "caffe2/proto/caffe2.proto")
+
+add_library(caffe2proto STATIC ${CAFFE2_PROTO_SOURCES})
+set_target_properties(caffe2proto PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(caffe2proto PUBLIC ${CAFFE2_PROTO_INCLUDE_DIRS})
+target_link_libraries(caffe2proto PUBLIC libprotobuf)
+
+
+set(MIR_CAFFE2_IMPORTER_SOURCES
+ caffe2_importer.cpp
+ caffe2_op_creator.cpp
+ caffe2_op_creator.h
+ caffe2_op_types.h
+ caffe2_proto_helper.cpp
+ caffe2_proto_helper.h)
+
+add_library(mir_caffe2_importer STATIC ${MIR_CAFFE2_IMPORTER_SOURCES})
+set_target_properties(mir_caffe2_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(mir_caffe2_importer PUBLIC ../../include/mir_caffe2_importer)
+target_link_libraries(mir_caffe2_importer PUBLIC mir PRIVATE caffe2proto nncc_common)
diff --git a/compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp b/compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp
new file mode 100644
index 000000000..812fcc5cc
--- /dev/null
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "caffe2_importer.h"
+#include "caffe2/proto/caffe2.pb.h"
+#include "caffe2_op_types.h"
+#include "caffe2_op_creator.h"
+#include "caffe2_proto_helper.h"
+
+#include "mir/ops/InputOp.h"
+#include "mir/ops/OutputOp.h"
+
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/io/coded_stream.h>
+
+#include <fcntl.h>
+
+#include <cassert>
+#include <cerrno>
+#include <cstring>
+#include <memory>
+#include <stdexcept>
+#include <utility>
+#include <set>
+
+namespace
+{
+
+using namespace mir_caffe2;
+
+class Caffe2Importer
+{
+public:
+ explicit Caffe2Importer(std::string predict_net, std::string init_net,
+ const std::vector<std::vector<int>> &input_shapes);
+
+ /// @brief Load the model and convert it into a MIR Graph.
+ std::unique_ptr<mir::Graph> importModel();
+
+ ~Caffe2Importer();
+
+private:
+ std::string _predictNet;
+ std::string _initNet;
+ std::unique_ptr<mir::Graph> _graph;
+ std::unique_ptr<caffe2::NetDef> _predict_net;
+ std::unique_ptr<caffe2::NetDef> _init_net;
+ std::unique_ptr<Caffe2OpCreator> _opCreator;
+ std::vector<mir::Shape> _inputShapes;
+
+ static const std::map<std::string, SupportedCaffe2OpType> _operatorTypes;
+
+ // Maps Caffe2 operator input names to corresponding MIR operation outputs.
+ std::unordered_map<std::string, mir::Operation::Output *> _blobNameToOutput;
+
+ void import();
+ std::unique_ptr<mir::Graph> createIR();
+
+ /**
+ * @brief Pass through caffe2 graph and collect ops unsupported by NNC
+ * @throw PassException with message, containing detected problems
+ */
+ void collectUnsupportedOps();
+
+ /**
+ * @brief Creating MIR node from single caffe2 operator
+ */
+ void createMIRNodesFromOp(const ::caffe2::OperatorDef &op);
+
+ /**
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
+ */
+ std::vector<mir::Operation::Output *> getInputMIROps(const ::caffe2::OperatorDef &op);
+
+ void setOutputForTensor(const std::string &tensor_name, Operation::Output *output);
+ mir::Operation::Output *getOutputForTensor(const std::string &name) const;
+
+ /**
+ * @brief Mark output MIR nodes
+ */
+ void setGraphOutputs();
+};
+
+using namespace ::caffe2;
+using mir::Shape;
+
+Caffe2Importer::Caffe2Importer(std::string predict_net, std::string init_net,
+ const std::vector<std::vector<int>> &input_shapes)
+ : _predictNet(std::move(predict_net)), _initNet(std::move(init_net))
+{
+ for (auto &shape : input_shapes)
+ _inputShapes.emplace_back(shape);
+
+ _graph = std::make_unique<mir::Graph>();
+ _opCreator = std::make_unique<Caffe2OpCreator>(_graph.get());
+}
+
+Caffe2Importer::~Caffe2Importer() = default;
+
+static void loadModelFile(const std::string &filename, caffe2::NetDef *net)
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ int file_handle = open(filename.c_str(), O_RDONLY);
+
+ if (file_handle == -1)
+ throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
+ ".");
+
+ google::protobuf::io::FileInputStream file_stream(file_handle);
+ file_stream.SetCloseOnDelete(true);
+
+ google::protobuf::io::CodedInputStream coded_stream(&file_stream);
+ coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);
+
+ if (!net->ParseFromCodedStream(&coded_stream))
+ throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
+
+ // If the file has not been consumed entirely, assume that the file is in the wrong format.
+ if (!coded_stream.ConsumedEntireMessage())
+ throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
+}
+
+void Caffe2Importer::import()
+{
+ _predict_net = std::make_unique<NetDef>();
+ loadModelFile(_predictNet, _predict_net.get());
+
+ _init_net = std::make_unique<NetDef>();
+ loadModelFile(_initNet, _init_net.get());
+
+ collectUnsupportedOps();
+}
+
+std::unique_ptr<mir::Graph> Caffe2Importer::createIR()
+{
+ // Load initializers.
+ for (const auto &op : _init_net->op())
+ createMIRNodesFromOp(op);
+
+ // Create inputs. This has to be done after processing initializers, because they may contain
+ // fake inputs.
+ // TODO Caffe2 does not provide a way to detect model inputs and outputs. For now assume that:
+ // - there is exactly one input;
+ // - the input is for the first layer;
+ // - the input has 'float' element type.
+ const auto &input_name = _predict_net->op(0).input(0);
+ mir::TensorType input_type(mir::DataType::FLOAT32, _inputShapes[0]);
+ auto input = _graph->create<mir::ops::InputOp>(input_type)->getOutput(0);
+ setOutputForTensor(input_name, input);
+
+ for (const auto &op : _predict_net->op())
+ createMIRNodesFromOp(op);
+
+ setGraphOutputs();
+
+ return std::move(_graph);
+}
+
+std::unique_ptr<mir::Graph> Caffe2Importer::importModel()
+{
+ import();
+ return createIR();
+}
+
+void Caffe2Importer::collectUnsupportedOps()
+{
+ std::set<std::string> unsupportedOps;
+ for (const auto &op : _predict_net->op())
+ {
+ if (_operatorTypes.find(op.type()) == _operatorTypes.end())
+ unsupportedOps.insert(op.type());
+ }
+
+ if (!unsupportedOps.empty())
+ {
+ std::string exceptionMsg("Can't load model, unsupported operators:");
+ for (const auto &op : unsupportedOps)
+ exceptionMsg.append("\n * " + op);
+ throw std::runtime_error(exceptionMsg);
+ }
+}
+
+void Caffe2Importer::createMIRNodesFromOp(const OperatorDef &op)
+{
+ std::vector<mir::Operation::Output *> outputs;
+
+ auto inputs = getInputMIROps(op);
+
+ SupportedCaffe2OpType opType = _operatorTypes.at(op.type());
+ switch (opType)
+ {
+ case SupportedCaffe2OpType::constantFill:
+ case SupportedCaffe2OpType::givenTensorFill:
+ case SupportedCaffe2OpType::givenTensorInt64Fill:
+ outputs = _opCreator->convertConstant(inputs, op);
+ break;
+ case SupportedCaffe2OpType::add:
+ outputs = _opCreator->convertAdd(inputs, op);
+ break;
+ case SupportedCaffe2OpType::averagePool:
+ outputs = _opCreator->convertAveragePool(inputs, op);
+ break;
+ case SupportedCaffe2OpType::conv:
+ outputs = _opCreator->convertConv(inputs, op);
+ break;
+ case SupportedCaffe2OpType::concat:
+ outputs = _opCreator->convertConcat(inputs, op);
+ break;
+ case SupportedCaffe2OpType::dropout:
+ outputs = _opCreator->convertDropout(inputs, op);
+ break;
+ case SupportedCaffe2OpType::FC:
+ outputs = _opCreator->convertFC(inputs, op);
+ break;
+ case SupportedCaffe2OpType::maxPool:
+ outputs = _opCreator->convertMaxPool(inputs, op);
+ break;
+ case SupportedCaffe2OpType::mul:
+ outputs = _opCreator->convertMul(inputs, op);
+ break;
+ case SupportedCaffe2OpType::relu:
+ outputs = _opCreator->convertRelu(inputs);
+ break;
+ case SupportedCaffe2OpType::resizeNearest:
+ outputs = _opCreator->convertResizeNearest(inputs, op);
+ break;
+ case SupportedCaffe2OpType::sigmoid:
+ outputs = _opCreator->convertSigmoid(inputs);
+ break;
+ case SupportedCaffe2OpType::softmax:
+ outputs = _opCreator->convertSoftmax(inputs, op);
+ break;
+ case SupportedCaffe2OpType::spatialBN:
+ outputs = _opCreator->convertSpatialBN(inputs, op);
+ break;
+ case SupportedCaffe2OpType::sum:
+ outputs = _opCreator->convertSum(inputs);
+ break;
+ case SupportedCaffe2OpType::clip:
+ outputs = _opCreator->convertClip(inputs, op);
+ break;
+ case SupportedCaffe2OpType::reshape:
+ outputs = _opCreator->convertReshape(inputs, op);
+ break;
+ default:
+ assert(false && "All unsupported types should have been found before this pass.");
+ }
+
+ for (size_t i = 0; i < outputs.size(); ++i)
+ {
+ setOutputForTensor(op.output(i), outputs[i]);
+ }
+}
+
+std::vector<mir::Operation::Output *> Caffe2Importer::getInputMIROps(const OperatorDef &op)
+{
+ std::vector<mir::Operation::Output *> inputs;
+
+ for (const auto &input_name : op.input())
+ {
+ inputs.push_back(getOutputForTensor(input_name));
+ }
+
+ return inputs;
+}
+
+void Caffe2Importer::setOutputForTensor(const std::string &tensor_name, Operation::Output *output)
+{
+ auto it = _blobNameToOutput.find(tensor_name);
+ if (it != _blobNameToOutput.cend())
+ {
+ // caffe2 input blob name could be same as output blob name, and next line will overwrite
+ // '_blobNameToOpOutput' element, but in all networks that I saw it was not a problem
+ it->second->setName("");
+ }
+ output->setName(tensor_name);
+ _blobNameToOutput[tensor_name] = output;
+}
+
+mir::Operation::Output *Caffe2Importer::getOutputForTensor(const std::string &name) const
+{
+ return _blobNameToOutput.at(name);
+}
+
+void Caffe2Importer::setGraphOutputs()
+{
+ // Create outputs.
+ // TODO Caffe2 does not provide a way to detect model inputs and outputs. For now assume that:
+ // - there is exactly one output;
+ // - the output is from the last layer.
+ const auto &output_name = _predict_net->op().rbegin()->output(0);
+ auto output = getOutputForTensor(output_name);
+ _graph->create<mir::ops::OutputOp>(output);
+}
+
+const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
+ {"Add", SupportedCaffe2OpType::add},
+ {"AveragePool", SupportedCaffe2OpType::averagePool},
+ {"Conv", SupportedCaffe2OpType::conv},
+ {"Concat", SupportedCaffe2OpType::concat},
+ {"ConstantFill", SupportedCaffe2OpType::constantFill},
+ {"Dropout", SupportedCaffe2OpType::dropout},
+ {"FC", SupportedCaffe2OpType::FC},
+ {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
+ {"MaxPool", SupportedCaffe2OpType::maxPool},
+ {"Mul", SupportedCaffe2OpType::mul},
+ {"Relu", SupportedCaffe2OpType::relu},
+ {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
+ {"Sigmoid", SupportedCaffe2OpType::sigmoid},
+ {"Softmax", SupportedCaffe2OpType::softmax},
+ {"SpatialBN", SupportedCaffe2OpType::spatialBN},
+ {"Sum", SupportedCaffe2OpType::sum},
+ {"Clip", SupportedCaffe2OpType::clip},
+ {"Reshape", SupportedCaffe2OpType::reshape},
+ {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
+};
+}
+
+namespace mir_caffe2
+{
+
+std::unique_ptr<mir::Graph> loadModel(std::string predict_net, std::string init_net,
+ const std::vector<std::vector<int>> &input_shapes)
+{
+ Caffe2Importer importer(std::move(predict_net), std::move(init_net), input_shapes);
+ return importer.importModel();
+}
+
+} // namespace mir_caffe2
diff --git a/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp b/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp
new file mode 100644
index 000000000..3390f4482
--- /dev/null
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "caffe2_op_creator.h"
+#include "caffe2_proto_helper.h"
+
+#include "mir/ops/AddOp.h"
+#include "mir/ops/AvgPool2DOp.h"
+#include "mir/ops/CappedReluOp.h"
+#include "mir/ops/ConcatOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/Conv2DOp.h"
+#include "mir/ops/FullyConnectedOp.h"
+#include "mir/ops/MaxPool2DOp.h"
+#include "mir/ops/MulOp.h"
+#include "mir/ops/ReluOp.h"
+#include "mir/ops/ReshapeOp.h"
+#include "mir/ops/ResizeOp.h"
+#include "mir/ops/SigmoidOp.h"
+#include "mir/ops/SoftmaxOp.h"
+#include "mir/ops/TransposeOp.h"
+
+#include "mir/Index.h"
+#include "mir/Shape.h"
+#include "mir/ShapeRange.h"
+#include "mir/Tensor.h"
+#include "mir/TensorUtil.h"
+
+#include <cmath>
+#include <stdexcept>
+#include <vector>
+
+namespace mir_caffe2
+{
+
+using namespace ::caffe2;
+using namespace mir;
+
+//
+// Helper functions
+//
+
+static std::pair<std::vector<int32_t>, std::vector<int32_t>>
+getPadding(const ::caffe2::OperatorDef &op)
+{
+
+ if (hasArgument(op.arg(), "pads"))
+ {
+ // pads order: t l b r
+ auto pads_arg = findArgumentByName(op.arg(), "pads");
+
+ std::vector<int32_t> paddings;
+ for (const auto &pad : pads_arg.ints())
+ paddings.push_back(static_cast<int32_t>(pad));
+
+ assert(paddings.size() == 4);
+
+ int32_t pad_t = paddings[0];
+ int32_t pad_l = paddings[1];
+ int32_t pad_b = paddings[2];
+ int32_t pad_r = paddings[3];
+
+ std::vector<int32_t> padding_before{pad_t, pad_l};
+ std::vector<int32_t> padding_after{pad_b, pad_r};
+ return {padding_before, padding_after};
+ }
+
+ bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
+ hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
+
+ if (has_custom_pad)
+ {
+ int32_t pad_l = getSingleArgument(op, "pad_l", 0);
+ int32_t pad_t = getSingleArgument(op, "pad_t", 0);
+ int32_t pad_r = getSingleArgument(op, "pad_r", 0);
+ int32_t pad_b = getSingleArgument(op, "pad_b", 0);
+
+ std::vector<int32_t> padding_before{pad_t, pad_l};
+ std::vector<int32_t> padding_after{pad_b, pad_r};
+ return {padding_before, padding_after};
+ }
+
+ int32_t pad = getSingleArgument(op, "pad", 0);
+ return {{pad, pad}, {pad, pad}};
+}
+
+static std::vector<std::int32_t> getStrides(const ::caffe2::OperatorDef &op)
+{
+ std::vector<std::int32_t> strides;
+
+ if (hasArgument(op.arg(), "stride"))
+ {
+ std::int32_t stride = getSingleArgument(op, "stride", 1);
+ strides = {stride, stride};
+ }
+
+ if (hasArgument(op.arg(), "strides"))
+ {
+ // strides order: h w
+ auto strides_arg = findArgumentByName(op.arg(), "strides");
+ for (const auto &s : strides_arg.ints())
+ strides.push_back(s);
+ }
+
+ assert(!strides.empty() && "Strides not found");
+
+ return strides;
+}
+
+static std::vector<std::int32_t> getWindowSize(const ::caffe2::OperatorDef &op,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ int is_global_pooling = getSingleArgument(op, "global_pooling", 0);
+ bool has_custom_kernel_size =
+ hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
+ bool has_custom_kernels_size = hasArgument(op.arg(), "kernels");
+
+ int kernel_h(0), kernel_w(0);
+ if (is_global_pooling)
+ {
+ const auto &input_shape = inputs[0]->getShape();
+ assert(input_shape.rank() == 4 && "getWindowSize() inputs must be of rank 4");
+ kernel_h = input_shape.dim(2);
+ kernel_w = input_shape.dim(3);
+ }
+ else
+ {
+ if (has_custom_kernel_size)
+ {
+ kernel_h = getSingleArgument(op, "kernel_h", 0);
+ kernel_w = getSingleArgument(op, "kernel_w", 0);
+ }
+ else
+ {
+ if (has_custom_kernels_size)
+ {
+ // kernels order: h w
+ std::vector<int32_t> kernels;
+ auto kernels_arg = findArgumentByName(op.arg(), "kernels");
+ for (const auto &ker : kernels_arg.ints())
+ kernels.push_back(static_cast<int32_t>(ker));
+ assert(kernels.size() == 2);
+ kernel_h = kernels[0];
+ kernel_w = kernels[1];
+ }
+ else
+ {
+ kernel_h = kernel_w = getSingleArgument(op, "kernel", 0);
+ }
+ }
+ }
+ return {kernel_h, kernel_w};
+}
+
+//
+// Check functions
+//
+
+static void checkLayout(const OperatorDef &op)
+{
+ if (getSingleArgument(op, "order", "NCHW") != "NCHW")
+ throw std::runtime_error(op.type() + ": only 'NCHW' axis order is supported");
+}
+
+static void checkConvLikeOp(const ::caffe2::OperatorDef &op)
+{
+ checkLayout(op);
+
+ // Padding
+ bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
+ hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
+
+ if (has_custom_pad && hasArgument(op.arg(), "pad"))
+ throw std::runtime_error("Custom pad can't be combined with overall pad");
+
+ if (has_custom_pad &&
+ !(hasArgument(op.arg(), "pad_l") && hasArgument(op.arg(), "pad_r") &&
+ hasArgument(op.arg(), "pad_t") && hasArgument(op.arg(), "pad_b")))
+ throw std::runtime_error("If one custom pad specified - all custom pads must be specified");
+
+ // Kernel size
+ bool has_custom_kernel_size =
+ hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
+
+ if (has_custom_kernel_size && hasArgument(op.arg(), "kernel"))
+ throw std::runtime_error("Custom kernel size can't be combined with overall kernel size");
+
+ if (has_custom_kernel_size &&
+ !(hasArgument(op.arg(), "kernel_h") && hasArgument(op.arg(), "kernel_w")))
+ throw std::runtime_error(
+ "If one custom kernel size specified - all custom kernel sizes must be specified");
+}
+
+static mir::TensorVariant createTensor(const OperatorDef &op)
+{
+ assert(hasArgument(op.arg(), "shape") && hasArgument(op.arg(), "values"));
+
+ const auto &shape = findArgumentByName(op.arg(), "shape");
+ const auto &values = findArgumentByName(op.arg(), "values");
+
+ mir::DataType element_type;
+ const void *src_data;
+ // if values on floats
+ if (!values.floats().empty())
+ {
+ element_type = mir::DataType::FLOAT32;
+ src_data = values.floats().data();
+ }
+ else
+ {
+ assert(!values.ints().empty());
+ if (op.type() == "GivenTensorInt64Fill")
+ {
+ element_type = mir::DataType::INT64;
+ }
+ else
+ {
+ element_type = mir::DataType::INT32;
+ }
+ src_data = values.ints().data();
+ }
+
+ mir::Shape tensor_shape(shape.ints_size());
+
+ for (int i = 0; i < shape.ints_size(); ++i)
+ {
+ tensor_shape.dim(i) = shape.ints(i);
+ }
+
+ return mir::TensorVariant({element_type, tensor_shape}, src_data);
+}
+
+//
+// Convert functions
+//
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertConstant(const std::vector<mir::Operation::Output *> &,
+ const ::caffe2::OperatorDef &op)
+{
+ // Constant may not contain any data if it is a fake input.
+ if (!hasArgument(op.arg(), "values"))
+ return {};
+
+ return {createOp<ops::ConstantOp>(createTensor(op))->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertAdd(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ assert(inputs.size() == 2);
+ auto lhs = inputs[0];
+ auto rhs = inputs[1];
+
+ if (getSingleArgument(op, "broadcast", 0) != 0)
+ {
+ // FIXME This only works when 'axis' == 1 and the second input is 1-D.
+ rhs = createOp<ops::ReshapeOp>(rhs, Shape{1, rhs->getShape().dim(0), 1, 1})->getOutput(0);
+ auto result = createOp<ops::AddOp>(lhs, rhs)->getOutput(0);
+ return {result};
+ }
+
+ auto result = createOp<ops::AddOp>(lhs, rhs)->getOutput(0);
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertAveragePool(const std::vector<mir::Operation::Output *> &inputs,
+ const OperatorDef &op)
+{
+ checkConvLikeOp(op);
+
+ assert(inputs.size() == 1);
+ auto input = inputs[0];
+
+ AvgPool2DOpAttributes attributes;
+ std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
+ attributes.window = getWindowSize(op, inputs);
+ attributes.strides = getStrides(op);
+ attributes.include_pad = false;
+ attributes.data_format = DataFormat::NCHW;
+ auto result = createOp<ops::AvgPool2DOp>(input, attributes)->getOutput(0);
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertConv(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ // dilation order: h w (not used)
+ mir::Conv2DOpAttributes attributes;
+ attributes.strides = getStrides(op);
+ std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
+ attributes.num_groups = getSingleArgument(op, "group", 1);
+ attributes.data_format = DataFormat::NCHW;
+
+ std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
+ auto kernel = createOp<ops::TransposeOp>(inputs[1], perm)->getOutput(0);
+ auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
+
+ if (op.input_size() > 2)
+ {
+ auto bias = inputs[2];
+ bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+ }
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertConcat(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ checkLayout(op);
+
+ // `1` corresponds to the default (channels) axis.
+ int axis = getSingleArgument(op, "axis", 1);
+ auto result = createOp<ops::ConcatOp>(inputs, axis);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertDropout(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &)
+{
+ // This is a no-op in inference mode.
+ return {inputs[0]};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertFC(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ for (auto &s : {"axis", "axis_w", "float16_compute"})
+ if (hasArgument(op.arg(), s))
+ throw std::runtime_error(std::string("FC: only default '") + s + "' value is supported");
+
+ const auto &input_shape = inputs[0]->getShape();
+ // Transform input into 2-D tensor by flattening axes
+ Shape shape{input_shape.dim(0), input_shape.numElements() / input_shape.dim(0)};
+
+ auto reshape = createOp<ops::ReshapeOp>(inputs[0], shape)->getOutput(0);
+ auto weights =
+ createOp<ops::TransposeOp>(inputs[1], std::vector<std::size_t>{1, 0})->getOutput(0);
+ auto result = createOp<ops::FullyConnectedOp>(reshape, weights)->getOutput(0);
+ result = createOp<ops::AddOp>(result, inputs[2])->getOutput(0);
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertMaxPool(const std::vector<mir::Operation::Output *> &inputs,
+ const OperatorDef &op)
+{
+ checkConvLikeOp(op);
+
+ assert(inputs.size() == 1);
+ auto input = inputs[0];
+
+ MaxPool2DOpAttributes attributes;
+ std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
+ attributes.window = getWindowSize(op, inputs);
+ attributes.strides = getStrides(op);
+ attributes.data_format = DataFormat::NCHW;
+ auto result = createOp<ops::MaxPool2DOp>(input, attributes)->getOutput(0);
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertMul(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ assert(inputs.size() == 2);
+ auto lhs = inputs[0];
+ auto rhs = inputs[1];
+
+ if (getSingleArgument(op, "broadcast", 0) != 0)
+ {
+ // FIXME This only works when `axis` == 1 and the second input is 1-D.
+ rhs = createOp<ops::ReshapeOp>(rhs, Shape{1, rhs->getShape().dim(0), 1, 1})->getOutput(0);
+ auto result = createOp<ops::MulOp>(lhs, rhs)->getOutput(0);
+ return {result};
+ }
+
+ auto result = createOp<ops::MulOp>(lhs, rhs)->getOutput(0);
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertRelu(const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto relu = createOp<ops::ReluOp>(inputs[0]);
+ return {relu->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertResizeNearest(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ std::vector<float> scales(4);
+ assert(inputs[0]->getShape().rank() == 4 && "only 4d tensors is supported");
+ // Assuming NCHW format.
+ scales[0] = 1.0f;
+ scales[1] = 1.0f;
+ scales[2] = getSingleArgument(op, "height_scale", 1.0f);
+ scales[3] = getSingleArgument(op, "width_scale", 1.0f);
+ auto result =
+ createOp<ops::ResizeOp>(inputs[0], ops::ResizeOp::ResizeMethod::nearestNeighbor, scales)
+ ->getOutput(0);
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSigmoid(const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto result = createOp<ops::SigmoidOp>(inputs[0]);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ int axis = getSingleArgument(op, "axis", 1);
+ auto softmax = createOp<ops::SoftmaxOp>(inputs[0], axis);
+ return {softmax->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+ checkLayout(op);
+
+ // Sanity checks
+ if (op.input_size() != 5)
+ throw std::runtime_error(
+ "SpatialBN must have exactly 5 inputs ('sums' and 'sumsq' are not supported yet)");
+ if (getSingleArgument(op, "is_test", 1) != 1)
+ throw std::runtime_error("SpatialBN: only test mode supported");
+
+ // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
+
+ auto scale_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+ auto bias_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[2]->getNode());
+ auto mean_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[3]->getNode());
+ auto var_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[4]->getNode());
+ if (scale_op == nullptr || bias_op == nullptr || mean_op == nullptr || var_op == nullptr)
+ throw std::runtime_error(
+ "SpatialBN: non-constant 'scale', 'bias', 'mean' and 'var' inputs are not supported yet.");
+
+ const auto &scale_tensor = scale_op->getValue();
+ const auto &bias_tensor = bias_op->getValue();
+ const auto &mean_tensor = mean_op->getValue();
+ const auto &var_tensor = var_op->getValue();
+ float eps = getSingleArgument(op, "epsilon", 1e-5f);
+
+ // res1 = X - mean
+ Tensor<float> bias_data(mean_tensor);
+ for (auto &idx : ShapeRange(bias_data.getShape()))
+ bias_data.at(idx) *= -1;
+
+ auto mean = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);
+ mean = createOp<ops::ReshapeOp>(mean, Shape{1, mean->getShape().dim(0), 1, 1})->getOutput(0);
+ auto result = createOp<ops::AddOp>(inputs[0], mean)->getOutput(0);
+
+ // res2 = res1 * scale / (var + epsilon)
+ Tensor<float> multiplier(scale_tensor);
+ for (auto &idx : ShapeRange(scale_tensor.getShape()))
+ multiplier.at(idx) /= std::sqrt(*reinterpret_cast<float *>(var_tensor.at(idx)) + eps);
+ auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
+ scale = createOp<ops::ReshapeOp>(scale, Shape{1, scale->getShape().dim(0), 1, 1})->getOutput(0);
+ result = createOp<ops::MulOp>(result, scale)->getOutput(0);
+
+ // overall_res = res2 + bias
+ auto bias = createOp<ops::ConstantOp>(bias_tensor)->getOutput(0);
+ bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSum(const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
+ for (int i = 2; i < static_cast<int>(inputs.size()); ++i)
+ {
+ result = createOp<ops::AddOp>(result, inputs[i])->getOutput(0);
+ }
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertClip(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
+
+ float max = getSingleArgument(op, "max", float(0));
+ float min = getSingleArgument(op, "min", float(0));
+
+ if (min != 0.0f)
+ throw std::runtime_error("Clip: min != 0 is not supported.");
+ if (max <= min)
+ throw std::runtime_error("Clip: max <= min is not supported.");
+ auto cap_relu = createOp<ops::CappedReluOp>(inputs[0], max);
+
+ return {cap_relu->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertReshape(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &)
+{
+ auto shape_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+ if (shape_op == nullptr)
+ throw std::runtime_error("Reshape: non-constant shape is not supported yet.");
+
+ const auto &shape_tensor = shape_op->getValue();
+
+ Tensor<int64_t> out_shape_tensor(shape_tensor);
+
+ ShapeRange range(out_shape_tensor.getShape());
+ std::vector<int32_t> shape_vec;
+ for (const auto &index : range)
+ {
+ shape_vec.push_back(static_cast<int32_t>(out_shape_tensor.at(index)));
+ }
+ Shape out_shape(shape_vec);
+
+ auto reshape = createOp<ops::ReshapeOp>(inputs[0], out_shape);
+
+ return {reshape->getOutput(0)};
+}
+
+} // namespace mir_caffe2
diff --git a/compiler/mir-caffe2-importer/caffe2_op_creator.h b/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.h
index 2b29378e9..2b29378e9 100644
--- a/compiler/mir-caffe2-importer/caffe2_op_creator.h
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.h
diff --git a/compiler/mir-caffe2-importer/caffe2_op_types.h b/compiler/mir/src/mir_caffe2_importer/caffe2_op_types.h
index b5e7e7631..b5e7e7631 100644
--- a/compiler/mir-caffe2-importer/caffe2_op_types.h
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_op_types.h
diff --git a/compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.cpp b/compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.cpp
new file mode 100644
index 000000000..7bb9cf06d
--- /dev/null
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "caffe2_proto_helper.h"
+
+namespace mir_caffe2
+{
+
+const ::caffe2::Argument &findArgumentByName(RepArgument args, const std::string &name)
+{
+ for (auto &arg : args)
+ if (arg.name() == name)
+ return arg;
+ throw std::runtime_error("Can't find argument with name: " + name);
+}
+
+bool hasArgument(RepArgument args, const std::string &name)
+{
+ for (auto &arg : args)
+ if (arg.name() == name)
+ return true;
+ return false;
+}
+
+int getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const int default_value)
+{
+ if (hasArgument(op.arg(), argument_name))
+ return static_cast<int>(findArgumentByName(op.arg(), argument_name).i());
+ return default_value;
+}
+
+float getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const float default_value)
+{
+ if (hasArgument(op.arg(), argument_name))
+ return findArgumentByName(op.arg(), argument_name).f();
+ return default_value;
+}
+
+std::string getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const std::string &default_value)
+{
+ if (hasArgument(op.arg(), argument_name))
+ return findArgumentByName(op.arg(), argument_name).s();
+ return default_value;
+}
+
+} // namespace mir_caffe2
diff --git a/compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.h b/compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.h
new file mode 100644
index 000000000..4f5c4ef39
--- /dev/null
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_proto_helper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_CAFFE2_PROTO_HELPER_H
+#define MIR_CAFFE2_PROTO_HELPER_H
+
+#include "caffe2/proto/caffe2.pb.h"
+
+namespace mir_caffe2
+{
+
+using RepArgument = const ::google::protobuf::RepeatedPtrField<::caffe2::Argument> &;
+
+const ::caffe2::Argument &findArgumentByName(RepArgument args, const std::string &name);
+
+bool hasArgument(RepArgument args, const std::string &name);
+
+int getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ int default_value);
+float getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ float default_value);
+std::string getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const std::string &default_value);
+
+} // namespace mir_caffe2
+
+#endif // MIR_CAFFE2_PROTO_HELPER_H
diff --git a/compiler/mir/src/mir_caffe_importer/CMakeLists.txt b/compiler/mir/src/mir_caffe_importer/CMakeLists.txt
new file mode 100644
index 000000000..34be520fc
--- /dev/null
+++ b/compiler/mir/src/mir_caffe_importer/CMakeLists.txt
@@ -0,0 +1,16 @@
+nnas_find_package(CaffeProto QUIET)
+
+if (NOT CaffeProto_FOUND)
+ return()
+endif ()
+
+set(MIR_CAFFE_IMPORTER_SOURCES
+ caffe_importer.cpp
+ caffe_op_creator.cpp
+ caffe_op_creator.h
+ caffe_op_types.h)
+
+add_library(mir_caffe_importer STATIC ${MIR_CAFFE_IMPORTER_SOURCES})
+set_target_properties(mir_caffe_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(mir_caffe_importer PUBLIC ../../include/mir_caffe_importer)
+target_link_libraries(mir_caffe_importer PUBLIC mir PRIVATE caffeproto nncc_common)
diff --git a/compiler/mir/src/mir_caffe_importer/caffe_importer.cpp b/compiler/mir/src/mir_caffe_importer/caffe_importer.cpp
new file mode 100644
index 000000000..49f13fbd8
--- /dev/null
+++ b/compiler/mir/src/mir_caffe_importer/caffe_importer.cpp
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "caffe_importer.h"
+#include "caffe/proto/caffe.pb.h"
+#include "caffe_op_creator.h"
+#include "caffe_op_types.h"
+
+#include "mir/ops/OutputOp.h"
+
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/text_format.h>
+
+#include <fcntl.h>
+
+#include <cassert>
+#include <cerrno>
+#include <cstring>
+#include <memory>
+#include <stdexcept>
+#include <utility>
+#include <vector>
+#include <set>
+
+namespace mir_caffe
+{
+
+namespace
+{
+
+class CaffeImporter
+{
+public:
+ /// @brief Load the model and convert it into a MIR Graph.
+ std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename);
+ std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename);
+
+private:
+ std::unique_ptr<mir::Graph> importModel();
+
+ std::unique_ptr<caffe::NetParameter> _net;
+ std::unique_ptr<CaffeOpCreator> _opCreator;
+
+ // Maps Caffe blob names to corresponding MIR operation outputs.
+ std::map<std::string, mir::Operation::Output *> _blobNameToOpOutput;
+
+ static const std::map<std::string, CaffeOpType> _operatorTypes;
+
+ /**
+ * @brief Mark output MIR nodes
+ */
+ void setGraphOutputs(mir::Graph *graph);
+
+ /**
+ * @brief Pass through caffe graph and collect unsupported by NNC layers
+ * @throw PassException with message, containing detected problems
+ */
+ void collectUnsupportedLayers();
+
+ /**
+ * @brief Create MIR node from single caffe layer
+ */
+ void createMIRNodesFromLayer(const caffe::LayerParameter &layer);
+
+ mir::Operation::Output *getOutputForBlob(const std::string &blob_name) const;
+ void setOutputForBlob(const std::string &blob_name, mir::Operation::Output *output);
+
+ /**
+ * @brief Collect unsupported parts of caffe layer
+ */
+ void collectUnsupportedOp(const caffe::LayerParameter &layer, std::set<std::string> &problems);
+
+ /**
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given layer.
+ */
+ std::vector<mir::Operation::Output *> getMIRInputsForLayer(const caffe::LayerParameter &layer);
+
+ void processDeprecatedInput();
+};
+
+void loadModelFromBinaryFile(const std::string &filename, caffe::NetParameter *net)
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ int file_handle = open(filename.c_str(), O_RDONLY);
+
+ if (file_handle == -1)
+ throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
+ ".");
+
+ google::protobuf::io::FileInputStream file_stream(file_handle);
+ file_stream.SetCloseOnDelete(true);
+
+ google::protobuf::io::CodedInputStream coded_stream(&file_stream);
+ coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);
+
+ if (!net->ParseFromCodedStream(&coded_stream))
+ throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
+
+ // If the file has not been consumed entirely, assume that the file is in the wrong format.
+ if (!coded_stream.ConsumedEntireMessage())
+ throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
+}
+
+void loadModelFromTextFile(const std::string &filename, caffe::NetParameter *net)
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ int file_handle = open(filename.c_str(), O_RDONLY);
+
+ if (file_handle == -1)
+ throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
+ ".");
+
+ google::protobuf::io::FileInputStream file_stream(file_handle);
+ file_stream.SetCloseOnDelete(true);
+
+ if (!google::protobuf::TextFormat::Parse(&file_stream, net))
+ throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
+}
+
+std::unique_ptr<mir::Graph> CaffeImporter::importModel()
+{
+ auto graph = std::make_unique<mir::Graph>();
+ _opCreator = std::make_unique<CaffeOpCreator>(graph.get());
+
+ collectUnsupportedLayers();
+
+ for (int i = 0; i < _net->layer_size(); ++i)
+ createMIRNodesFromLayer(_net->layer(i));
+
+ setGraphOutputs(graph.get());
+
+ return graph;
+}
+
+std::unique_ptr<mir::Graph> CaffeImporter::importModelFromBinaryFile(const std::string &filename)
+{
+ _net = std::make_unique<caffe::NetParameter>();
+ loadModelFromBinaryFile(filename, _net.get());
+
+ return importModel();
+}
+
+std::unique_ptr<mir::Graph> CaffeImporter::importModelFromTextFile(const std::string &filename)
+{
+ _net = std::make_unique<caffe::NetParameter>();
+ loadModelFromTextFile(filename, _net.get());
+
+ return importModel();
+}
+
+void CaffeImporter::collectUnsupportedLayers()
+{
+ processDeprecatedInput();
+
+ std::set<std::string> problems;
+
+ for (const caffe::LayerParameter &layer : _net->layer())
+ collectUnsupportedOp(layer, problems);
+
+ if (!problems.empty())
+ {
+ std::string msg("NNC can't load model. Detected problems:");
+ for (const auto &problemStr : problems)
+ msg.append("\n * " + problemStr);
+ throw std::runtime_error(msg);
+ }
+}
+
+void CaffeImporter::createMIRNodesFromLayer(const caffe::LayerParameter &layer)
+{
+ std::vector<mir::Operation::Output *> inputs = getMIRInputsForLayer(layer);
+ std::vector<mir::Operation::Output *> outputs;
+
+ switch (_operatorTypes.at(layer.type()))
+ {
+ case CaffeOpType::input:
+ outputs = _opCreator->convertInput(layer);
+ break;
+ case CaffeOpType::convolution:
+ outputs = _opCreator->convertConvolution(layer, inputs);
+ break;
+ case CaffeOpType::innerProduct:
+ outputs = _opCreator->convertInnerProduct(layer, inputs);
+ break;
+ case CaffeOpType::pooling:
+ outputs = _opCreator->convertPooling(layer, inputs);
+ break;
+ case CaffeOpType::concat:
+ outputs = _opCreator->convertConcat(layer, inputs);
+ break;
+ case CaffeOpType::reshape:
+ outputs = _opCreator->convertReshape(layer, inputs);
+ break;
+ case CaffeOpType::ReLU:
+ outputs = _opCreator->convertReLU(layer, inputs);
+ break;
+ case CaffeOpType::softmax:
+ outputs = _opCreator->convertSoftmax(layer, inputs);
+ break;
+ case CaffeOpType::scale:
+ outputs = _opCreator->convertScale(layer, inputs);
+ break;
+ case CaffeOpType::batchNorm:
+ outputs = _opCreator->convertBatchNorm(layer, inputs);
+ break;
+ case CaffeOpType::dropout:
+ outputs = _opCreator->convertDropout(layer, inputs);
+ break;
+ case CaffeOpType::tanh:
+ outputs = _opCreator->convertTanH(layer, inputs);
+ break;
+ case CaffeOpType::ELU:
+ outputs = _opCreator->convertELU(layer, inputs);
+ break;
+ case CaffeOpType::eltwise:
+ outputs = _opCreator->convertEltwise(layer, inputs);
+ break;
+ case CaffeOpType::embed:
+ outputs = _opCreator->convertEmbed(layer, inputs);
+ break;
+ case CaffeOpType::deconvolution:
+ outputs = _opCreator->convertDeconvolution(layer, inputs);
+ break;
+ case CaffeOpType::split:
+ outputs = _opCreator->convertSplit(layer, inputs);
+ break;
+ case CaffeOpType::sigmoid:
+ outputs = _opCreator->convertSigmoid(layer, inputs);
+ break;
+ case CaffeOpType::LSTM:
+ outputs = _opCreator->convertLSTM(layer, inputs);
+ break;
+ default:
+ assert(false && "All unsupported types should have been found before this pass.");
+ }
+
+ assert(static_cast<int>(outputs.size()) == layer.top_size() && "Number of outputs differs.");
+ for (int i = 0; i < layer.top_size(); ++i)
+ setOutputForBlob(layer.top(i), outputs[i]);
+}
+
+void CaffeImporter::collectUnsupportedOp(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems)
+{
+ auto it = _operatorTypes.find(layer.type());
+ if (it == _operatorTypes.end())
+ {
+ problems.insert(layer.type() + ": unknown layer");
+ return;
+ }
+
+ CaffeOpType op_type = it->second;
+
+ switch (op_type)
+ {
+ case CaffeOpType::concat:
+ case CaffeOpType::input:
+ case CaffeOpType::softmax:
+ case CaffeOpType::scale:
+ case CaffeOpType::dropout:
+ case CaffeOpType::split:
+ case CaffeOpType::eltwise:
+ case CaffeOpType::ELU:
+ case CaffeOpType::ReLU:
+ case CaffeOpType::embed:
+ case CaffeOpType::sigmoid:
+ case CaffeOpType::tanh:
+ case CaffeOpType::innerProduct:
+ // No checks
+ break;
+ case CaffeOpType::deconvolution:
+ case CaffeOpType::convolution:
+ _opCreator->checkConvolution(layer, problems);
+ break;
+ case CaffeOpType::pooling:
+ _opCreator->checkPooling(layer, problems);
+ break;
+ case CaffeOpType::reshape:
+ _opCreator->checkReshape(layer, problems);
+ break;
+ case CaffeOpType::batchNorm:
+ _opCreator->checkBatchNorm(layer, problems);
+ break;
+ case CaffeOpType::LSTM:
+ _opCreator->checkLSTM(layer, problems);
+ break;
+ default:
+ problems.insert(layer.type() + ": unsupported layer");
+ break;
+ }
+}
+
+void CaffeImporter::processDeprecatedInput()
+{
+ if (_net->input_dim_size() != 0 || _net->input_shape_size() != 0)
+ throw std::runtime_error("Deprecated Caffe input types are not supported");
+}
+
+std::vector<mir::Operation::Output *>
+CaffeImporter::getMIRInputsForLayer(const caffe::LayerParameter &layer)
+{
+ std::vector<mir::Operation::Output *> inputs;
+
+ for (const auto &input_name : layer.bottom())
+ inputs.push_back(getOutputForBlob(input_name));
+
+ return inputs;
+}
+
+mir::Operation::Output *CaffeImporter::getOutputForBlob(const std::string &blob_name) const
+{
+ return _blobNameToOpOutput.at(blob_name);
+}
+
+void CaffeImporter::setOutputForBlob(const std::string &blob_name, mir::Operation::Output *output)
+{
+ const auto it = _blobNameToOpOutput.find(blob_name);
+ if (it != _blobNameToOpOutput.cend())
+ {
+ // caffe input blob name could be same as output blob name, and next line will overwrite
+ // '_blobNameToOpOutput' element, but in all networks that I saw it was not a problem
+ it->second->setName("");
+ }
+
+ // Do not overwrite the name in case of fall-through layers (ex. Dropout, Split).
+ // TODO Find a way to handle it properly.
+ if (output->getName().empty())
+ output->setName(blob_name);
+
+ _blobNameToOpOutput[blob_name] = output;
+}
+
+void CaffeImporter::setGraphOutputs(mir::Graph *graph)
+{
+ // TODO For now, we assume that:
+ // - there is exactly one output;
+ // - the output is from the last layer.
+ const auto &last_layer = *_net->layer().rbegin();
+ auto output = getOutputForBlob(last_layer.top(0));
+ graph->create<mir::ops::OutputOp>(output);
+}
+
+const std::map<std::string, CaffeOpType> CaffeImporter::_operatorTypes = {
+ {"AbsVal", CaffeOpType::absVal},
+ {"Accuracy", CaffeOpType::accuracy},
+ {"ArgMax", CaffeOpType::argMax},
+ {"BatchNorm", CaffeOpType::batchNorm},
+ {"BatchReindex", CaffeOpType::batchReindex},
+ {"Bias", CaffeOpType::bias},
+ {"BNLL", CaffeOpType::BNLL},
+ {"Clip", CaffeOpType::clip},
+ {"Concat", CaffeOpType::concat},
+ {"ContrastiveLoss", CaffeOpType::contrastiveLoss},
+ {"Convolution", CaffeOpType::convolution},
+ {"Crop", CaffeOpType::crop},
+ {"Data", CaffeOpType::data},
+ {"Deconvolution", CaffeOpType::deconvolution},
+ {"Dropout", CaffeOpType::dropout},
+ {"DummyData", CaffeOpType::dummyData},
+ {"Eltwise", CaffeOpType::eltwise},
+ {"ELU", CaffeOpType::ELU},
+ {"Embed", CaffeOpType::embed},
+ {"EuclidianLoss", CaffeOpType::euclidianLoss},
+ {"Exp", CaffeOpType::exp},
+ {"Filter", CaffeOpType::filter},
+ {"Flatten", CaffeOpType::flatten},
+ {"HDF5Data", CaffeOpType::HDF5Data},
+ {"HDF5Output", CaffeOpType::HDF5Output},
+ {"HingeLoss", CaffeOpType::hingeLoss},
+ {"Im2Col", CaffeOpType::im2Col},
+ {"ImageData", CaffeOpType::imageData},
+ {"InfogainLoss", CaffeOpType::infogainLoss},
+ {"InnerProduct", CaffeOpType::innerProduct},
+ {"Input", CaffeOpType::input},
+ {"Log", CaffeOpType::log},
+ {"LRN", CaffeOpType::LRN},
+ {"LSTM", CaffeOpType::LSTM},
+ {"MemoryData", CaffeOpType::memoryData},
+ {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
+ {"MVN", CaffeOpType::MVN},
+ {"Parameter", CaffeOpType::parameter},
+ {"Pooling", CaffeOpType::pooling},
+ {"Power", CaffeOpType::power},
+ {"PReLU", CaffeOpType::PReLU},
+ {"Python", CaffeOpType::python},
+ {"Recurrent", CaffeOpType::recurrent},
+ {"Reduction", CaffeOpType::reduction},
+ {"ReLU", CaffeOpType::ReLU},
+ {"Reshape", CaffeOpType::reshape},
+ {"RNN", CaffeOpType::RNN},
+ {"Scale", CaffeOpType::scale},
+ {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
+ {"Sigmoid", CaffeOpType::sigmoid},
+ {"Silence", CaffeOpType::silence},
+ {"Softmax", CaffeOpType::softmax},
+ {"SoftmaxWithLoss", CaffeOpType::softmaxWithLoss},
+ {"SPP", CaffeOpType::SPP},
+ {"Split", CaffeOpType::split},
+ {"Slice", CaffeOpType::slice},
+ {"TanH", CaffeOpType::tanh},
+ {"Threshold", CaffeOpType::threshold},
+ {"Tile", CaffeOpType::tile},
+ {"WindowData", CaffeOpType::windowData}};
+} // namespace
+
+std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename)
+{
+ CaffeImporter importer;
+ return importer.importModelFromBinaryFile(filename);
+}
+
+std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename)
+{
+ CaffeImporter importer;
+ return importer.importModelFromTextFile(filename);
+}
+
+std::unique_ptr<mir::Graph> loadModel(const std::string &filename)
+{
+ return importModelFromBinaryFile(filename);
+}
+
+} // namespace mir_caffe
diff --git a/compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp b/compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp
new file mode 100644
index 000000000..37edc69c4
--- /dev/null
+++ b/compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp
@@ -0,0 +1,835 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "caffe_op_creator.h"
+
+#include "mir/ops/AddOp.h"
+#include "mir/ops/AvgPool2DOp.h"
+#include "mir/ops/ConcatOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/Conv2DOp.h"
+#include "mir/ops/Deconv2DOp.h"
+#include "mir/ops/EluOp.h"
+#include "mir/ops/FullyConnectedOp.h"
+#include "mir/ops/GatherOp.h"
+#include "mir/ops/LeakyReluOp.h"
+#include "mir/ops/MaxOp.h"
+#include "mir/ops/MaxPool2DOp.h"
+#include "mir/ops/MulOp.h"
+#include "mir/ops/ReluOp.h"
+#include "mir/ops/ReshapeOp.h"
+#include "mir/ops/SigmoidOp.h"
+#include "mir/ops/SliceOp.h"
+#include "mir/ops/SoftmaxOp.h"
+#include "mir/ops/TanhOp.h"
+#include "mir/ops/TransposeOp.h"
+#include "mir/Index.h"
+#include "mir/ShapeRange.h"
+#include "mir/Tensor.h"
+
+#include <cmath>
+#include <iostream>
+#include <set>
+#include <stdexcept>
+
+namespace mir_caffe
+{
+
+static mir::Shape convertBlobShape(const caffe::BlobShape &shape)
+{
+ mir::Shape mir_shape(shape.dim_size());
+
+ for (int i = 0; i < shape.dim_size(); ++i)
+ {
+ mir_shape.dim(i) = shape.dim(i);
+ }
+
+ return mir_shape;
+}
+
+using namespace mir;
+
+/// @brief Split arg into @p num_parts equal parts along @p axis axis.
+std::vector<mir::Operation::Output *> CaffeOpCreator::createSplit(mir::Operation::Output *arg,
+ int32_t num_parts, int32_t axis)
+{
+ const auto &arg_shape = arg->getShape();
+
+ assert(axis >= 0 && axis < arg_shape.rank());
+ int32_t part_size = arg_shape.dim(axis) / num_parts;
+ assert(part_size * num_parts == arg_shape.dim(axis));
+
+ Shape starts(arg_shape.rank());
+ Shape sizes(arg_shape);
+ sizes.dim(axis) = part_size;
+
+ std::vector<mir::Operation::Output *> outputs(num_parts);
+ for (int32_t i = 0; i < num_parts; ++i)
+ {
+ outputs[i] = createOp<ops::SliceOp>(arg, starts, sizes)->getOutput(0);
+ starts.dim(axis) += part_size;
+ }
+
+ return outputs;
+}
+
+/// @brief Helper function for creating FullyConnected operation with non-square input.
+mir::Operation::Output *CaffeOpCreator::createFullyConnected(mir::Operation::Output *input,
+ mir::Operation::Output *weights,
+ int32_t axis)
+{
+ const auto &input_shape = input->getShape();
+ const auto &weights_shape = weights->getShape();
+
+ assert(axis >= 0 && axis < input_shape.rank());
+ assert(weights_shape.rank() == 2);
+
+ // Result shape is: input.shape[0:axis] + weights.shape[1].
+ Shape result_shape = input_shape;
+ result_shape.resize(axis + 1);
+ result_shape.dim(axis) = weights_shape.dim(1);
+
+ // Flatten input to 2-D shape.
+ int32_t outer_size = 1;
+ for (int32_t i = 0; i < axis; ++i)
+ outer_size *= input_shape.dim(i);
+ int32_t inner_size = 1;
+ for (int32_t i = axis; i < input_shape.rank(); ++i)
+ inner_size *= input_shape.dim(i);
+
+ auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size})->getOutput(0);
+ auto fc = createOp<ops::FullyConnectedOp>(flatten, weights)->getOutput(0);
+ return createOp<ops::ReshapeOp>(fc, result_shape)->getOutput(0);
+}
+
+TensorVariant CaffeOpCreator::convertBlob(const caffe::BlobProto &blob)
+{
+ const void *src_data;
+
+ mir::DataType dtype;
+ if (blob.data_size() != 0)
+ {
+ assert(blob.double_data_size() == 0);
+ dtype = mir::DataType::FLOAT32;
+ src_data = blob.data().data();
+ }
+ else if (blob.double_data_size() != 0)
+ {
+ dtype = mir::DataType::FLOAT64;
+ src_data = blob.double_data().data();
+ }
+ else
+ {
+ throw std::runtime_error("No data in Caffe BlobProto, investigate");
+ }
+
+ const mir::Shape shape = convertBlobShape(blob.shape());
+ return TensorVariant({dtype, shape}, src_data);
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertInput(const caffe::LayerParameter &layer)
+{
+ const auto &params = layer.input_param();
+ const auto num_inputs = layer.top_size();
+ const auto num_shapes = params.shape_size();
+ std::vector<mir::Operation::Output *> outputs;
+
+ assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes.");
+
+ for (int i = 0; i < num_inputs; ++i)
+ {
+ const auto &blob_shape = params.shape(num_shapes == 1 ? 0 : i);
+ mir::TensorType input_type(DataType::FLOAT32, convertBlobShape(blob_shape));
+ auto input = createOp<ops::InputOp>(input_type)->getOutput(0);
+ outputs.push_back(input);
+ }
+
+ return outputs;
+}
+
+template <class OperationAttributes>
+static void convertConvolutionParam(const caffe::ConvolutionParameter &conv_param,
+ OperationAttributes &attributes)
+{
+ std::int32_t stride_h, stride_w;
+ if (conv_param.has_stride_h() || conv_param.has_stride_w())
+ {
+ // If stride_h or stride_w are set, they take precedence.
+ stride_h = conv_param.stride_h();
+ stride_w = conv_param.stride_w();
+ }
+ else if (conv_param.stride_size() == 0)
+ {
+ // If no strides specified, they defaults to 1.
+ stride_h = stride_w = 1;
+ }
+ else if (conv_param.stride_size() == 1)
+ {
+ // If only one stride specified, all strides take the same value.
+ stride_h = stride_w = conv_param.stride(0);
+ }
+ else
+ {
+ // Otherwise, there must be a stride for each dimension.
+ assert(conv_param.stride_size() == 2);
+ stride_h = conv_param.stride(0);
+ stride_w = conv_param.stride(1);
+ }
+ attributes.strides = {stride_h, stride_w};
+
+ std::int32_t pad_h, pad_w;
+ if (conv_param.has_pad_h() || conv_param.has_pad_w())
+ {
+ // If pad_h or pad_w are set, they take precedence.
+ pad_h = conv_param.pad_h();
+ pad_w = conv_param.pad_w();
+ }
+ else if (conv_param.pad_size() == 0)
+ {
+ // If no pads specified, they defaults to 0.
+ pad_h = pad_w = 0;
+ }
+ else if (conv_param.pad_size() == 1)
+ {
+ // If only one pad specified, all pads take the same value.
+ pad_h = pad_w = conv_param.pad(0);
+ }
+ else
+ {
+ // Otherwise, there must be a pad for each dimension.
+ assert(conv_param.pad_size() == 2);
+ pad_h = conv_param.pad(0);
+ pad_w = conv_param.pad(1);
+ }
+ attributes.padding_after = attributes.padding_before = {pad_h, pad_w};
+}
+
+void CaffeOpCreator::checkConvolution(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems_ops_set)
+{
+ const caffe::ConvolutionParameter &params = layer.convolution_param();
+
+ assert(params.stride_size() <= 2);
+
+ if (params.axis() != 1)
+ problems_ops_set.insert("Conv2D: Unsupported axis");
+
+ if (params.pad_size() != 0 && (params.has_pad_h() || params.has_pad_w()))
+ problems_ops_set.insert("Conv2D: Conflicting padding properties");
+
+ if (params.pad_size() > 2)
+ problems_ops_set.insert("Conv2D: Unsupported number of pads");
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertConvolution(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.convolution_param();
+ Conv2DOpAttributes attributes;
+
+ convertConvolutionParam(params, attributes);
+ attributes.num_groups = params.group();
+ attributes.data_format = DataFormat::NCHW;
+
+ assert(layer.blobs(0).shape().dim_size() == 4);
+ auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
+ std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
+ kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
+ auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
+
+ // Add the bias, if any.
+ if (params.bias_term())
+ {
+ auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
+ bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+ }
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const caffe::ConvolutionParameter &params = layer.convolution_param();
+ Deconv2DOpAttributes attributes;
+
+ convertConvolutionParam(params, attributes);
+ attributes.data_format = DataFormat::NCHW;
+
+ if (params.group() != 1)
+ {
+ throw std::runtime_error("Deconvolution: 'group' != 1 is not supported.");
+ }
+
+ auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
+ std::vector<std::size_t> perm{2, 3, 1, 0}; // IOHW -> HWOI
+ kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
+ auto result = createOp<ops::DeConv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
+
+ // bias_term is optional (so might not be present) and defaults to true
+ if (params.bias_term())
+ {
+ auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
+ bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+ }
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertInnerProduct(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.inner_product_param();
+ auto weights = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
+
+ if (!params.transpose())
+ weights = createOp<ops::TransposeOp>(weights, std::vector<std::size_t>{1, 0})->getOutput(0);
+
+ auto result = createFullyConnected(inputs[0], weights, params.axis());
+
+ // Add the bias, if any.
+ if (params.bias_term())
+ {
+ auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+ }
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertConcat(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.concat_param();
+ auto concat = createOp<ops::ConcatOp>(inputs, params.axis());
+ return {concat->getOutput(0)};
+}
+
+template <class PoolingAttributes>
+static void convertPoolingParam(const caffe::PoolingParameter &params,
+ const mir::Shape &input_shape, PoolingAttributes &attributes)
+{
+ std::int32_t kernel_h, kernel_w;
+ assert(!params.global_pooling());
+ if (params.has_kernel_size())
+ {
+ kernel_h = kernel_w = params.kernel_size();
+ }
+ else
+ {
+ kernel_h = params.kernel_h();
+ kernel_w = params.kernel_w();
+ }
+ attributes.window = {kernel_h, kernel_w};
+
+ std::int32_t stride_h, stride_w;
+ if (params.has_stride_h() || params.has_stride_w())
+ {
+ stride_h = params.stride_h();
+ stride_w = params.stride_w();
+ }
+ else
+ {
+ stride_h = stride_w = params.stride();
+ }
+ attributes.strides = {stride_h, stride_w};
+
+ std::int32_t pad_h, pad_w;
+ if (params.has_pad_h() || params.has_pad_w())
+ {
+ pad_h = params.pad_h();
+ pad_w = params.pad_w();
+ }
+ else
+ {
+ pad_h = pad_w = params.pad();
+ }
+
+ attributes.padding_before = attributes.padding_after = {pad_h, pad_w};
+
+ // Caffe uses different formula for computing output shape than MIR. Adjust padding so that
+ // the output shape stays the same.
+ constexpr int num_spatial_dims = 2;
+ for (int i = 0; i < num_spatial_dims; ++i)
+ {
+ // Assuming NCHW format.
+ const std::int32_t padded_input =
+ input_shape.dim(2 + i) + attributes.padding_before[i] + attributes.padding_after[i];
+ if ((padded_input - attributes.window[i]) % attributes.strides[i] != 0)
+ ++attributes.padding_after[i];
+ }
+}
+
+void CaffeOpCreator::checkPooling(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems_ops_set)
+{
+ const caffe::PoolingParameter &params = layer.pooling_param();
+
+ if (params.has_global_pooling() && params.global_pooling())
+ problems_ops_set.insert("Pooling: pooling layer global_pooling param is not supported yet");
+
+ if (params.pool() != caffe::PoolingParameter::AVE &&
+ params.pool() != caffe::PoolingParameter::MAX)
+ problems_ops_set.insert("Pooling: unsupported pooling type");
+
+ if (params.has_pad() && (params.has_pad_h() || params.has_pad_w()))
+ problems_ops_set.insert("Pooling: conflicting padding properties in pooling");
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertPooling(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.pooling_param();
+
+ assert(inputs.size() == 1);
+ auto input = inputs[0];
+
+ mir::Operation::Output *result;
+
+ switch (params.pool())
+ {
+ case caffe::PoolingParameter::AVE:
+ {
+ AvgPool2DOpAttributes attributes_avg;
+ attributes_avg.data_format = DataFormat::NCHW;
+ convertPoolingParam(params, input->getShape(), attributes_avg);
+ result = createOp<ops::AvgPool2DOp>(input, attributes_avg)->getOutput(0);
+ break;
+ }
+ case caffe::PoolingParameter::MAX:
+ {
+ MaxPool2DOpAttributes attributes_max;
+ attributes_max.data_format = DataFormat::NCHW;
+ convertPoolingParam(params, input->getShape(), attributes_max);
+ result = createOp<ops::MaxPool2DOp>(input, attributes_max)->getOutput(0);
+ break;
+ }
+ default:
+ throw std::runtime_error("Unsupported PoolMethod: " + std::to_string(params.pool()));
+ }
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertSoftmax(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.softmax_param();
+
+ // CPP and ACL backends are able to perform Softmax only along the last axis.
+ // FIXME Do it in backends.
+ if (inputs[0]->getShape().rank() == 4)
+ {
+ // For now, we only account for the most common case.
+ if (params.axis() != 1)
+ throw std::runtime_error("Softmax: unsupported axis");
+ int32_t axis = 3;
+ auto input = createOp<ops::TransposeOp>(inputs[0], std::vector<std::size_t>{0, 2, 3, 1});
+ auto softmax = createOp<ops::SoftmaxOp>(input->getOutput(0), axis);
+ auto result =
+ createOp<ops::TransposeOp>(softmax->getOutput(0), std::vector<std::size_t>{0, 3, 1, 2});
+ return {result->getOutput(0)};
+ }
+
+ auto softmax = createOp<ops::SoftmaxOp>(inputs[0], params.axis());
+ return {softmax->getOutput(0)};
+}
+
+void CaffeOpCreator::checkReshape(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems_ops_set)
+{
+ const caffe::ReshapeParameter &params = layer.reshape_param();
+
+ if (params.has_axis() || params.has_num_axes())
+ problems_ops_set.insert("Reshape layer axis and num_axes params are not supported yet");
+
+ if (!params.has_shape())
+ problems_ops_set.insert("Reshape layer doesn't have shape parameter");
+
+ const mir::Shape newShape = convertBlobShape(params.shape());
+
+ for (int32_t i = 0; i < newShape.rank(); ++i)
+ if (newShape.dim(i) == 0)
+ problems_ops_set.insert("Reshape layer zero shape values are not supported yet");
+}
+
+/**
+ * @brief Converts Caffe Reshape layer to Model IR Reshape operation.
+ * @todo Support "axis" and "num_axes" parameters as needed.
+ * @todo Decide how to react to the absence of "shape" parameter.
+ * @todo Support zero values in "shape" parameter.
+ */
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertReshape(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const caffe::ReshapeParameter &params = layer.reshape_param();
+
+ const mir::Shape new_shape = convertBlobShape(params.shape());
+ auto reshape = createOp<ops::ReshapeOp>(inputs[0], new_shape);
+ return {reshape->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertReLU(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ mir::Operation *relu;
+ if (layer.relu_param().has_negative_slope())
+ {
+ float alpha = layer.relu_param().negative_slope();
+ relu = createOp<ops::LeakyReluOp>(inputs[0], alpha);
+ }
+ else
+ {
+ relu = createOp<ops::ReluOp>(inputs[0]);
+ }
+
+ return {relu->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertScale(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.scale_param();
+ auto scale = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
+ scale = createOp<ops::ReshapeOp>(scale, Shape{1, scale->getShape().dim(0), 1, 1})->getOutput(0);
+ auto result = createOp<ops::MulOp>(inputs[0], scale)->getOutput(0);
+
+ // Add the bias, if any.
+ if (params.bias_term())
+ {
+ auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
+ bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+ }
+
+ return {result};
+}
+
+void CaffeOpCreator::checkBatchNorm(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems_ops_set)
+{
+ const auto &scale_shape = layer.blobs(2).shape();
+
+ // Check that last blob(with scaleFactor) containing only one number
+ if (scale_shape.dim_size() != 1 || scale_shape.dim(0) != 1)
+ problems_ops_set.insert("Unexpected shape of scale parameter in batch norm");
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const caffe::BatchNormParameter &params = layer.batch_norm_param();
+
+ auto input = inputs[0];
+ auto mean_tensor = convertBlob(layer.blobs(0));
+ auto var_tensor = convertBlob(layer.blobs(1));
+ auto scale_tensor = convertBlob(layer.blobs(2));
+ const float eps = params.eps();
+
+ float scale_factor = *reinterpret_cast<float *>(scale_tensor.at(mir::Index{0}));
+
+ // See https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100
+ // Y = (X - mean / scale_factor) / sqrt(var / scale_factor + epsilon) =
+ // = (X + C1) * C2
+ if (scale_factor != 0.0f)
+ scale_factor = 1.0f / scale_factor;
+
+ // C1 = -mean / scale_factor
+ Tensor<float> mean_accessor(mean_tensor);
+ for (const auto &idx : ShapeRange(mean_accessor.getShape()))
+ mean_accessor.at(idx) *= -scale_factor;
+ auto c1 = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);
+
+ // C2 = 1 / sqrt(var / scale_factor + epsilon)
+ Tensor<float> var_accessor(var_tensor);
+ for (const auto &idx : ShapeRange(var_accessor.getShape()))
+ var_accessor.at(idx) = 1.0f / std::sqrt(var_accessor.at(idx) * scale_factor + eps);
+ auto c2 = createOp<ops::ConstantOp>(var_tensor)->getOutput(0);
+
+ c1 = createOp<ops::ReshapeOp>(c1, Shape{1, c1->getShape().dim(0), 1, 1})->getOutput(0);
+ c2 = createOp<ops::ReshapeOp>(c2, Shape{1, c2->getShape().dim(0), 1, 1})->getOutput(0);
+
+ // Y = (X + C1) * C2
+ auto result = createOp<ops::AddOp>(input, c1)->getOutput(0);
+ result = createOp<ops::MulOp>(result, c2)->getOutput(0);
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertDropout(const caffe::LayerParameter &,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ // This is a no-op in inference mode.
+ return {inputs[0]};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertELU(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const caffe::ELUParameter &params = layer.elu_param();
+
+ auto elu = createOp<ops::EluOp>(inputs[0], params.alpha());
+ return {elu->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertEmbed(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.embed_param();
+ auto data = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)));
+ auto result = createOp<ops::GatherOp>(data->getOutput(0), inputs[0], 0)->getOutput(0);
+
+ // Add the bias, if any.
+ if (params.bias_term())
+ {
+ auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+ }
+
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertSigmoid(const caffe::LayerParameter &,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto result = createOp<ops::SigmoidOp>(inputs[0]);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertTanH(const caffe::LayerParameter &,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto tanh = createOp<ops::TanhOp>(inputs[0]);
+ return {tanh->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertEltwise(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &params = layer.eltwise_param();
+
+ mir::Operation::Output *result;
+ switch (params.operation())
+ {
+ case caffe::EltwiseParameter::PROD:
+ {
+ result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
+ for (int i = 2; i < layer.bottom_size(); ++i)
+ {
+ result = createOp<ops::MulOp>(result, inputs[i])->getOutput(0);
+ }
+ break;
+ }
+ case caffe::EltwiseParameter::SUM:
+ {
+ std::vector<mir::Operation::Output *> scaled_inputs = inputs;
+ if (params.coeff_size() > 0)
+ {
+ assert(params.coeff_size() == layer.bottom_size());
+ for (int i = 0; i < layer.bottom_size(); i++)
+ {
+ if (params.coeff(i) != 1.0f)
+ {
+ const float coeff_val = params.coeff(i);
+ TensorVariant coeff_tensor({DataType::FLOAT32, {}}, &coeff_val);
+ auto coeff_const = createOp<ops::ConstantOp>(coeff_tensor)->getOutput(0);
+ scaled_inputs[i] = createOp<ops::MulOp>(coeff_const, inputs[i])->getOutput(0);
+ }
+ }
+ }
+ result = createOp<ops::AddOp>(scaled_inputs[0], scaled_inputs[1])->getOutput(0);
+ for (int i = 2; i < layer.bottom_size(); ++i)
+ {
+ result = createOp<ops::AddOp>(result, scaled_inputs[i])->getOutput(0);
+ }
+ break;
+ }
+ case caffe::EltwiseParameter::MAX:
+ {
+ result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
+ for (int i = 2; i < layer.bottom_size(); ++i)
+ {
+ result = createOp<ops::MaxOp>(result, inputs[i])->getOutput(0);
+ }
+ break;
+ }
+ default:
+ throw std::runtime_error("Unknown element-wise operation.");
+ }
+ return {result};
+}
+
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertSplit(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ std::vector<mir::Operation::Output *> outputs(layer.top_size(), inputs.at(0));
+ return outputs;
+}
+
+void CaffeOpCreator::checkLSTM(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems_ops_set)
+{
+ const auto &params = layer.recurrent_param();
+ if (params.expose_hidden())
+ problems_ops_set.insert("LSTM: parameter 'expose_hidden' has unsupported value: " +
+ std::to_string(params.expose_hidden()));
+}
+
+static TensorVariant createZeroedTensor(const mir::Shape &shape)
+{
+ // TODO For now it is hardcoded float32.
+ auto elem_type = mir::DataType::FLOAT32;
+ std::vector<float> zeros(static_cast<std::size_t>(shape.numElements()), 0.0f);
+ return TensorVariant({elem_type, shape}, zeros.data());
+}
+
+/* See the following links for details on implementation:
+ * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/recurrent_layer.cpp
+ * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lstm_layer.cpp
+ * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lstm_unit_layer.cpp
+ *
+ * Inputs:
+ * x -- The time-varying input. Shape: [T, N, d0, d1, ..., dn].
+ * cont -- The sequence continuation indicators. Shape: [T, N].
+ * x_static -- The static (non-time-varying) input. Shape: [N, ...].
+ * This parameter is optional and not currently supported.
+ *
+ * Additional inputs when parameter "expose_hidden" is true (not currently supported):
+ * h_0 -- The initial value of the hidden state. Shape: [1, N, D].
+ * c_0 -- The initial value of the cell state. Shape: [1, N, D].
+ *
+ * Learned parameters:
+ * xw -- x weights for input, output, forget and cell gates concatenated.
+ * Shape: [4 * D, d0 * d1 * ... * dn].
+ * xb -- x biases for input, output, forget and cell gates concatenated. Shape: [4 * D].
+ * hw -- h weights for input, output, forget and cell gates concatenated. Shape: [4 * D, D].
+ *
+ * Outputs:
+ * h -- The time-varying output. Shape: [T, N, D].
+ *
+ * Additional outputs when parameter "expose_hidden" is true (not currently supported):
+ * h_T -- The value of the hidden state at the last timestep. Shape: [1, N, D].
+ * c_T -- The value of the cell state at the last timestep. Shape: [1, N, D].
+ *
+ * Here:
+ * T - the number of timesteps,
+ * N - the number of independent streams.
+ * D - the number of hidden parameters.
+ *
+ * Formulas:
+ * c_cont = c[t-1] * cont[t]
+ * h_cont = h[t-1] * cont[t]
+ * i[t] = Sigmoid(x[t] . xw_i + xb_i + h_cont . hw_i)
+ * f[t] = Sigmoid(x[t] . xw_f + xb_f + h_cont . hw_f)
+ * o[t] = Sigmoid(x[t] . xw_o + xb_o + h_cont . hw_o)
+ * g[t] = Tanh(x[t] . xw_g + xb_g + h_cont . hw_g)
+ * c[t] = c_cont * f[t] + i[t] * g[t]
+ * h[t] = o[t] * Tanh(c[t])
+ *
+ * Here:
+ * t -- the timestep (ranges from 1 to T),
+ * * -- the inner product,
+ * . -- the Hadamard product (elementwise product).
+ *
+ * In this implementation the inner products for all gates are performed as single inner product for
+ * efficiency.
+ */
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertLSTM(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.recurrent_param();
+
+ // Inputs to the layer.
+ auto x = inputs[0];
+ auto cont = inputs[1];
+ assert(inputs.size() == 2);
+
+ const auto &x_shape = x->getShape();
+ const int32_t seq_length = x_shape.dim(0);
+ const int32_t batch_size = x_shape.dim(1);
+ const int32_t hidden_size = params.num_output();
+
+ // Learned parameters of the layer. Tensors are transposed to match the ModelIR.
+ auto xw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
+ auto xb = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
+ auto hw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(2)))->getOutput(0);
+ xw = createOp<ops::TransposeOp>(xw, std::vector<std::size_t>{1, 0})->getOutput(0);
+ hw = createOp<ops::TransposeOp>(hw, std::vector<std::size_t>{1, 0})->getOutput(0);
+
+ // Add a dummy dimension so that element-wise operations perform properly.
+ cont = createOp<ops::ReshapeOp>(cont, Shape{seq_length, batch_size, 1})->getOutput(0);
+
+ // Initialize cell and hidden states with zeros.
+ auto zero_tensor = createZeroedTensor(Shape{1, batch_size, hidden_size});
+ auto c_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);
+ auto h_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);
+
+ auto x_xw = createFullyConnected(x, xw, 2);
+ auto x_xw_b = createOp<ops::AddOp>(x_xw, xb)->getOutput(0);
+
+ // Split input and continuation tensors into seq_length slices.
+ std::vector<mir::Operation::Output *> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
+ std::vector<mir::Operation::Output *> cont_slices = createSplit(cont, seq_length, 0);
+ std::vector<mir::Operation::Output *> h_slices(seq_length);
+
+ for (int32_t t = 0; t < seq_length; t++)
+ {
+ auto c_cont_t = createOp<ops::MulOp>(c_t, cont_slices[t])->getOutput(0);
+ auto h_cont_t = createOp<ops::MulOp>(h_t, cont_slices[t])->getOutput(0);
+
+ auto x_xw_b_t = x_xw_b_slices[t];
+ auto h_hw_t = createFullyConnected(h_cont_t, hw, 2);
+ auto activation_inputs_concat = createOp<ops::AddOp>(x_xw_b_t, h_hw_t)->getOutput(0);
+ auto activation_inputs = createSplit(activation_inputs_concat, 4, 2);
+
+ auto i_t = createOp<ops::SigmoidOp>(activation_inputs[0])->getOutput(0);
+ auto f_t = createOp<ops::SigmoidOp>(activation_inputs[1])->getOutput(0);
+ auto o_t = createOp<ops::SigmoidOp>(activation_inputs[2])->getOutput(0);
+ auto g_t = createOp<ops::TanhOp>(activation_inputs[3])->getOutput(0);
+
+ c_t = createOp<ops::AddOp>(createOp<ops::MulOp>(c_cont_t, f_t)->getOutput(0),
+ createOp<ops::MulOp>(i_t, g_t)->getOutput(0))
+ ->getOutput(0);
+ h_t = createOp<ops::MulOp>(createOp<ops::TanhOp>(c_t)->getOutput(0), o_t)->getOutput(0);
+
+ h_slices[t] = h_t;
+ }
+
+ return {createOp<ops::ConcatOp>(h_slices, 0)->getOutput(0)};
+}
+
+} // namespace mir_caffe
diff --git a/compiler/mir-caffe-importer/caffe_op_creator.h b/compiler/mir/src/mir_caffe_importer/caffe_op_creator.h
index 721bb90b8..721bb90b8 100644
--- a/compiler/mir-caffe-importer/caffe_op_creator.h
+++ b/compiler/mir/src/mir_caffe_importer/caffe_op_creator.h
diff --git a/compiler/mir-caffe-importer/caffe_op_types.h b/compiler/mir/src/mir_caffe_importer/caffe_op_types.h
index 30fce7d5f..30fce7d5f 100644
--- a/compiler/mir-caffe-importer/caffe_op_types.h
+++ b/compiler/mir/src/mir_caffe_importer/caffe_op_types.h
diff --git a/compiler/mir/src/mir_onnx_importer/AttributeHelpers.h b/compiler/mir/src/mir_onnx_importer/AttributeHelpers.h
new file mode 100644
index 000000000..9a93b5b7d
--- /dev/null
+++ b/compiler/mir/src/mir_onnx_importer/AttributeHelpers.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_ATTRIBUTE_HELPERS_H
+#define MIR_ONNX_ATTRIBUTE_HELPERS_H
+
+#include "onnx/onnx.pb.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace mir_onnx
+{
+
+template <typename T> T getAttributeValue(const onnx::AttributeProto &attribute) = delete;
+
+template <> inline float getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::FLOAT);
+ return attribute.f();
+}
+
+template <> inline std::int64_t getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INT);
+ return attribute.i();
+}
+
+template <> inline std::string getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::STRING);
+ return attribute.s();
+}
+
+template <> inline onnx::TensorProto getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::TENSOR);
+ return attribute.t();
+}
+
+template <>
+inline std::vector<std::int32_t> getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INTS);
+ // TODO Check that values fit.
+ return {attribute.ints().cbegin(), attribute.ints().cend()};
+}
+
+template <>
+inline std::vector<std::int64_t> getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INTS);
+ return {attribute.ints().cbegin(), attribute.ints().cend()};
+}
+
+inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &node,
+ const std::string &name)
+{
+ const auto &attributes = node.attribute();
+ const auto it = std::find_if(
+ attributes.cbegin(), attributes.cend(),
+ [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
+ if (it == attributes.cend())
+ return nullptr;
+ return &*it;
+}
+
+template <typename T> T getAttributeValue(const onnx::NodeProto &node, const std::string &name)
+{
+ const auto *attribute = findAttribute(node, name);
+ if (attribute == nullptr)
+ throw std::runtime_error("Cannot find attribute '" + name + "' in node '" + node.name() + "'.");
+ return getAttributeValue<T>(*attribute);
+}
+
+template <typename T>
+T getAttributeValue(const onnx::NodeProto &node, const std::string &name, T default_value)
+{
+ const auto *attribute = findAttribute(node, name);
+ if (attribute == nullptr)
+ return default_value;
+ return getAttributeValue<T>(*attribute);
+}
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_ATTRIBUTE_HELPERS_H
diff --git a/compiler/mir/src/mir_onnx_importer/CMakeLists.txt b/compiler/mir/src/mir_onnx_importer/CMakeLists.txt
new file mode 100644
index 000000000..e6eb13b93
--- /dev/null
+++ b/compiler/mir/src/mir_onnx_importer/CMakeLists.txt
@@ -0,0 +1,119 @@
+nnas_find_package(ONNXSource EXACT 1.6.0 QUIET)
+nnas_find_package(Protobuf QUIET)
+
+if (NOT ONNXSource_FOUND)
+ return()
+endif ()
+
+if (NOT Protobuf_FOUND)
+ return()
+endif ()
+
+Protobuf_Generate(MIR_ONNX_PROTO
+ ${CMAKE_CURRENT_BINARY_DIR}/generated
+ ${ONNXSource_DIR}
+ onnx/onnx.proto)
+
+add_library(mir_onnx_proto STATIC ${MIR_ONNX_PROTO_SOURCES})
+set_target_properties(mir_onnx_proto PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(mir_onnx_proto PUBLIC ${MIR_ONNX_PROTO_INCLUDE_DIRS})
+target_link_libraries(mir_onnx_proto PUBLIC libprotobuf)
+
+set(MIR_ONNX_IMPORTER_SOURCES
+ AttributeHelpers.h
+ ConvPoolHelpers.cpp
+ ConvPoolHelpers.h
+ ONNXHelpers.cpp
+ ONNXHelpers.h
+ ONNXImporterImpl.cpp
+ ONNXNodeConverterRegistry.h
+ ONNXNodeConverterRegistry.cpp
+ ONNXOpRegistration.h
+ Op/Abs.cpp
+ Op/Abs.h
+ Op/Add.cpp
+ Op/Add.h
+ Op/AveragePool.cpp
+ Op/AveragePool.h
+ Op/BatchNormalization.cpp
+ Op/BatchNormalization.h
+ Op/Concat.cpp
+ Op/Concat.h
+ Op/Constant.cpp
+ Op/Constant.h
+ Op/Conv.cpp
+ Op/Conv.h
+ Op/ConvTranspose.cpp
+ Op/ConvTranspose.h
+ Op/Div.cpp
+ Op/Div.h
+ Op/Dropout.cpp
+ Op/Dropout.h
+ Op/Equal.cpp
+ Op/Equal.h
+ Op/Expand.cpp
+ Op/Expand.h
+ Op/Flatten.cpp
+ Op/Flatten.h
+ Op/Gather.cpp
+ Op/Gather.h
+ Op/Greater.cpp
+ Op/Greater.h
+ Op/Gemm.cpp
+ Op/Gemm.h
+ Op/Identity.cpp
+ Op/Identity.h
+ Op/Less.cpp
+ Op/Less.h
+ Op/MatMul.cpp
+ Op/MatMul.h
+ Op/GlobalAveragePool.cpp
+ Op/GlobalAveragePool.h
+ Op/Max.cpp
+ Op/Max.h
+ Op/MaxPool.cpp
+ Op/MaxPool.h
+ Op/Mul.cpp
+ Op/Mul.h
+ Op/Pad.cpp
+ Op/Pad.h
+ Op/Reciprocal.cpp
+ Op/Reciprocal.h
+ Op/ReduceMean.cpp
+ Op/ReduceMean.h
+ Op/Relu.cpp
+ Op/Relu.h
+ Op/Reshape.cpp
+ Op/Reshape.h
+ Op/Shape.cpp
+ Op/Shape.h
+ Op/Sigmoid.cpp
+ Op/Sigmoid.h
+ Op/Softmax.cpp
+ Op/Softmax.h
+ Op/Sqrt.cpp
+ Op/Sqrt.h
+ Op/Sub.cpp
+ Op/Sub.h
+ Op/Sum.cpp
+ Op/Sum.h
+ Op/Tanh.cpp
+ Op/Tanh.h
+ Op/Transpose.cpp
+ Op/Transpose.h
+ Op/Unsqueeze.cpp
+ Op/Unsqueeze.h
+ Op/Upsample.cpp
+ Op/Upsample.h)
+
+add_library(mir_onnx_importer STATIC ${MIR_ONNX_IMPORTER_SOURCES})
+set_target_properties(mir_onnx_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(mir_onnx_importer PUBLIC ../../include/mir_onnx_importer)
+target_include_directories(mir_onnx_importer PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
+target_link_libraries(mir_onnx_importer PUBLIC mir mir_onnx_proto PRIVATE mir_interpreter nncc_common)
+
+nnas_find_package(GTest REQUIRED)
+
+file(GLOB_RECURSE TEST_SOURCES "*.test.cpp")
+GTest_AddTest(mir_onnx_importer_test ${TEST_SOURCES})
+target_link_libraries(mir_onnx_importer_test mir_onnx_importer)
diff --git a/compiler/mir-onnx-importer/ConvPoolHelpers.cpp b/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp
index d98e6deae..d98e6deae 100644
--- a/compiler/mir-onnx-importer/ConvPoolHelpers.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp
diff --git a/compiler/mir-onnx-importer/ConvPoolHelpers.h b/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.h
index 099392f4f..099392f4f 100644
--- a/compiler/mir-onnx-importer/ConvPoolHelpers.h
+++ b/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.h
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.cpp b/compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp
index f3a9d182d..f3a9d182d 100644
--- a/compiler/mir-onnx-importer/ONNXHelpers.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.h b/compiler/mir/src/mir_onnx_importer/ONNXHelpers.h
index 1367ab82a..1367ab82a 100644
--- a/compiler/mir-onnx-importer/ONNXHelpers.h
+++ b/compiler/mir/src/mir_onnx_importer/ONNXHelpers.h
diff --git a/compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp b/compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp
new file mode 100644
index 000000000..8b996244f
--- /dev/null
+++ b/compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXImporterImpl.h"
+#include "ONNXHelpers.h"
+#include "ONNXOpRegistration.h"
+#include "onnx/onnx.pb.h"
+
+#include "mir/Shape.h"
+#include "mir/TensorUtil.h"
+
+#include "mir/ops/ConstantOp.h"
+
+#include <fcntl.h>
+
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/text_format.h>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <utility>
+
+namespace mir_onnx
+{
+
+namespace
+{
+
+class ONNXImporterImpl final
+{
+public:
+ ONNXImporterImpl();
+ ~ONNXImporterImpl();
+ /// @brief Load the model and convert it into a MIR Graph.
+ std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename);
+ std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename);
+
+private:
+ std::unique_ptr<mir::Graph> createIR();
+ void createGraphInputs();
+ void collectUnsupportedOps();
+ std::unique_ptr<onnx::ModelProto> _model;
+ std::unique_ptr<ConverterContext> _converterCtx;
+ std::unique_ptr<ModelContext> _modelCtx;
+ std::unique_ptr<mir::Graph> _graph;
+};
+
+ONNXImporterImpl::ONNXImporterImpl() { registerSupportedOps(); }
+
+ONNXImporterImpl::~ONNXImporterImpl() = default;
+
+void loadModelFromBinaryFile(const std::string &filename, onnx::ModelProto *model)
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ int file_handle = open(filename.c_str(), O_RDONLY);
+
+ if (file_handle == -1)
+ throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
+ ".");
+
+ google::protobuf::io::FileInputStream file_stream(file_handle);
+ file_stream.SetCloseOnDelete(true);
+
+ google::protobuf::io::CodedInputStream coded_stream(&file_stream);
+ coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);
+
+ if (!model->ParseFromCodedStream(&coded_stream))
+ throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
+
+ // If the file has not been consumed entirely, assume that the file is in the wrong format.
+ if (!coded_stream.ConsumedEntireMessage())
+ throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
+}
+
+void loadModelFromTextFile(const std::string &filename, onnx::ModelProto *model)
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ int file_handle = open(filename.c_str(), O_RDONLY);
+
+ if (file_handle == -1)
+ throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
+ ".");
+
+ google::protobuf::io::FileInputStream file_stream(file_handle);
+ file_stream.SetCloseOnDelete(true);
+
+ if (!google::protobuf::TextFormat::Parse(&file_stream, model))
+ throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
+}
+
+std::unique_ptr<mir::Graph> ONNXImporterImpl::importModelFromBinaryFile(const std::string &filename)
+{
+ _model = std::make_unique<onnx::ModelProto>();
+ loadModelFromBinaryFile(filename, _model.get());
+ _modelCtx = std::make_unique<ModelContext>(_model.get());
+ collectUnsupportedOps();
+ return createIR();
+}
+
+std::unique_ptr<mir::Graph> ONNXImporterImpl::importModelFromTextFile(const std::string &filename)
+{
+ _model = std::make_unique<onnx::ModelProto>();
+ loadModelFromTextFile(filename, _model.get());
+ _modelCtx = std::make_unique<ModelContext>(_model.get());
+ collectUnsupportedOps();
+ return createIR();
+}
+
+void ONNXImporterImpl::collectUnsupportedOps()
+{
+ std::set<std::pair<std::string, int64_t>> problems_op_set;
+
+ for (int i = 0; i < _model->graph().node_size(); i++)
+ {
+ const auto &onnx_node = _model->graph().node(i);
+ assert(onnx_node.has_op_type());
+ const auto &op_type = onnx_node.op_type();
+ auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
+
+ NodeConverterRegistry::ConverterFunc converter =
+ NodeConverterRegistry::getInstance().lookup(op_type, opset);
+
+ if (converter == nullptr)
+ problems_op_set.emplace(op_type, opset);
+ }
+ if (!problems_op_set.empty())
+ {
+ std::cerr << "The following operators are not supported:\n";
+ for (const auto &op : problems_op_set)
+ std::cerr << op.first << " opset " << op.second << std::endl;
+ throw std::runtime_error("Unsupported operators found");
+ }
+}
+
+void ONNXImporterImpl::createGraphInputs()
+{
+ const auto &graph = _model->graph();
+ const auto &initializer = graph.initializer();
+
+ // Create all initializer Tensors
+ for (const auto &tensor : initializer)
+ {
+ const auto mir_tensor = createTensor(&tensor);
+ auto *op = _graph->create<mir::ops::ConstantOp>(mir_tensor);
+ _converterCtx->setOutput(tensor.name(), op->getOutput(0));
+ }
+
+ for (const auto &input : graph.input())
+ {
+ assert(input.has_name());
+
+ if (_converterCtx->getOutput(input.name()) == nullptr)
+ {
+ const auto &onnx_input_shape = input.type().tensor_type().shape();
+ mir::Shape shape(onnx_input_shape.dim_size());
+ for (int i = 0; i < onnx_input_shape.dim_size(); i++)
+ {
+ assert(onnx_input_shape.dim(i).has_dim_value());
+ shape.dim(i) = static_cast<int32_t>(onnx_input_shape.dim(i).dim_value());
+ }
+
+ auto elem_type = onnxDataTypeToMirDataType(
+ (onnx::TensorProto_DataType)input.type().tensor_type().elem_type());
+ mir::TensorType type{elem_type, shape};
+ auto *op = _graph->create<mir::ops::InputOp>(type);
+ _converterCtx->setOutput(input.name(), op->getOutput(0));
+ }
+ }
+}
+
+std::unique_ptr<mir::Graph> ONNXImporterImpl::createIR()
+{
+ _graph = std::make_unique<mir::Graph>();
+ _converterCtx = std::make_unique<ConverterContext>(_graph.get());
+
+ createGraphInputs();
+
+ // Forming partially ordered computation graph
+ for (const auto &onnx_node : _model->graph().node())
+ {
+ assert(onnx_node.has_op_type());
+ auto &op_type = onnx_node.op_type();
+ auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
+ // Get converter
+ NodeConverterRegistry::ConverterFunc converter =
+ NodeConverterRegistry::getInstance().lookup(op_type, opset);
+ assert(converter != nullptr);
+ converter(onnx_node, _converterCtx.get());
+ }
+ // Set graph outputs
+ const auto &outputs = _model->graph().output();
+ for (const auto &output : outputs)
+ {
+ assert(output.has_name());
+ auto mir_output = _converterCtx->getOutput(output.name());
+ if (mir_output == nullptr)
+ throw std::runtime_error("Bad output name!");
+
+ _graph->create<mir::ops::OutputOp>(mir_output);
+ }
+
+ return std::move(_graph);
+}
+
+} // namespace
+
+std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename)
+{
+ ONNXImporterImpl importer;
+ return importer.importModelFromBinaryFile(filename);
+}
+
+std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename)
+{
+ ONNXImporterImpl importer;
+ return importer.importModelFromTextFile(filename);
+}
+
+std::unique_ptr<mir::Graph> loadModel(const std::string &filename)
+{
+ return importModelFromBinaryFile(filename);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.cpp b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp
index a11b18e89..a11b18e89 100644
--- a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp
diff --git a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.h b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.h
index ea712ad23..ea712ad23 100644
--- a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.h
+++ b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.h
diff --git a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.test.cpp b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.test.cpp
index dfc3e4216..dfc3e4216 100644
--- a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.test.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.test.cpp
diff --git a/compiler/mir-onnx-importer/ONNXOpRegistration.h b/compiler/mir/src/mir_onnx_importer/ONNXOpRegistration.h
index e3001b000..e3001b000 100644
--- a/compiler/mir-onnx-importer/ONNXOpRegistration.h
+++ b/compiler/mir/src/mir_onnx_importer/ONNXOpRegistration.h
diff --git a/compiler/mir-onnx-importer/Op/Abs.cpp b/compiler/mir/src/mir_onnx_importer/Op/Abs.cpp
index 350270cfd..350270cfd 100644
--- a/compiler/mir-onnx-importer/Op/Abs.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Abs.cpp
diff --git a/compiler/mir-onnx-importer/Op/Abs.h b/compiler/mir/src/mir_onnx_importer/Op/Abs.h
index 06fcd5f3c..06fcd5f3c 100644
--- a/compiler/mir-onnx-importer/Op/Abs.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Abs.h
diff --git a/compiler/mir-onnx-importer/Op/Add.cpp b/compiler/mir/src/mir_onnx_importer/Op/Add.cpp
index 8944b4e66..8944b4e66 100644
--- a/compiler/mir-onnx-importer/Op/Add.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Add.cpp
diff --git a/compiler/mir-onnx-importer/Op/Add.h b/compiler/mir/src/mir_onnx_importer/Op/Add.h
index a11aa6bb7..a11aa6bb7 100644
--- a/compiler/mir-onnx-importer/Op/Add.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Add.h
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp
index 503feffc8..503feffc8 100644
--- a/compiler/mir-onnx-importer/Op/AveragePool.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.h b/compiler/mir/src/mir_onnx_importer/Op/AveragePool.h
index 54e406daf..54e406daf 100644
--- a/compiler/mir-onnx-importer/Op/AveragePool.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/AveragePool.h
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp b/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp
index 8a6d8cc51..8a6d8cc51 100644
--- a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.h b/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.h
index 7c2e37a9c..7c2e37a9c 100644
--- a/compiler/mir-onnx-importer/Op/BatchNormalization.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.h
diff --git a/compiler/mir-onnx-importer/Op/Concat.cpp b/compiler/mir/src/mir_onnx_importer/Op/Concat.cpp
index dbe752647..dbe752647 100644
--- a/compiler/mir-onnx-importer/Op/Concat.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Concat.cpp
diff --git a/compiler/mir-onnx-importer/Op/Concat.h b/compiler/mir/src/mir_onnx_importer/Op/Concat.h
index 430a2d9e4..430a2d9e4 100644
--- a/compiler/mir-onnx-importer/Op/Concat.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Concat.h
diff --git a/compiler/mir-onnx-importer/Op/Constant.cpp b/compiler/mir/src/mir_onnx_importer/Op/Constant.cpp
index 710760ed3..710760ed3 100644
--- a/compiler/mir-onnx-importer/Op/Constant.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Constant.cpp
diff --git a/compiler/mir-onnx-importer/Op/Constant.h b/compiler/mir/src/mir_onnx_importer/Op/Constant.h
index 2a4db0fb7..2a4db0fb7 100644
--- a/compiler/mir-onnx-importer/Op/Constant.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Constant.h
diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir/src/mir_onnx_importer/Op/Conv.cpp
index 7dc6ce818..7dc6ce818 100644
--- a/compiler/mir-onnx-importer/Op/Conv.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Conv.cpp
diff --git a/compiler/mir-onnx-importer/Op/Conv.h b/compiler/mir/src/mir_onnx_importer/Op/Conv.h
index 2af2b8959..2af2b8959 100644
--- a/compiler/mir-onnx-importer/Op/Conv.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Conv.h
diff --git a/compiler/mir-onnx-importer/Op/ConvTranspose.cpp b/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp
index 3078a1959..3078a1959 100644
--- a/compiler/mir-onnx-importer/Op/ConvTranspose.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp
diff --git a/compiler/mir-onnx-importer/Op/ConvTranspose.h b/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.h
index d203dc6c1..d203dc6c1 100644
--- a/compiler/mir-onnx-importer/Op/ConvTranspose.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.h
diff --git a/compiler/mir-onnx-importer/Op/Div.cpp b/compiler/mir/src/mir_onnx_importer/Op/Div.cpp
index 40620169a..40620169a 100644
--- a/compiler/mir-onnx-importer/Op/Div.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Div.cpp
diff --git a/compiler/mir-onnx-importer/Op/Div.h b/compiler/mir/src/mir_onnx_importer/Op/Div.h
index cdc254fb8..cdc254fb8 100644
--- a/compiler/mir-onnx-importer/Op/Div.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Div.h
diff --git a/compiler/mir-onnx-importer/Op/Dropout.cpp b/compiler/mir/src/mir_onnx_importer/Op/Dropout.cpp
index ef6972784..ef6972784 100644
--- a/compiler/mir-onnx-importer/Op/Dropout.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Dropout.cpp
diff --git a/compiler/mir-onnx-importer/Op/Dropout.h b/compiler/mir/src/mir_onnx_importer/Op/Dropout.h
index 9a90ac79b..9a90ac79b 100644
--- a/compiler/mir-onnx-importer/Op/Dropout.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Dropout.h
diff --git a/compiler/mir-onnx-importer/Op/Equal.cpp b/compiler/mir/src/mir_onnx_importer/Op/Equal.cpp
index 242389eb5..242389eb5 100644
--- a/compiler/mir-onnx-importer/Op/Equal.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Equal.cpp
diff --git a/compiler/mir-onnx-importer/Op/Equal.h b/compiler/mir/src/mir_onnx_importer/Op/Equal.h
index 0672cd661..0672cd661 100644
--- a/compiler/mir-onnx-importer/Op/Equal.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Equal.h
diff --git a/compiler/mir-onnx-importer/Op/Expand.cpp b/compiler/mir/src/mir_onnx_importer/Op/Expand.cpp
index 40002dfa9..40002dfa9 100644
--- a/compiler/mir-onnx-importer/Op/Expand.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Expand.cpp
diff --git a/compiler/mir-onnx-importer/Op/Expand.h b/compiler/mir/src/mir_onnx_importer/Op/Expand.h
index 35f7af407..35f7af407 100644
--- a/compiler/mir-onnx-importer/Op/Expand.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Expand.h
diff --git a/compiler/mir-onnx-importer/Op/Flatten.cpp b/compiler/mir/src/mir_onnx_importer/Op/Flatten.cpp
index dfad6ddbf..dfad6ddbf 100644
--- a/compiler/mir-onnx-importer/Op/Flatten.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Flatten.cpp
diff --git a/compiler/mir-onnx-importer/Op/Flatten.h b/compiler/mir/src/mir_onnx_importer/Op/Flatten.h
index 174a8d906..174a8d906 100644
--- a/compiler/mir-onnx-importer/Op/Flatten.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Flatten.h
diff --git a/compiler/mir-onnx-importer/Op/Gather.cpp b/compiler/mir/src/mir_onnx_importer/Op/Gather.cpp
index fa3746c67..fa3746c67 100644
--- a/compiler/mir-onnx-importer/Op/Gather.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Gather.cpp
diff --git a/compiler/mir-onnx-importer/Op/Gather.h b/compiler/mir/src/mir_onnx_importer/Op/Gather.h
index c4308d2be..c4308d2be 100644
--- a/compiler/mir-onnx-importer/Op/Gather.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Gather.h
diff --git a/compiler/mir-onnx-importer/Op/Gemm.cpp b/compiler/mir/src/mir_onnx_importer/Op/Gemm.cpp
index 1e0759dda..1e0759dda 100644
--- a/compiler/mir-onnx-importer/Op/Gemm.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Gemm.cpp
diff --git a/compiler/mir-onnx-importer/Op/Gemm.h b/compiler/mir/src/mir_onnx_importer/Op/Gemm.h
index d87a36e7b..d87a36e7b 100644
--- a/compiler/mir-onnx-importer/Op/Gemm.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Gemm.h
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp b/compiler/mir/src/mir_onnx_importer/Op/GlobalAveragePool.cpp
index 379c8b596..379c8b596 100644
--- a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/GlobalAveragePool.cpp
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.h b/compiler/mir/src/mir_onnx_importer/Op/GlobalAveragePool.h
index b2fb9b8c9..b2fb9b8c9 100644
--- a/compiler/mir-onnx-importer/Op/GlobalAveragePool.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/GlobalAveragePool.h
diff --git a/compiler/mir-onnx-importer/Op/Greater.cpp b/compiler/mir/src/mir_onnx_importer/Op/Greater.cpp
index deaf96d4b..deaf96d4b 100644
--- a/compiler/mir-onnx-importer/Op/Greater.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Greater.cpp
diff --git a/compiler/mir-onnx-importer/Op/Greater.h b/compiler/mir/src/mir_onnx_importer/Op/Greater.h
index 3b6a44f33..3b6a44f33 100644
--- a/compiler/mir-onnx-importer/Op/Greater.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Greater.h
diff --git a/compiler/mir-onnx-importer/Op/Identity.cpp b/compiler/mir/src/mir_onnx_importer/Op/Identity.cpp
index 6db70ffcd..6db70ffcd 100644
--- a/compiler/mir-onnx-importer/Op/Identity.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Identity.cpp
diff --git a/compiler/mir-onnx-importer/Op/Identity.h b/compiler/mir/src/mir_onnx_importer/Op/Identity.h
index ea63bab4a..ea63bab4a 100644
--- a/compiler/mir-onnx-importer/Op/Identity.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Identity.h
diff --git a/compiler/mir-onnx-importer/Op/Less.cpp b/compiler/mir/src/mir_onnx_importer/Op/Less.cpp
index 44f5d8cf4..44f5d8cf4 100644
--- a/compiler/mir-onnx-importer/Op/Less.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Less.cpp
diff --git a/compiler/mir-onnx-importer/Op/Less.h b/compiler/mir/src/mir_onnx_importer/Op/Less.h
index 682c08725..682c08725 100644
--- a/compiler/mir-onnx-importer/Op/Less.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Less.h
diff --git a/compiler/mir-onnx-importer/Op/MatMul.cpp b/compiler/mir/src/mir_onnx_importer/Op/MatMul.cpp
index 6d8ea6b83..6d8ea6b83 100644
--- a/compiler/mir-onnx-importer/Op/MatMul.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/MatMul.cpp
diff --git a/compiler/mir-onnx-importer/Op/MatMul.h b/compiler/mir/src/mir_onnx_importer/Op/MatMul.h
index 97e641ebb..97e641ebb 100644
--- a/compiler/mir-onnx-importer/Op/MatMul.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/MatMul.h
diff --git a/compiler/mir-onnx-importer/Op/Max.cpp b/compiler/mir/src/mir_onnx_importer/Op/Max.cpp
index d4c7d1775..d4c7d1775 100644
--- a/compiler/mir-onnx-importer/Op/Max.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Max.cpp
diff --git a/compiler/mir-onnx-importer/Op/Max.h b/compiler/mir/src/mir_onnx_importer/Op/Max.h
index 1f2754b62..1f2754b62 100644
--- a/compiler/mir-onnx-importer/Op/Max.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Max.h
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp
index 53e6e1556..53e6e1556 100644
--- a/compiler/mir-onnx-importer/Op/MaxPool.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.h b/compiler/mir/src/mir_onnx_importer/Op/MaxPool.h
index 85bd9cf1a..85bd9cf1a 100644
--- a/compiler/mir-onnx-importer/Op/MaxPool.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/MaxPool.h
diff --git a/compiler/mir-onnx-importer/Op/Mul.cpp b/compiler/mir/src/mir_onnx_importer/Op/Mul.cpp
index dbfdd4950..dbfdd4950 100644
--- a/compiler/mir-onnx-importer/Op/Mul.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Mul.cpp
diff --git a/compiler/mir-onnx-importer/Op/Mul.h b/compiler/mir/src/mir_onnx_importer/Op/Mul.h
index 58738c81d..58738c81d 100644
--- a/compiler/mir-onnx-importer/Op/Mul.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Mul.h
diff --git a/compiler/mir/src/mir_onnx_importer/Op/Pad.cpp b/compiler/mir/src/mir_onnx_importer/Op/Pad.cpp
new file mode 100644
index 000000000..08d7c4ab5
--- /dev/null
+++ b/compiler/mir/src/mir_onnx_importer/Op/Pad.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pad.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/PadOp.h"
+
+namespace mir_onnx
+{
+
+void convertPadAttrName(const std::string &pad_attr_name, const onnx::NodeProto &onnx_node,
+ ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() == 1);
+ auto input = inputs[0];
+
+ // 0.0f is the default value to be filled into padded cells.
+ const auto value = getAttributeValue<float>(onnx_node, "value", 0.0f);
+ const auto pads = getAttributeValue<std::vector<std::int64_t>>(onnx_node, pad_attr_name);
+ // "constant" is the default mode.
+ const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "constant");
+ if (mode != "constant")
+ throw std::runtime_error("Not supported Pad mode attribute!");
+
+ const int num_dims = input->getShape().rank();
+ assert(static_cast<int>(pads.size()) == num_dims * 2);
+ mir::PadOpAttributes attributes(num_dims);
+ for (int i = 0; i < num_dims; i++)
+ {
+ attributes.padding_before[i] = pads[i];
+ attributes.padding_after[i] = pads[num_dims + i];
+ }
+
+ attributes.padding_value = value;
+
+ auto result = createOp<mir::ops::PadOp>(graph, input, attributes)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertPadV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertPadAttrName("paddings", onnx_node, context);
+}
+
+void convertPadV2(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertPadAttrName("pads", onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Pad.h b/compiler/mir/src/mir_onnx_importer/Op/Pad.h
index a0731ae4c..a0731ae4c 100644
--- a/compiler/mir-onnx-importer/Op/Pad.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Pad.h
diff --git a/compiler/mir-onnx-importer/Op/Reciprocal.cpp b/compiler/mir/src/mir_onnx_importer/Op/Reciprocal.cpp
index b063d4b8c..b063d4b8c 100644
--- a/compiler/mir-onnx-importer/Op/Reciprocal.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Reciprocal.cpp
diff --git a/compiler/mir-onnx-importer/Op/Reciprocal.h b/compiler/mir/src/mir_onnx_importer/Op/Reciprocal.h
index 747623ab5..747623ab5 100644
--- a/compiler/mir-onnx-importer/Op/Reciprocal.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Reciprocal.h
diff --git a/compiler/mir-onnx-importer/Op/ReduceMean.cpp b/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp
index ec43bffb4..ec43bffb4 100644
--- a/compiler/mir-onnx-importer/Op/ReduceMean.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp
diff --git a/compiler/mir-onnx-importer/Op/ReduceMean.h b/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.h
index 3553c96b5..3553c96b5 100644
--- a/compiler/mir-onnx-importer/Op/ReduceMean.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.h
diff --git a/compiler/mir-onnx-importer/Op/Relu.cpp b/compiler/mir/src/mir_onnx_importer/Op/Relu.cpp
index 72424e847..72424e847 100644
--- a/compiler/mir-onnx-importer/Op/Relu.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Relu.cpp
diff --git a/compiler/mir-onnx-importer/Op/Relu.h b/compiler/mir/src/mir_onnx_importer/Op/Relu.h
index 7159f0add..7159f0add 100644
--- a/compiler/mir-onnx-importer/Op/Relu.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Relu.h
diff --git a/compiler/mir-onnx-importer/Op/Reshape.cpp b/compiler/mir/src/mir_onnx_importer/Op/Reshape.cpp
index 5cd4985e2..5cd4985e2 100644
--- a/compiler/mir-onnx-importer/Op/Reshape.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Reshape.cpp
diff --git a/compiler/mir-onnx-importer/Op/Reshape.h b/compiler/mir/src/mir_onnx_importer/Op/Reshape.h
index 4ebbcb7a7..4ebbcb7a7 100644
--- a/compiler/mir-onnx-importer/Op/Reshape.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Reshape.h
diff --git a/compiler/mir-onnx-importer/Op/Shape.cpp b/compiler/mir/src/mir_onnx_importer/Op/Shape.cpp
index 8cc250b6e..8cc250b6e 100644
--- a/compiler/mir-onnx-importer/Op/Shape.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Shape.cpp
diff --git a/compiler/mir-onnx-importer/Op/Shape.h b/compiler/mir/src/mir_onnx_importer/Op/Shape.h
index e427d0330..e427d0330 100644
--- a/compiler/mir-onnx-importer/Op/Shape.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Shape.h
diff --git a/compiler/mir-onnx-importer/Op/Sigmoid.cpp b/compiler/mir/src/mir_onnx_importer/Op/Sigmoid.cpp
index 3db547186..3db547186 100644
--- a/compiler/mir-onnx-importer/Op/Sigmoid.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sigmoid.cpp
diff --git a/compiler/mir-onnx-importer/Op/Sigmoid.h b/compiler/mir/src/mir_onnx_importer/Op/Sigmoid.h
index e2d85298f..e2d85298f 100644
--- a/compiler/mir-onnx-importer/Op/Sigmoid.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sigmoid.h
diff --git a/compiler/mir-onnx-importer/Op/Softmax.cpp b/compiler/mir/src/mir_onnx_importer/Op/Softmax.cpp
index 1a2ca04ae..1a2ca04ae 100644
--- a/compiler/mir-onnx-importer/Op/Softmax.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Softmax.cpp
diff --git a/compiler/mir-onnx-importer/Op/Softmax.h b/compiler/mir/src/mir_onnx_importer/Op/Softmax.h
index 23d14c123..23d14c123 100644
--- a/compiler/mir-onnx-importer/Op/Softmax.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Softmax.h
diff --git a/compiler/mir-onnx-importer/Op/Sqrt.cpp b/compiler/mir/src/mir_onnx_importer/Op/Sqrt.cpp
index 70ef252fe..70ef252fe 100644
--- a/compiler/mir-onnx-importer/Op/Sqrt.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sqrt.cpp
diff --git a/compiler/mir-onnx-importer/Op/Sqrt.h b/compiler/mir/src/mir_onnx_importer/Op/Sqrt.h
index 51815c93c..51815c93c 100644
--- a/compiler/mir-onnx-importer/Op/Sqrt.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sqrt.h
diff --git a/compiler/mir-onnx-importer/Op/Sub.cpp b/compiler/mir/src/mir_onnx_importer/Op/Sub.cpp
index 0c3251909..0c3251909 100644
--- a/compiler/mir-onnx-importer/Op/Sub.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sub.cpp
diff --git a/compiler/mir-onnx-importer/Op/Sub.h b/compiler/mir/src/mir_onnx_importer/Op/Sub.h
index b521e71ae..b521e71ae 100644
--- a/compiler/mir-onnx-importer/Op/Sub.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sub.h
diff --git a/compiler/mir-onnx-importer/Op/Sum.cpp b/compiler/mir/src/mir_onnx_importer/Op/Sum.cpp
index c3a8dacca..c3a8dacca 100644
--- a/compiler/mir-onnx-importer/Op/Sum.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sum.cpp
diff --git a/compiler/mir-onnx-importer/Op/Sum.h b/compiler/mir/src/mir_onnx_importer/Op/Sum.h
index 74ceb6dd7..74ceb6dd7 100644
--- a/compiler/mir-onnx-importer/Op/Sum.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Sum.h
diff --git a/compiler/mir-onnx-importer/Op/Tanh.cpp b/compiler/mir/src/mir_onnx_importer/Op/Tanh.cpp
index c7faf157c..c7faf157c 100644
--- a/compiler/mir-onnx-importer/Op/Tanh.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Tanh.cpp
diff --git a/compiler/mir-onnx-importer/Op/Tanh.h b/compiler/mir/src/mir_onnx_importer/Op/Tanh.h
index 5d3199541..5d3199541 100644
--- a/compiler/mir-onnx-importer/Op/Tanh.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Tanh.h
diff --git a/compiler/mir/src/mir_onnx_importer/Op/Transpose.cpp b/compiler/mir/src/mir_onnx_importer/Op/Transpose.cpp
new file mode 100644
index 000000000..ffe0e8471
--- /dev/null
+++ b/compiler/mir/src/mir_onnx_importer/Op/Transpose.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Transpose.h"
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/TransposeOp.h"
+
+#include <numeric>
+
+namespace mir_onnx
+{
+
+void convertTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ const auto inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() == 1);
+ auto input = inputs[0];
+
+ const int num_axes = input->getShape().rank();
+ std::vector<std::size_t> axis_order(num_axes);
+ const auto *perm_attr = findAttribute(onnx_node, "perm");
+
+ if (perm_attr == nullptr)
+ {
+ // Reverse the dimensions.
+ std::iota(axis_order.rbegin(), axis_order.rend(), 0);
+ }
+ else
+ {
+ const auto perm = getAttributeValue<std::vector<std::int64_t>>(*perm_attr);
+ assert(static_cast<int>(perm.size()) == num_axes);
+ std::copy(perm.cbegin(), perm.cend(), axis_order.begin());
+ }
+
+ auto result = createOp<mir::ops::TransposeOp>(graph, input, axis_order)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Transpose.h b/compiler/mir/src/mir_onnx_importer/Op/Transpose.h
index 1f8c4369a..1f8c4369a 100644
--- a/compiler/mir-onnx-importer/Op/Transpose.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Transpose.h
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp b/compiler/mir/src/mir_onnx_importer/Op/Unsqueeze.cpp
index 1b5995532..1b5995532 100644
--- a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Unsqueeze.cpp
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.h b/compiler/mir/src/mir_onnx_importer/Op/Unsqueeze.h
index 46fea97ee..46fea97ee 100644
--- a/compiler/mir-onnx-importer/Op/Unsqueeze.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Unsqueeze.h
diff --git a/compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp b/compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp
new file mode 100644
index 000000000..346e22cc2
--- /dev/null
+++ b/compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Upsample.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/Tensor.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ResizeOp.h"
+
+#include <stdexcept>
+
+namespace mir_onnx
+{
+
+void convertUpsampleV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ // "nearest" is the default mode.
+ std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
+ assert(mode == "nearest" && "Unsupported upscale mode!");
+
+ const float h_scale = getAttributeValue<float>(onnx_node, "height_scale", 0.0f); // required
+ const float w_scale = getAttributeValue<float>(onnx_node, "width_scale", 0.0f); // required
+ if (h_scale < 1.0f || w_scale < 1.0f)
+ throw std::runtime_error("Wrong scale attributes!");
+
+ assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
+ std::vector<float> scales_vector(4);
+ // NCHW
+ scales_vector.at(0) = 1.0f;
+ scales_vector.at(1) = 1.0f;
+ scales_vector.at(2) = h_scale;
+ scales_vector.at(3) = w_scale;
+
+ auto result =
+ createOp<mir::ops::ResizeOp>(graph, inputs[0],
+ mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertUpsampleV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ // "nearest" is the default mode.
+ std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
+ assert(mode == "nearest" && "Unsupported upscale mode!");
+
+ const auto *scales_attr = findAttribute(onnx_node, "scales");
+ if (!scales_attr)
+ throw std::runtime_error("Not enough required scales attribute!");
+
+ if (scales_attr->floats_size() != inputs[0]->getShape().rank())
+ throw std::runtime_error(
+ "Number of elements of scales should be the same as the rank of input");
+
+ assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
+ std::vector<float> scales_vector(4);
+ // NCHW
+ scales_vector.at(0) = scales_attr->floats(0);
+ scales_vector.at(1) = scales_attr->floats(1);
+ scales_vector.at(2) = scales_attr->floats(2);
+ scales_vector.at(3) = scales_attr->floats(3);
+
+ auto result =
+ createOp<mir::ops::ResizeOp>(graph, inputs[0],
+ mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertUpsampleV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ // "nearest" is the default mode.
+ const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
+ if (mode != "nearest")
+ throw std::runtime_error("Upsample: only 'nearest' mode is supported.");
+
+ // relies on attributes being lifted to constants (ONNX optimization pass)
+ assert(inputs.size() > 1);
+ auto *scales = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+ assert(scales && "Weights could be a constant tensor only");
+ auto scales_tensor = mir::Tensor<float>(scales->getValue());
+ int rank = inputs[0]->getShape().rank();
+ if (rank != 4)
+ throw std::runtime_error("Upsample: only 4-D input is supported.");
+ assert(scales_tensor.getShape().numElements() == rank &&
+ "The number of elements of 'scales' should be the same as the rank of input 'X'");
+ std::vector<float> scales_vector(rank);
+ for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
+ scales_vector[i] = scales_tensor.atOffset(i);
+
+ auto result =
+ createOp<mir::ops::ResizeOp>(graph, inputs[0],
+ mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Upsample.h b/compiler/mir/src/mir_onnx_importer/Op/Upsample.h
index 99600eede..99600eede 100644
--- a/compiler/mir-onnx-importer/Op/Upsample.h
+++ b/compiler/mir/src/mir_onnx_importer/Op/Upsample.h
diff --git a/compiler/mir/src/mir_tflite_importer/CMakeLists.txt b/compiler/mir/src/mir_tflite_importer/CMakeLists.txt
new file mode 100644
index 000000000..952857c86
--- /dev/null
+++ b/compiler/mir/src/mir_tflite_importer/CMakeLists.txt
@@ -0,0 +1,21 @@
+nnas_find_package(FlatBuffers REQUIRED)
+
+if (NOT FlatBuffers_FOUND)
+ return()
+endif ()
+
+FlatBuffers_Target(mir_tflite_schema
+ OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated/schema"
+ SCHEMA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/schema"
+ SCHEMA_FILES schema.fbs)
+
+
+set(MIR_TFLITE_IMPORTER_SOURCES
+ tflite_importer.cpp
+ tflite_op_creator.cpp
+ tflite_op_creator.h)
+
+add_library(mir_tflite_importer STATIC ${MIR_TFLITE_IMPORTER_SOURCES})
+set_target_properties(mir_tflite_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(mir_tflite_importer PUBLIC ../../include/mir_tflite_importer)
+target_link_libraries(mir_tflite_importer PUBLIC mir PRIVATE mir_tflite_schema nncc_common)
diff --git a/compiler/mir-tflite-importer/schema/schema.fbs b/compiler/mir/src/mir_tflite_importer/schema/schema.fbs
index dc7aab128..dc7aab128 100644
--- a/compiler/mir-tflite-importer/schema/schema.fbs
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema.fbs
diff --git a/compiler/mir-tflite-importer/schema/schema.meta b/compiler/mir/src/mir_tflite_importer/schema/schema.meta
index c86134c5a..c86134c5a 100644
--- a/compiler/mir-tflite-importer/schema/schema.meta
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema.meta
diff --git a/compiler/mir-tflite-importer/schema/schema_v0.fbs b/compiler/mir/src/mir_tflite_importer/schema/schema_v0.fbs
index 852ea988f..852ea988f 100644
--- a/compiler/mir-tflite-importer/schema/schema_v0.fbs
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v0.fbs
diff --git a/compiler/mir-tflite-importer/schema/schema_v0.meta b/compiler/mir/src/mir_tflite_importer/schema/schema_v0.meta
index 74668ab7a..74668ab7a 100644
--- a/compiler/mir-tflite-importer/schema/schema_v0.meta
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v0.meta
diff --git a/compiler/mir-tflite-importer/schema/schema_v1.fbs b/compiler/mir/src/mir_tflite_importer/schema/schema_v1.fbs
index 06cd9408e..06cd9408e 100644
--- a/compiler/mir-tflite-importer/schema/schema_v1.fbs
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v1.fbs
diff --git a/compiler/mir-tflite-importer/schema/schema_v1.meta b/compiler/mir/src/mir_tflite_importer/schema/schema_v1.meta
index 74668ab7a..74668ab7a 100644
--- a/compiler/mir-tflite-importer/schema/schema_v1.meta
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v1.meta
diff --git a/compiler/mir-tflite-importer/schema/schema_v2.fbs b/compiler/mir/src/mir_tflite_importer/schema/schema_v2.fbs
index 96731c8aa..96731c8aa 100644
--- a/compiler/mir-tflite-importer/schema/schema_v2.fbs
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v2.fbs
diff --git a/compiler/mir-tflite-importer/schema/schema_v2.meta b/compiler/mir/src/mir_tflite_importer/schema/schema_v2.meta
index 74668ab7a..74668ab7a 100644
--- a/compiler/mir-tflite-importer/schema/schema_v2.meta
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v2.meta
diff --git a/compiler/mir-tflite-importer/schema/schema_v3.fbs b/compiler/mir/src/mir_tflite_importer/schema/schema_v3.fbs
index cedefe08f..cedefe08f 100644
--- a/compiler/mir-tflite-importer/schema/schema_v3.fbs
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v3.fbs
diff --git a/compiler/mir-tflite-importer/schema/schema_v3.meta b/compiler/mir/src/mir_tflite_importer/schema/schema_v3.meta
index 74668ab7a..74668ab7a 100644
--- a/compiler/mir-tflite-importer/schema/schema_v3.meta
+++ b/compiler/mir/src/mir_tflite_importer/schema/schema_v3.meta
diff --git a/compiler/mir/src/mir_tflite_importer/tflite_importer.cpp b/compiler/mir/src/mir_tflite_importer/tflite_importer.cpp
new file mode 100644
index 000000000..3f245d2d4
--- /dev/null
+++ b/compiler/mir/src/mir_tflite_importer/tflite_importer.cpp
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite_importer.h"
+#include "tflite_op_creator.h"
+#include "schema_generated.h"
+
+#include "mir/TensorVariant.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/OutputOp.h"
+
+#include <fstream>
+#include <memory>
+#include <utility>
+#include <vector>
+#include <set>
+
+namespace mir_tflite
+{
+
+namespace
+{
+
+// Loads a TFLite flatbuffer model from a file and converts it into a MIR graph.
+// Usage: construct with a filename, then call importModel() exactly once.
+class TfliteImporter
+{
+public:
+  explicit TfliteImporter(std::string filename);
+
+  /// @brief Load the model and convert it into a MIR Graph.
+  std::unique_ptr<mir::Graph> importModel();
+
+  ~TfliteImporter();
+
+private:
+  // Path of the .tflite file passed to the constructor.
+  std::string _filename;
+  // Unpacked (flatbuffers object API) model; populated by import().
+  std::unique_ptr<tflite::ModelT> _model;
+
+  // Graph under construction; ownership is released to the caller by importModel().
+  std::unique_ptr<mir::Graph> _graph;
+  std::unique_ptr<TFLiteOpCreator> _opCreator;
+
+  // Maps TFLite tensors indices to corresponding MIR operation outputs.
+  std::vector<mir::Operation::Output *> _tensorMap;
+
+  /// @brief Read and verify the flatbuffer file, populating _model.
+  void import();
+
+  void walkModel(const tflite::ModelT *model);
+
+  void walkSubgraph(const tflite::SubGraphT *subgraph);
+
+  /// @brief Convert one TFLite operator and record its outputs in _tensorMap.
+  void walkOperator(const tflite::SubGraphT *subgraph, const tflite::OperatorT *op);
+
+  /**
+   * @brief Pass through tflite graph and collect operators unsupported by NNC
+   * @throw std::runtime_error with message, containing detected problems
+   */
+  void collectUnsupportedOps();
+
+  /**
+   * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
+   */
+  std::vector<mir::Operation::Output *> getMIRInputsForOperator(const tflite::SubGraphT *subgraph,
+                                                                const tflite::OperatorT *op);
+};
+
+// Creates the empty MIR graph and the operator converter up front; the model
+// file itself is only read later, inside importModel().
+TfliteImporter::TfliteImporter(std::string filename) : _filename(std::move(filename))
+{
+  _graph = std::make_unique<mir::Graph>();
+  _opCreator = std::make_unique<TFLiteOpCreator>(_graph.get());
+}
+
+TfliteImporter::~TfliteImporter() = default;
+
+/// @brief Read the whole file into memory, verify it is a valid TFLite
+/// flatbuffer, and unpack it into _model (object API).
+/// @throw std::runtime_error if the file cannot be opened/read or fails verification.
+void TfliteImporter::import()
+{
+  std::ifstream stream(_filename, std::ios::in | std::ios::binary);
+  if (stream.fail())
+    throw std::runtime_error("Couldn't open file \"" + _filename + "\".");
+
+  // Slurp the entire file; flatbuffers need the whole buffer in memory.
+  std::vector<char> model_buffer((std::istreambuf_iterator<char>(stream)),
+                                 std::istreambuf_iterator<char>());
+
+  if (stream.fail())
+    throw std::runtime_error("Couldn't read file \"" + _filename + "\".");
+
+  // Verify buffer integrity before unpacking to avoid reading garbage.
+  flatbuffers::Verifier verifier(reinterpret_cast<const std::uint8_t *>(model_buffer.data()),
+                                 model_buffer.size());
+
+  if (!tflite::VerifyModelBuffer(verifier))
+    throw std::runtime_error("Could not load model: " + _filename + "\n");
+
+  _model = tflite::UnPackModel(model_buffer.data());
+}
+
+// Builtin operator codes this importer knows how to convert to MIR.
+// collectUnsupportedOps() rejects models containing anything outside this set,
+// so walkOperator() can assume every opcode it sees is listed here.
+static const std::set<tflite::BuiltinOperator> supportedOperators = {
+    tflite::BuiltinOperator_ADD,
+    tflite::BuiltinOperator_AVERAGE_POOL_2D,
+    tflite::BuiltinOperator_CONCATENATION,
+    tflite::BuiltinOperator_CONV_2D,
+    tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
+    tflite::BuiltinOperator_DIV,
+    tflite::BuiltinOperator_FULLY_CONNECTED,
+    tflite::BuiltinOperator_HARD_SWISH,
+    tflite::BuiltinOperator_LEAKY_RELU,
+    tflite::BuiltinOperator_LOGISTIC,
+    tflite::BuiltinOperator_MAX_POOL_2D,
+    tflite::BuiltinOperator_MAXIMUM,
+    tflite::BuiltinOperator_MEAN,
+    tflite::BuiltinOperator_MUL,
+    tflite::BuiltinOperator_PAD,
+    tflite::BuiltinOperator_RELU,
+    tflite::BuiltinOperator_RELU6,
+    tflite::BuiltinOperator_RESHAPE,
+    tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
+    tflite::BuiltinOperator_RSQRT,
+    tflite::BuiltinOperator_SHAPE,
+    tflite::BuiltinOperator_SLICE,
+    tflite::BuiltinOperator_SOFTMAX,
+    tflite::BuiltinOperator_SQRT,
+    tflite::BuiltinOperator_SQUARED_DIFFERENCE,
+    tflite::BuiltinOperator_SQUEEZE,
+    tflite::BuiltinOperator_STRIDED_SLICE,
+    tflite::BuiltinOperator_SUB,
+    tflite::BuiltinOperator_TANH,
+    tflite::BuiltinOperator_TRANSPOSE,
+    tflite::BuiltinOperator_TRANSPOSE_CONV,
+};
+
+/// @brief Scan every operator in the model and collect those this importer
+/// cannot convert (i.e. not listed in supportedOperators).
+/// @throw std::runtime_error reporting all detected problems at once.
+void TfliteImporter::collectUnsupportedOps()
+{
+  // std::set deduplicates and sorts the report lines.
+  std::set<std::string> errors;
+  for (const auto &subgraph : _model->subgraphs)
+    for (const auto &op : subgraph->operators)
+    {
+      tflite::BuiltinOperator opcode = _model->operator_codes[op->opcode_index]->builtin_code;
+      if (supportedOperators.find(opcode) == supportedOperators.end())
+      {
+        // Codes beyond BuiltinOperator_MAX have no enum name to print.
+        if (opcode <= tflite::BuiltinOperator_MAX)
+          errors.insert(std::string(EnumNameBuiltinOperator(opcode)) + ": unsupported operator");
+        else
+          errors.insert(std::to_string(opcode) + ": unsupported tflite custom opcode");
+      }
+    }
+
+  if (!errors.empty())
+  {
+    std::string msg("NNC can't load model. Detected problems:");
+    for (const auto &e : errors)
+      msg.append("\n * " + e);
+    throw std::runtime_error(msg);
+  }
+}
+
+/// @brief Full import pipeline: load/verify the file, reject unsupported
+/// operators up front, then convert every subgraph.
+/// @return the finished MIR graph; ownership moves to the caller.
+std::unique_ptr<mir::Graph> TfliteImporter::importModel()
+{
+  import();
+  collectUnsupportedOps();
+  walkModel(_model.get());
+  return std::move(_graph);
+}
+
+// Converts each subgraph of the model in file order.
+void TfliteImporter::walkModel(const tflite::ModelT *model)
+{
+  for (const auto &subgraph : model->subgraphs)
+    walkSubgraph(subgraph.get());
+}
+
+/// @brief Map a TFLite tensor element type to its MIR DataType counterpart.
+/// @throw std::runtime_error for element types with no MIR equivalent.
+mir::DataType convertElementType(tflite::TensorType type)
+{
+  switch (type)
+  {
+    case tflite::TensorType_INT32:
+      return mir::DataType::INT32;
+    case tflite::TensorType_FLOAT32:
+      return mir::DataType::FLOAT32;
+    case tflite::TensorType_INT64:
+      return mir::DataType::INT64;
+    case tflite::TensorType_UINT8:
+      return mir::DataType::UINT8;
+    default:
+      throw std::runtime_error(std::string("Unsupported tensor type: ") + EnumNameTensorType(type));
+  }
+}
+
+/// @brief Build the MIR tensor type (element type, shape and optional
+/// per-tensor affine quantization) for a TFLite tensor.
+/// @throw std::runtime_error for custom or non-scalar (per-channel) quantization.
+mir::TensorType getMirTensorType(const tflite::TensorT &tensor)
+{
+  mir::DataType element_type = convertElementType(tensor.type);
+
+  mir::Shape shape(tensor.shape.size());
+  for (std::size_t i = 0; i < tensor.shape.size(); ++i)
+  {
+    shape.dim(i) = tensor.shape[i];
+  }
+
+  if (tensor.quantization != nullptr)
+  {
+    const tflite::QuantizationParametersT &params = *tensor.quantization;
+
+    if (params.details.type != tflite::QuantizationDetails_NONE)
+      throw std::runtime_error("Custom quantization is not supported.");
+
+    // Empty parameters mean no quantization at all.
+    if (params.scale.empty() && params.zero_point.empty())
+      return mir::TensorType{element_type, shape};
+
+    if (params.scale.size() != 1 || params.zero_point.size() != 1)
+      throw std::runtime_error("Non-scalar quantization is not supported.");
+
+    mir::AffineQuantization quantization{params.scale[0], static_cast<int>(params.zero_point[0])};
+
+    return mir::TensorType{element_type, shape, quantization};
+  }
+  else
+  {
+    return mir::TensorType{element_type, shape};
+  }
+}
+
+/// @brief Convert one subgraph: create InputOps for its declared inputs,
+/// convert every operator in execution order, then attach OutputOps.
+void TfliteImporter::walkSubgraph(const tflite::SubGraphT *subgraph)
+{
+  // Reset the tensor-index -> MIR-output map for this subgraph.
+  _tensorMap.assign(subgraph->tensors.size(), nullptr);
+
+  for (const auto input_tensor_index : subgraph->inputs)
+  {
+    const tflite::TensorT &tensor = *subgraph->tensors[input_tensor_index];
+
+    mir::TensorType input_type = getMirTensorType(tensor);
+    auto input = _graph->create<mir::ops::InputOp>(input_type)->getOutput(0);
+    input->setName(tensor.name);
+
+    assert(_tensorMap[input_tensor_index] == nullptr);
+    _tensorMap[input_tensor_index] = input;
+  }
+
+  for (const auto &op : subgraph->operators)
+  {
+    walkOperator(subgraph, op.get());
+  }
+
+  // Every output tensor must have been produced by some operator above.
+  for (const auto output_tensor_index : subgraph->outputs)
+  {
+    auto output = _tensorMap[output_tensor_index];
+    _graph->create<mir::ops::OutputOp>(output);
+  }
+}
+
+/// @brief Convert a single TFLite operator to MIR: gather its MIR inputs,
+/// dispatch on the builtin opcode to the matching TFLiteOpCreator method,
+/// then name/type the produced outputs and record them in _tensorMap.
+void TfliteImporter::walkOperator(const tflite::SubGraphT *subgraph, const tflite::OperatorT *op)
+{
+  std::vector<mir::Operation::Output *> inputs = getMIRInputsForOperator(subgraph, op);
+  std::vector<mir::Operation::Output *> outputs;
+
+  // Unsupported opcodes were rejected earlier by collectUnsupportedOps().
+  tflite::BuiltinOperator opcode = _model->operator_codes[op->opcode_index]->builtin_code;
+  switch (opcode)
+  {
+    case tflite::BuiltinOperator_CONV_2D:
+      outputs = _opCreator->convertConv2D(op->builtin_options.AsConv2DOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_DEPTHWISE_CONV_2D:
+      outputs = _opCreator->convertDepthwiseConv2D(op->builtin_options.AsDepthwiseConv2DOptions(),
+                                                   inputs);
+      break;
+    case tflite::BuiltinOperator_MAX_POOL_2D:
+      outputs = _opCreator->convertMaxPool2D(op->builtin_options.AsPool2DOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_AVERAGE_POOL_2D:
+      outputs = _opCreator->convertAveragePool2D(op->builtin_options.AsPool2DOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_CONCATENATION:
+      outputs =
+          _opCreator->convertConcatenation(op->builtin_options.AsConcatenationOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_RESHAPE:
+      outputs = _opCreator->convertReshape(op->builtin_options.AsReshapeOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
+      outputs = _opCreator->convertResizeNearestNeighbor(
+          op->builtin_options.AsResizeNearestNeighborOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_MEAN:
+      outputs = _opCreator->convertMean(op->builtin_options.AsReducerOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_FULLY_CONNECTED:
+      outputs =
+          _opCreator->convertFullyConnected(op->builtin_options.AsFullyConnectedOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_SOFTMAX:
+      outputs = _opCreator->convertSoftmax(op->builtin_options.AsSoftmaxOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_SLICE:
+      outputs = _opCreator->convertSlice(op->builtin_options.AsSliceOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_SQUEEZE:
+      outputs = _opCreator->convertSqueeze(op->builtin_options.AsSqueezeOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_LOGISTIC:
+      outputs = _opCreator->convertLogistic(inputs);
+      break;
+    case tflite::BuiltinOperator_RSQRT:
+      outputs = _opCreator->convertRsqrt(inputs);
+      break;
+    case tflite::BuiltinOperator_SQRT:
+      outputs = _opCreator->convertSqrt(inputs);
+      break;
+    case tflite::BuiltinOperator_ADD:
+      outputs = _opCreator->convertAdd(op->builtin_options.AsAddOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_SUB:
+      outputs = _opCreator->convertSub(op->builtin_options.AsSubOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_MUL:
+      outputs = _opCreator->convertMul(op->builtin_options.AsMulOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_DIV:
+      outputs = _opCreator->convertDiv(op->builtin_options.AsDivOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_MAXIMUM:
+      outputs = _opCreator->convertMax(inputs);
+      break;
+    case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
+      outputs = _opCreator->convertSquaredDifference(inputs);
+      break;
+    case tflite::BuiltinOperator_TRANSPOSE_CONV:
+      outputs =
+          _opCreator->convertTransposeConv(op->builtin_options.AsTransposeConvOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_PAD:
+      outputs = _opCreator->convertPad(op->builtin_options.AsPadOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_TANH:
+      outputs = _opCreator->convertTanh(inputs);
+      break;
+    case tflite::BuiltinOperator_RELU:
+      outputs = _opCreator->convertReLU(inputs);
+      break;
+    case tflite::BuiltinOperator_RELU6:
+      outputs = _opCreator->convertReLU6(inputs);
+      break;
+    case tflite::BuiltinOperator_TRANSPOSE:
+      outputs = _opCreator->convertTranspose(op->builtin_options.AsTransposeOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_STRIDED_SLICE:
+      outputs =
+          _opCreator->convertStridedSlice(op->builtin_options.AsStridedSliceOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_LEAKY_RELU:
+      outputs = _opCreator->convertLeakyReLU(op->builtin_options.AsLeakyReluOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_SHAPE:
+      outputs = _opCreator->convertShape(op->builtin_options.AsShapeOptions(), inputs);
+      break;
+    case tflite::BuiltinOperator_HARD_SWISH:
+      outputs = _opCreator->convertHardSwish(op->builtin_options.AsHardSwishOptions(), inputs);
+      break;
+    default:
+      assert(false && "All unsupported types should have been found before this pass.");
+  }
+
+  // Propagate names and (quantized) types from the TFLite tensors to the
+  // freshly created MIR outputs, and register them for downstream operators.
+  assert(outputs.size() == op->outputs.size());
+  for (std::size_t i = 0; i < op->outputs.size(); ++i)
+  {
+    const auto tensor_index = op->outputs[i];
+    const tflite::TensorT &tensor = *subgraph->tensors[tensor_index];
+
+    mir::TensorType output_type = getMirTensorType(tensor);
+
+    // The type should have been inferred correctly, except for quantization information.
+    assert(outputs[i]->getType().getElementType() == output_type.getElementType() &&
+           outputs[i]->getType().getShape() == output_type.getShape());
+
+    outputs[i]->setName(tensor.name);
+    outputs[i]->setType(output_type);
+
+    assert(_tensorMap[tensor_index] == nullptr);
+    _tensorMap[tensor_index] = outputs[i];
+  }
+}
+
+/// @brief Resolve the MIR outputs feeding the given operator's inputs.
+/// Tensors backed by a non-empty model buffer become new ConstantOps;
+/// all other tensors must already be present in _tensorMap.
+std::vector<mir::Operation::Output *>
+TfliteImporter::getMIRInputsForOperator(const tflite::SubGraphT *subgraph,
+                                        const tflite::OperatorT *op)
+{
+  std::vector<mir::Operation::Output *> inputs;
+
+  for (const auto tensor_index : op->inputs)
+  {
+    const tflite::TensorT &tensor = *subgraph->tensors[tensor_index];
+    const tflite::BufferT &buffer = *_model->buffers[tensor.buffer];
+    if (!buffer.data.empty())
+    {
+      assert(_tensorMap[tensor_index] == nullptr);
+      mir::TensorType type = getMirTensorType(tensor);
+      mir::TensorVariant mir_tensor{type, buffer.data.data()};
+      inputs.emplace_back(_graph->create<mir::ops::ConstantOp>(mir_tensor)->getOutput(0));
+    }
+    else
+    {
+      assert(_tensorMap[tensor_index] != nullptr);
+      // By this point every input for the operation "op" should have corresponding
+      // Model IR operations that output its inputs. This assumption is provided by the fact
+      // that TFLite format specifies all operations in the execution order.
+      inputs.emplace_back(_tensorMap[tensor_index]);
+    }
+  }
+
+  return inputs;
+}
+
+} // namespace
+
+/// @brief Public entry point: import the given .tflite file into a MIR graph.
+/// @throw std::runtime_error on I/O failure, invalid flatbuffer, or unsupported operators.
+std::unique_ptr<mir::Graph> loadModel(std::string filename)
+{
+  TfliteImporter importer(std::move(filename));
+  return importer.importModel();
+}
+
+} // namespace mir_tflite
diff --git a/compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp b/compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp
new file mode 100644
index 000000000..d9f98da55
--- /dev/null
+++ b/compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite_op_creator.h"
+#include "schema_generated.h"
+
+#include "mir/ops/AddOp.h"
+#include "mir/ops/AvgPool2DOp.h"
+#include "mir/ops/CappedReluOp.h"
+#include "mir/ops/ConcatOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/Conv2DOp.h"
+#include "mir/ops/Deconv2DOp.h"
+#include "mir/ops/DepthwiseConv2DOp.h"
+#include "mir/ops/DivOp.h"
+#include "mir/ops/FullyConnectedOp.h"
+#include "mir/ops/HardSwishOp.h"
+#include "mir/ops/LeakyReluOp.h"
+#include "mir/ops/MaxOp.h"
+#include "mir/ops/MaxPool2DOp.h"
+#include "mir/ops/MulOp.h"
+#include "mir/ops/PadOp.h"
+#include "mir/ops/ReduceMeanOp.h"
+#include "mir/ops/ReluOp.h"
+#include "mir/ops/ReshapeOp.h"
+#include "mir/ops/ResizeOp.h"
+#include "mir/ops/SigmoidOp.h"
+#include "mir/ops/SliceOp.h"
+#include "mir/ops/SoftmaxOp.h"
+#include "mir/ops/SqrtOp.h"
+#include "mir/ops/SqueezeOp.h"
+#include "mir/ops/SubOp.h"
+#include "mir/ops/TanhOp.h"
+#include "mir/ops/TransposeOp.h"
+
+#include "mir/Shape.h"
+#include "mir/ShapeRange.h"
+#include "mir/Tensor.h"
+
+#include <stdexcept>
+
+namespace mir_tflite
+{
+
+namespace ops = mir::ops;
+using mir::Shape;
+
+/// @brief Map the TFLite padding enum to the MIR padding type
+/// (SAME -> SameUpper, VALID -> Valid).
+/// @throw std::runtime_error for any other padding value.
+static mir::ops::PaddingType convertPadding(tflite::Padding padding)
+{
+  switch (padding)
+  {
+    case tflite::Padding_VALID:
+      return mir::ops::PaddingType::Valid;
+    case tflite::Padding_SAME:
+      return mir::ops::PaddingType::SameUpper;
+    default:
+      throw std::runtime_error(std::string("Unsupported Padding: ") +
+                               tflite::EnumNamePadding(padding));
+  }
+}
+
+// TODO Move this to MIR?
+static void calculatePadding(mir::ops::PaddingType padding_type, const mir::Shape &input_shape,
+ const std::vector<std::int32_t> &window_size,
+ const std::vector<std::int32_t> &strides,
+ std::vector<std::int32_t> &padding_before,
+ std::vector<std::int32_t> &padding_after)
+{
+ constexpr int num_spatial_dims = 2;
+ assert(window_size.size() == num_spatial_dims);
+ assert(strides.size() == num_spatial_dims);
+ assert(padding_before.size() == num_spatial_dims);
+ assert(padding_after.size() == num_spatial_dims);
+
+ switch (padding_type)
+ {
+ case mir::ops::PaddingType::SameUpper:
+ for (int i = 0; i < num_spatial_dims; ++i)
+ {
+ // Assuming NHWC format.
+ const std::int32_t total_padding =
+ (input_shape.dim(1 + i) % strides[i] == 0)
+ ? std::max(0, window_size[i] - strides[i])
+ : std::max(0, window_size[i] - input_shape.dim(1 + i) % strides[i]);
+ padding_before[i] = total_padding / 2;
+ padding_after[i] = total_padding - padding_before[i];
+ }
+ break;
+ case mir::ops::PaddingType::Valid:
+ for (int i = 0; i < num_spatial_dims; ++i)
+ {
+ padding_before[i] = 0;
+ padding_after[i] = 0;
+ }
+ break;
+ default:
+ assert(false);
+ }
+}
+
+// Flattens an int32 MIR tensor into a std::vector, casting each element to VectorT.
+template <typename VectorT>
+static std::vector<VectorT> convertIntTensorToVector(const mir::Tensor<int32_t> &tensor)
+{
+  std::vector<VectorT> v;
+  for (const auto &i : mir::ShapeRange(tensor.getShape()))
+    v.emplace_back(static_cast<VectorT>(tensor.at(i)));
+  return v;
+}
+
+// Returns the constant value backing `output`.
+// @throw std::runtime_error if `output` is not produced by a ConstantOp,
+// i.e. the value is not known at import time.
+static const mir::TensorVariant &extractTensor(const mir::Operation::Output *output)
+{
+  auto constant_op = dynamic_cast<const ops::ConstantOp *>(output->getNode());
+  if (constant_op == nullptr)
+    throw std::runtime_error("Non-constant input is not supported.");
+  return constant_op->getValue();
+}
+
+/// @brief Convert a TFLite CONV_2D operator.
+/// Inputs: data, kernel, bias. Emits Conv2DOp (bias fused only on the
+/// quantized path) followed by the fused activation, if any.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertConv2D(const tflite::Conv2DOptionsT *opts,
+                               const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+  auto kernel = inputs.at(1);
+  auto bias = inputs.at(2);
+
+  mir::Conv2DOpAttributes attributes;
+  attributes.strides = {opts->stride_h, opts->stride_w};
+
+  // Turn the SAME/VALID mode into explicit paddings; kernel dims 1 and 2 are
+  // the spatial window size here.
+  const auto padding_type = convertPadding(opts->padding);
+  const auto &input_shape = input->getShape();
+  const auto &kernel_shape = kernel->getShape();
+  const auto &strides = attributes.strides;
+  auto &pad_before = attributes.padding_before;
+  auto &pad_after = attributes.padding_after;
+  std::vector<std::int32_t> kernel_size{kernel_shape.dim(1), kernel_shape.dim(2)};
+  calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
+
+  mir::Operation::Output *result;
+  if (input->getType().isQuantized())
+  {
+    result = createOp<ops::Conv2DOp>(input, kernel, bias, attributes)->getOutput(0);
+  }
+  else // TODO Fuse bias to other backends
+  {
+    result = createOp<ops::Conv2DOp>(input, kernel, attributes)->getOutput(0);
+    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+  }
+  return {addFusedActivation(result, opts->fused_activation_function)};
+}
+
+/// @brief Convert a TFLite DEPTHWISE_CONV_2D operator.
+/// Inputs: data, kernel, bias. The kernel is transposed OHWI -> HWIO to the
+/// layout MIR expects; bias is fused only on the quantized path.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertDepthwiseConv2D(const tflite::DepthwiseConv2DOptionsT *opts,
+                                        const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+  auto kernel = inputs.at(1);
+  auto bias = inputs.at(2);
+
+  // OHWI -> HWIO
+  const std::vector<std::size_t> axis_order{1, 2, 3, 0};
+  kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
+
+  mir::Conv2DOpAttributes attributes;
+  attributes.strides = {opts->stride_h, opts->stride_w};
+
+  // After the transpose, kernel dims 0 and 1 are the spatial window size.
+  const auto padding_type = convertPadding(opts->padding);
+  const auto &input_shape = input->getShape();
+  const auto &kernel_shape = kernel->getShape();
+  std::vector<std::int32_t> kernel_size{kernel_shape.dim(0), kernel_shape.dim(1)};
+  const auto &strides = attributes.strides;
+  auto &pad_before = attributes.padding_before;
+  auto &pad_after = attributes.padding_after;
+  calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
+
+  mir::Operation::Output *result;
+  if (input->getType().isQuantized())
+  {
+    result = createOp<ops::DepthwiseConv2DOp>(input, kernel, bias, attributes)->getOutput(0);
+  }
+  else // TODO Fuse bias to other backends
+  {
+    result = createOp<ops::DepthwiseConv2DOp>(input, kernel, attributes)->getOutput(0);
+    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+  }
+  return {addFusedActivation(result, opts->fused_activation_function)};
+}
+
+/// @brief Convert CONCATENATION: all inputs are concatenated along opts->axis,
+/// then the fused activation (if any) is applied.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertConcatenation(const tflite::ConcatenationOptionsT *opts,
+                                      const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto result = createOp<ops::ConcatOp>(inputs, opts->axis);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
+}
+
+/// @brief Convert MAX_POOL_2D: window/strides come from the options and the
+/// SAME/VALID mode is turned into explicit paddings.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMaxPool2D(const tflite::Pool2DOptionsT *opts,
+                                  const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  const auto &input_shape = input->getShape();
+
+  mir::MaxPool2DOpAttributes attributes;
+  attributes.window = {opts->filter_height, opts->filter_width};
+  attributes.strides = {opts->stride_h, opts->stride_w};
+
+  const auto padding_type = convertPadding(opts->padding);
+  const auto &window_size = attributes.window;
+  const auto &strides = attributes.strides;
+  auto &pad_before = attributes.padding_before;
+  auto &pad_after = attributes.padding_after;
+  calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
+
+  auto result = createOp<ops::MaxPool2DOp>(input, attributes);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
+}
+
+/// @brief Convert AVERAGE_POOL_2D. include_pad is false: padded cells do not
+/// contribute to the average, matching the behavior encoded here.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertAveragePool2D(const tflite::Pool2DOptionsT *opts,
+                                      const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  const auto &input_shape = input->getShape();
+
+  mir::AvgPool2DOpAttributes attributes;
+  attributes.window = {opts->filter_height, opts->filter_width};
+  attributes.strides = {opts->stride_h, opts->stride_w};
+  attributes.include_pad = false;
+
+  const auto padding_type = convertPadding(opts->padding);
+  const auto &window_size = attributes.window;
+  const auto &strides = attributes.strides;
+  auto &pad_before = attributes.padding_before;
+  auto &pad_after = attributes.padding_after;
+  calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
+
+  auto result = createOp<ops::AvgPool2DOp>(input, attributes);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
+}
+
+/// @brief Convert SOFTMAX. The options carry no information used here; the
+/// input is required to be 2-D, so softmax is always taken over axis 1.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSoftmax(const tflite::SoftmaxOptionsT * /*opts*/,
+                                const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  // Softmax in TFLite is always 2-D.
+  assert(input->getShape().rank() == 2);
+  const int32_t axis = 1;
+  auto result = createOp<ops::SoftmaxOp>(input, axis);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert SLICE. The begin and size tensors must be compile-time
+/// constants (enforced by extractTensor).
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSlice(const tflite::SliceOptionsT * /*opts*/,
+                              const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
+  mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(2)));
+
+  Shape starts(convertIntTensorToVector<int32_t>(begin_tensor));
+  Shape sizes(convertIntTensorToVector<int32_t>(size_tensor));
+  auto result = createOp<ops::SliceOp>(input, starts, sizes);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert RESHAPE using the new_shape attribute from the options.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertReshape(const tflite::ReshapeOptionsT *opts,
+                                const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  // TODO: we should also support "-1" values in new_shape, which means that correct
+  // shape values must be calculated. Better do it in the shape inference module.
+  Shape new_shape(opts->new_shape.size());
+  for (int i = 0; i < static_cast<int>(opts->new_shape.size()); ++i)
+  {
+    new_shape.dim(i) = opts->new_shape[i];
+  }
+  auto result = createOp<ops::ReshapeOp>(input, new_shape);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert TRANSPOSE_CONV. TFLite input order is
+/// (output_shape, kernel, data); the output shape must be constant, and the
+/// kernel is transposed OHWI -> HWOI for MIR's deconvolution.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertTransposeConv(const tflite::TransposeConvOptionsT *opts,
+                                      const std::vector<mir::Operation::Output *> &inputs)
+{
+  mir::Tensor<int32_t> output_shape_tensor(extractTensor(inputs.at(0)));
+  auto kernel = inputs.at(1);
+  auto input = inputs.at(2);
+
+  mir::Deconv2DOpAttributes attributes;
+  attributes.strides = {opts->stride_h, opts->stride_w};
+  Shape output_shape(convertIntTensorToVector<int32_t>(output_shape_tensor));
+
+  // OHWI -> HWOI
+  const std::vector<std::size_t> axis_order{1, 2, 0, 3};
+  kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
+
+  attributes.padding_type = convertPadding(opts->padding);
+  auto result = createOp<ops::DeConv2DOp>(input, kernel, attributes, output_shape)->getOutput(0);
+  return {result};
+}
+
+/// @brief Convert RESIZE_NEAREST_NEIGHBOR. The target (H, W) comes from a
+/// constant size tensor; batch and channel dims are kept from the input.
+/// @throw std::runtime_error if align_corners is requested.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertResizeNearestNeighbor(const tflite::ResizeNearestNeighborOptionsT *opts,
+                                              const std::vector<mir::Operation::Output *> &inputs)
+{
+  if (opts->align_corners)
+    throw std::runtime_error("'align_corners' is not currently supported");
+
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(1)));
+
+  const auto &input_shape = input->getShape();
+  Shape res_shape{input_shape.dim(0), size_tensor.at(mir::Index{0}), size_tensor.at(mir::Index{1}),
+                  input_shape.dim(3)};
+  auto result =
+      createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert ADD: elementwise addition plus the fused activation.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertAdd(const tflite::AddOptionsT *opts,
+                            const std::vector<mir::Operation::Output *> &inputs)
+{
+  assert(inputs.size() == 2);
+  auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
+  return {addFusedActivation(result, opts->fused_activation_function)};
+}
+
+/// @brief Convert SUB: elementwise subtraction plus the fused activation.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSub(const tflite::SubOptionsT *opts,
+                            const std::vector<mir::Operation::Output *> &inputs)
+{
+  assert(inputs.size() == 2);
+  auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
+  return {addFusedActivation(result, opts->fused_activation_function)};
+}
+
+/// @brief Convert MUL: elementwise multiplication plus the fused activation.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMul(const tflite::MulOptionsT *opts,
+                            const std::vector<mir::Operation::Output *> &inputs)
+{
+  assert(inputs.size() == 2);
+  auto result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
+  return {addFusedActivation(result, opts->fused_activation_function)};
+}
+
+/// @brief Convert DIV: elementwise division plus the fused activation.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertDiv(const tflite::DivOptionsT *opts,
+                            const std::vector<mir::Operation::Output *> &inputs)
+{
+  assert(inputs.size() == 2);
+  auto result = createOp<ops::DivOp>(inputs[0], inputs[1])->getOutput(0);
+  return {addFusedActivation(result, opts->fused_activation_function)};
+}
+
+/// @brief Convert MAXIMUM: elementwise maximum (no fused activation in TFLite).
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMax(const std::vector<mir::Operation::Output *> &inputs)
+{
+  assert(inputs.size() == 2);
+  auto result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
+  return {result};
+}
+
+/// @brief Convert SQUARED_DIFFERENCE as (a - b) * (a - b), since MIR has no
+/// dedicated op for it.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::Operation::Output *> &inputs)
+{
+  assert(inputs.size() == 2);
+  auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
+  result = createOp<ops::MulOp>(result, result)->getOutput(0);
+  return {result};
+}
+
+/// @brief Convert MEAN: reduce over the axes given by the constant axes
+/// tensor, honoring keep_dims from the options.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMean(const tflite::ReducerOptionsT *opts,
+                             const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> axes_tensor(extractTensor(inputs.at(1)));
+
+  std::vector<int32_t> axes = convertIntTensorToVector<int32_t>(axes_tensor);
+  auto result = createOp<ops::ReduceMeanOp>(input, axes, opts->keep_dims);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert FULLY_CONNECTED. The input is flattened to 2-D
+/// (batch, features), weights are transposed to (in, out), and the bias is
+/// fused only on the quantized path.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertFullyConnected(const tflite::FullyConnectedOptionsT *opts,
+                                       const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+  auto weights = inputs.at(1);
+  auto bias = inputs.at(2);
+
+  // Flatten input to 2-D shape.
+  const auto &input_shape = input->getShape();
+  int32_t outer_size = input_shape.dim(0);
+  int32_t inner_size = input_shape.numElements() / outer_size;
+  auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size})->getOutput(0);
+
+  // Transpose the weights.
+  const std::vector<std::size_t> axis_order{1, 0};
+  weights = createOp<ops::TransposeOp>(weights, axis_order)->getOutput(0);
+
+  mir::Operation::Output *result;
+  if (input->getType().isQuantized())
+  {
+    result = createOp<ops::FullyConnectedOp>(flatten, weights, bias)->getOutput(0);
+  }
+  else // TODO Fuse bias to other backends
+  {
+    result = createOp<ops::FullyConnectedOp>(flatten, weights)->getOutput(0);
+    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
+  }
+  return {addFusedActivation(result, opts->fused_activation_function)};
+}
+
+/// @brief Append the MIR op implementing a TFLite fused activation to `input`.
+/// NONE returns the input unchanged; RELU6 becomes a capped ReLU at 6.
+/// @throw std::runtime_error for activation types with no MIR mapping.
+mir::Operation::Output *
+TFLiteOpCreator::addFusedActivation(mir::Operation::Output *input,
+                                    tflite::ActivationFunctionType activation_type)
+{
+  switch (activation_type)
+  {
+    case tflite::ActivationFunctionType_NONE:
+      return input;
+    case tflite::ActivationFunctionType_RELU:
+      return createOp<ops::ReluOp>(input)->getOutput(0);
+    case tflite::ActivationFunctionType_RELU6:
+      return createOp<ops::CappedReluOp>(input, 6)->getOutput(0);
+    case tflite::ActivationFunctionType_TANH:
+      return createOp<ops::TanhOp>(input)->getOutput(0);
+    default:
+      throw std::runtime_error(std::string("Unsupported activation type: ") +
+                               tflite::EnumNameActivationFunctionType(activation_type));
+  }
+}
+
+/// @brief Convert SQUEEZE using the squeeze_dims list from the options.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSqueeze(const tflite::SqueezeOptionsT *opts,
+                                const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  std::vector<int32_t> squeeze_dims(opts->squeeze_dims.begin(), opts->squeeze_dims.end());
+  auto result = createOp<ops::SqueezeOp>(input, squeeze_dims);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert PAD. The constant paddings tensor has one (before, after)
+/// pair per input dimension; options carry no extra information.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertPad(const tflite::PadOptionsT * /*opts*/,
+                            const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> paddings_tensor(extractTensor(inputs.at(1)));
+
+  const auto &input_shape = input->getShape();
+  const int num_dims = input_shape.rank();
+
+  mir::PadOpAttributes attributes(num_dims);
+  for (int i = 0; i < num_dims; i++)
+  {
+    attributes.padding_before[i] = paddings_tensor.at(mir::Index({i, 0}));
+    attributes.padding_after[i] = paddings_tensor.at(mir::Index({i, 1}));
+  }
+
+  auto result = createOp<ops::PadOp>(input, attributes)->getOutput(0);
+  return {result};
+}
+
+/// @brief Convert TANH to the MIR TanhOp.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertTanh(const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::TanhOp>(input);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert RELU to the MIR ReluOp.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertReLU(const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::ReluOp>(input);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert RELU6 to a capped ReLU with cap 6.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertReLU6(const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::CappedReluOp>(input, 6);
+  return {result->getOutput(0)};
+}
+
+/// @brief Convert RSQRT as 1 / sqrt(x), built from a constant 1.0 scalar,
+/// SqrtOp, and DivOp, since MIR has no dedicated rsqrt op.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertRsqrt(const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  const float one_value = 1.0f;
+  mir::TensorVariant one_tensor({mir::DataType::FLOAT32, {}}, &one_value);
+  auto one = createOp<ops::ConstantOp>(one_tensor)->getOutput(0);
+  auto sqrt = createOp<ops::SqrtOp>(input)->getOutput(0);
+  auto result = createOp<ops::DivOp>(one, sqrt)->getOutput(0);
+  return {result};
+}
+
+/// @brief Convert SQRT to the MIR SqrtOp.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSqrt(const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::SqrtOp>(input)->getOutput(0);
+  return {result};
+}
+
+/// @brief Convert LOGISTIC to the MIR SigmoidOp.
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertLogistic(const std::vector<mir::Operation::Output *> &inputs)
+{
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::SigmoidOp>(input);
+  return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertTranspose(const tflite::TransposeOptionsT * /*opts*/,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto input = inputs.at(0);
+ mir::Tensor<int32_t> perm_tensor(extractTensor(inputs.at(1)));
+
+ std::vector<std::size_t> axis_order = convertIntTensorToVector<std::size_t>(perm_tensor);
+ auto result = createOp<ops::TransposeOp>(input, axis_order);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertStridedSlice(const tflite::StridedSliceOptionsT *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ if (opts->ellipsis_mask != 0)
+ throw std::runtime_error("StridedSlice: parameter 'ellipsis_mask' is not supported.");
+
+ if (opts->new_axis_mask != 0)
+ throw std::runtime_error("StridedSlice: parameter 'new_axis_mask' is not supported.");
+
+ auto input = inputs.at(0);
+ mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
+ mir::Tensor<int32_t> end_tensor(extractTensor(inputs.at(2)));
+ mir::Tensor<int32_t> strides_tensor(extractTensor(inputs.at(3)));
+
+ std::vector<int32_t> begin = convertIntTensorToVector<int32_t>(begin_tensor);
+ std::vector<int32_t> end = convertIntTensorToVector<int32_t>(end_tensor);
+ std::vector<int32_t> strides = convertIntTensorToVector<int32_t>(strides_tensor);
+
+ int32_t begin_mask = opts->begin_mask;
+ int32_t end_mask = opts->end_mask;
+ int32_t shrink_axis_mask = opts->shrink_axis_mask;
+
+ const auto &input_shape = input->getShape();
+ int32_t num_dims = input_shape.rank();
+
+ for (int32_t stride : strides)
+ {
+ if (stride != 1)
+ throw std::runtime_error("StridedSlice: parameter 'strides' is not supported");
+ }
+
+ Shape start(num_dims);
+ Shape size(num_dims);
+ std::vector<int32_t> squeeze_dims;
+ for (int axis = 0; axis < num_dims; axis++)
+ {
+ if (static_cast<uint32_t>(begin_mask) & (1u << static_cast<uint32_t>(axis)))
+ start.dim(axis) = 0;
+ else
+ start.dim(axis) = begin.at(static_cast<uint64_t>(axis));
+
+ if (static_cast<uint32_t>(end_mask) & (1u << static_cast<uint32_t>(axis)))
+ size.dim(axis) = input_shape.dim(axis) - start.dim(axis);
+ else
+ size.dim(axis) = end.at(static_cast<uint64_t>(axis)) - start.dim(axis);
+
+ if (static_cast<uint32_t>(shrink_axis_mask) & (1u << static_cast<uint32_t>(axis)))
+ squeeze_dims.push_back(axis);
+ }
+
+ auto result = createOp<ops::SliceOp>(input, start, size);
+ result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertLeakyReLU(const tflite::LeakyReluOptionsT *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto input = inputs.at(0);
+
+ auto result = createOp<ops::LeakyReluOp>(input, opts->alpha);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertShape(const tflite::ShapeOptionsT *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ if (opts->out_type != tflite::TensorType_INT32)
+ {
+ throw std::runtime_error(std::string("SHAPE: Unsupported tensor type: ") +
+ EnumNameTensorType(opts->out_type));
+ }
+
+ const auto &input_shape = inputs[0]->getShape();
+ int32_t rank = input_shape.rank();
+ std::vector<int32_t> data;
+ data.reserve(static_cast<uint64_t>(rank));
+ for (int32_t i = 0; i < rank; i++)
+ data.emplace_back(input_shape.dim(i));
+ mir::TensorVariant tensor({mir::DataType::INT32, {rank}}, data.data());
+ auto result = createOp<ops::ConstantOp>(tensor);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertHardSwish(const tflite::HardSwishOptionsT *,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto result = createOp<ops::HardSwishOp>(inputs[0])->getOutput(0);
+ return {result};
+}
+
+} // namespace mir_tflite
diff --git a/compiler/mir-tflite-importer/tflite_op_creator.h b/compiler/mir/src/mir_tflite_importer/tflite_op_creator.h
index 820436f33..820436f33 100644
--- a/compiler/mir-tflite-importer/tflite_op_creator.h
+++ b/compiler/mir/src/mir_tflite_importer/tflite_op_creator.h
diff --git a/compiler/mir2loco/CMakeLists.txt b/compiler/mir2loco/CMakeLists.txt
index 49bf3dbde..a8a096ef4 100644
--- a/compiler/mir2loco/CMakeLists.txt
+++ b/compiler/mir2loco/CMakeLists.txt
@@ -7,7 +7,6 @@ target_include_directories(mir2loco PRIVATE src)
target_include_directories(mir2loco PUBLIC include)
target_link_libraries(mir2loco PUBLIC mir)
target_link_libraries(mir2loco PUBLIC loco)
-target_link_libraries(mir2loco PRIVATE stdex)
nnas_find_package(GTest QUIET)
diff --git a/compiler/mir2loco/README.md b/compiler/mir2loco/README.md
new file mode 100644
index 000000000..a11e10464
--- /dev/null
+++ b/compiler/mir2loco/README.md
@@ -0,0 +1 @@
+# mir2loco
diff --git a/compiler/mir2loco/src/mir2loco.cpp b/compiler/mir2loco/src/mir2loco.cpp
index fc1f6933b..e1370fe1e 100644
--- a/compiler/mir2loco/src/mir2loco.cpp
+++ b/compiler/mir2loco/src/mir2loco.cpp
@@ -37,7 +37,7 @@
#include <cassert>
#include <cstring>
-#include <stdex/Memory.h>
+#include <memory>
namespace mir2loco
{
@@ -54,7 +54,7 @@ template <class NodeType> void setupShape(const mir::Shape &shape, NodeType *nod
std::unique_ptr<loco::TensorShape> make_tensor_shape(const mir::Shape &shape)
{
- auto res = stdex::make_unique<loco::TensorShape>();
+ auto res = std::make_unique<loco::TensorShape>();
setupShape(shape, res.get());
return std::move(res);
}
@@ -107,13 +107,13 @@ loco::Permutation<loco::Domain::Feature> createFeaturePermutation(mir::DataForma
std::unique_ptr<loco::FeatureEncoder> createFeatureEncoder(mir::DataFormat data_format)
{
auto perm = createFeaturePermutation(data_format);
- return stdex::make_unique<loco::PermutingEncoder<loco::Domain::Feature>>(perm);
+ return std::make_unique<loco::PermutingEncoder<loco::Domain::Feature>>(perm);
}
std::unique_ptr<loco::FeatureDecoder> createFeatureDecoder(mir::DataFormat data_format)
{
auto perm = createFeaturePermutation(data_format);
- return stdex::make_unique<loco::PermutingDecoder<loco::Domain::Feature>>(perm);
+ return std::make_unique<loco::PermutingDecoder<loco::Domain::Feature>>(perm);
}
std::unique_ptr<loco::FilterEncoder> createOHWIFilterEncoder()
@@ -123,7 +123,7 @@ std::unique_ptr<loco::FilterEncoder> createOHWIFilterEncoder()
perm.axis(loco::FilterAxis::Height) = 1;
perm.axis(loco::FilterAxis::Width) = 2;
perm.axis(loco::FilterAxis::Depth) = 3;
- return stdex::make_unique<loco::PermutingEncoder<loco::Domain::Filter>>(perm);
+ return std::make_unique<loco::PermutingEncoder<loco::Domain::Filter>>(perm);
}
std::unique_ptr<loco::FilterEncoder> createHWOIFilterEncoder()
@@ -133,7 +133,7 @@ std::unique_ptr<loco::FilterEncoder> createHWOIFilterEncoder()
perm.axis(loco::FilterAxis::Width) = 1;
perm.axis(loco::FilterAxis::Count) = 2;
perm.axis(loco::FilterAxis::Depth) = 3;
- return stdex::make_unique<loco::PermutingEncoder<loco::Domain::Filter>>(perm);
+ return std::make_unique<loco::PermutingEncoder<loco::Domain::Filter>>(perm);
}
std::unique_ptr<loco::DepthwiseFilterEncoder> createHWIMDepthwiseFilterEncoder()
@@ -143,7 +143,7 @@ std::unique_ptr<loco::DepthwiseFilterEncoder> createHWIMDepthwiseFilterEncoder()
perm.axis(loco::DepthwiseFilterAxis::Width) = 1;
perm.axis(loco::DepthwiseFilterAxis::Depth) = 2;
perm.axis(loco::DepthwiseFilterAxis::Multiplier) = 3;
- return stdex::make_unique<loco::PermutingEncoder<loco::Domain::DepthwiseFilter>>(perm);
+ return std::make_unique<loco::PermutingEncoder<loco::Domain::DepthwiseFilter>>(perm);
}
std::unique_ptr<loco::DepthwiseFilterEncoder> createIHWMDepthwiseFilterEncoder()
@@ -153,7 +153,7 @@ std::unique_ptr<loco::DepthwiseFilterEncoder> createIHWMDepthwiseFilterEncoder()
perm.axis(loco::DepthwiseFilterAxis::Height) = 1;
perm.axis(loco::DepthwiseFilterAxis::Width) = 2;
perm.axis(loco::DepthwiseFilterAxis::Multiplier) = 3;
- return stdex::make_unique<loco::PermutingEncoder<loco::Domain::DepthwiseFilter>>(perm);
+ return std::make_unique<loco::PermutingEncoder<loco::Domain::DepthwiseFilter>>(perm);
}
std::unique_ptr<loco::MatrixEncoder> createHWMatrixEncoder()
@@ -161,7 +161,7 @@ std::unique_ptr<loco::MatrixEncoder> createHWMatrixEncoder()
loco::Permutation<loco::Domain::Matrix> perm;
perm.axis(loco::MatrixAxis::Height) = 0;
perm.axis(loco::MatrixAxis::Width) = 1;
- return stdex::make_unique<loco::PermutingEncoder<loco::Domain::Matrix>>(perm);
+ return std::make_unique<loco::PermutingEncoder<loco::Domain::Matrix>>(perm);
}
std::unique_ptr<loco::MatrixDecoder> createHWMatrixDecoder()
@@ -169,7 +169,7 @@ std::unique_ptr<loco::MatrixDecoder> createHWMatrixDecoder()
loco::Permutation<loco::Domain::Matrix> perm;
perm.axis(loco::MatrixAxis::Height) = 0;
perm.axis(loco::MatrixAxis::Width) = 1;
- return stdex::make_unique<loco::PermutingDecoder<loco::Domain::Matrix>>(perm);
+ return std::make_unique<loco::PermutingDecoder<loco::Domain::Matrix>>(perm);
}
loco::DataType convertDataType(mir::DataType data_type)
diff --git a/compiler/moco-tf/requires.cmake b/compiler/moco-tf/requires.cmake
index 751192fff..3e0fabee9 100644
--- a/compiler/moco-tf/requires.cmake
+++ b/compiler/moco-tf/requires.cmake
@@ -11,3 +11,4 @@ require("plier-tf")
require("locoex-customop")
require("logo")
require("oops")
+require("bino")
diff --git a/compiler/moco-tf/src/Canonicalization/PadCanonicalizer.cpp b/compiler/moco-tf/src/Canonicalization/PadCanonicalizer.cpp
index 10816f47c..36136aed4 100644
--- a/compiler/moco-tf/src/Canonicalization/PadCanonicalizer.cpp
+++ b/compiler/moco-tf/src/Canonicalization/PadCanonicalizer.cpp
@@ -58,7 +58,7 @@ bool canonicalize_pad(loco::Graph *graph, moco::TFPad *node)
constant_node->size<loco::DataType::FLOAT32>(1);
constant_node->at<loco::DataType::FLOAT32>(0) = 0.0f;
- auto const_paddings_node = dynamic_cast<loco::ConstGen *>(node->paddings());
+ auto const_paddings_node = loco::must_cast<loco::ConstGen *>(node->paddings());
// TODO: support S64 type.
assert(const_paddings_node->dtype() == loco::DataType::S32);
assert(const_paddings_node->rank() == 2);
diff --git a/compiler/moco/import/src/Importer.cpp b/compiler/moco/import/src/Importer.cpp
index 8d3ca6cfc..3813affce 100644
--- a/compiler/moco/import/src/Importer.cpp
+++ b/compiler/moco/import/src/Importer.cpp
@@ -126,8 +126,7 @@ void convert_graph(const moco::GraphBuilderSource &source, const moco::ModelSign
auto graph_input = graph->inputs()->create();
- auto placeholder_node = dynamic_cast<moco::TFPlaceholder *>(node);
- assert(placeholder_node != nullptr);
+ auto placeholder_node = loco::must_cast<moco::TFPlaceholder *>(node);
graph_input->name(input.nodeName());
diff --git a/compiler/moco/import/src/Nodes/BiasAdd.test.cpp b/compiler/moco/import/src/Nodes/BiasAdd.test.cpp
index 626456d30..207045a6f 100644
--- a/compiler/moco/import/src/Nodes/BiasAdd.test.cpp
+++ b/compiler/moco/import/src/Nodes/BiasAdd.test.cpp
@@ -61,7 +61,7 @@ TEST(TensorFlowImport, bias_add_01)
tester.output("out");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFBiasAdd *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFBiasAdd *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_TRUE(test_node->data_layout() == "NHWC");
}
@@ -106,7 +106,7 @@ TEST(TensorFlowImport, bias_add_NCHW_axis)
tester.output("out");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFBiasAdd *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFBiasAdd *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_TRUE(test_node->data_layout() == "NCHW");
}
diff --git a/compiler/moco/import/src/Nodes/Concat.test.cpp b/compiler/moco/import/src/Nodes/Concat.test.cpp
index c0986578b..30a7db792 100644
--- a/compiler/moco/import/src/Nodes/Concat.test.cpp
+++ b/compiler/moco/import/src/Nodes/Concat.test.cpp
@@ -72,7 +72,7 @@ TEST(TensorFlowImport, concat_01)
tester.output("Concat");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConcatV2 *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConcatV2 *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->num_values(), 2);
}
@@ -128,7 +128,7 @@ TEST(TensorFlowImport, concat_02)
tester.output("Concat");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConcatV2 *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConcatV2 *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->num_values(), 3);
}
diff --git a/compiler/moco/import/src/Nodes/Const.test.cpp b/compiler/moco/import/src/Nodes/Const.test.cpp
index 854499fe6..5a9390ba9 100644
--- a/compiler/moco/import/src/Nodes/Const.test.cpp
+++ b/compiler/moco/import/src/Nodes/Const.test.cpp
@@ -79,7 +79,7 @@ TEST(TensorFlowImport, const_float_01)
tester.output("const/float");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConst *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConst *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->size<loco::DataType::FLOAT32>(), 6);
ASSERT_EQ(test_node->at<loco::DataType::FLOAT32>(0), 1.1f);
@@ -142,7 +142,7 @@ TEST(TensorFlowImport, const_float_02)
tester.output("const/float");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConst *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConst *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->size<loco::DataType::FLOAT32>(), 6);
ASSERT_EQ(test_node->at<loco::DataType::FLOAT32>(0), 1.1f);
@@ -206,7 +206,7 @@ TEST(TensorFlowImport, const_float_03)
tester.output("const/float");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConst *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConst *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->size<loco::DataType::FLOAT32>(), 6);
ASSERT_EQ(test_node->at<loco::DataType::FLOAT32>(0), 1.1f);
@@ -270,7 +270,7 @@ TEST(TensorFlowImport, const_float_04)
tester.output("const/float");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConst *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConst *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->size<loco::DataType::FLOAT32>(), 6);
ASSERT_EQ(test_node->at<loco::DataType::FLOAT32>(0), 1.1f);
@@ -334,7 +334,7 @@ TEST(TensorFlowImport, const_int32_04)
tester.output("const/int");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConst *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConst *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->size<loco::DataType::S32>(), 6);
ASSERT_EQ(test_node->at<loco::DataType::S32>(0), 1);
@@ -391,7 +391,7 @@ TEST(TensorFlowImport, const_int32_scalar)
tester.output("const/int");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConst *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConst *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->size<loco::DataType::S32>(), 1);
ASSERT_EQ(test_node->at<loco::DataType::S32>(0), 3);
@@ -453,7 +453,7 @@ TEST(TensorFlowImport, const_int8_01)
tester.output("const/int8");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFConst *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFConst *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->size<loco::DataType::S8>(), 6);
ASSERT_EQ(test_node->at<loco::DataType::S8>(0), 0);
diff --git a/compiler/moco/import/src/Nodes/Mean.test.cpp b/compiler/moco/import/src/Nodes/Mean.test.cpp
index 6321fad16..4666c32d0 100644
--- a/compiler/moco/import/src/Nodes/Mean.test.cpp
+++ b/compiler/moco/import/src/Nodes/Mean.test.cpp
@@ -65,7 +65,7 @@ TEST(TensorFlowImport, mean_true)
tester.output("Mean");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFMean *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFMean *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->keep_dims(), true);
}
@@ -114,7 +114,7 @@ TEST(TensorFlowImport, mean_false)
tester.output("Mean");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFMean *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFMean *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->keep_dims(), false);
}
diff --git a/compiler/moco/import/src/Nodes/Pack.test.cpp b/compiler/moco/import/src/Nodes/Pack.test.cpp
index 01774a906..3ee27453e 100644
--- a/compiler/moco/import/src/Nodes/Pack.test.cpp
+++ b/compiler/moco/import/src/Nodes/Pack.test.cpp
@@ -73,7 +73,7 @@ TEST(TensorFlowImport, tf_pack_basic)
tester.output("Pack");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFPack *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFPack *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->N(), 4);
ASSERT_NE(test_node->values(0), nullptr);
diff --git a/compiler/moco/import/src/Nodes/StridedSlice.test.cpp b/compiler/moco/import/src/Nodes/StridedSlice.test.cpp
index b6959d7ab..dd3c13314 100644
--- a/compiler/moco/import/src/Nodes/StridedSlice.test.cpp
+++ b/compiler/moco/import/src/Nodes/StridedSlice.test.cpp
@@ -95,7 +95,7 @@ TEST(TensorFlowImport, tf_stridedslice_basic)
tester.output("StridedSlice");
tester.run(nodedef, graphbuilder);
- auto test_node = dynamic_cast<moco::TFStridedSlice *>(tester.output());
+ auto test_node = loco::must_cast<moco::TFStridedSlice *>(tester.output());
ASSERT_NE(test_node, nullptr);
ASSERT_EQ(test_node->begin_mask(), 0);
ASSERT_EQ(test_node->end_mask(), 0);
diff --git a/compiler/moco/pass/src/Passes/ConstantFoldPack.cpp b/compiler/moco/pass/src/Passes/ConstantFoldPack.cpp
index cc8a23d18..105c96cff 100644
--- a/compiler/moco/pass/src/Passes/ConstantFoldPack.cpp
+++ b/compiler/moco/pass/src/Passes/ConstantFoldPack.cpp
@@ -24,6 +24,8 @@
#include <moco/Support/NodeAs.h>
+#include <loco/IR/TensorShape.h>
+
#include <oops/UserExn.h>
#include <cassert>
@@ -32,19 +34,6 @@
namespace
{
-// TODO move to loco
-bool operator==(const loco::TensorShape &lhs, const loco::TensorShape &rhs)
-{
- if (lhs.rank() != rhs.rank())
- return false;
- for (uint32_t axis = 0; axis < lhs.rank(); ++axis)
- {
- if (!(lhs.dim(axis) == rhs.dim(axis)))
- return false;
- }
- return true;
-}
-
bool valid_axis_range(int32_t output_rank, int32_t pack_axis)
{
// check axis range in [-r-1, r+1)
diff --git a/compiler/moco/pass/src/Passes/ConstantFoldStridedSlice.cpp b/compiler/moco/pass/src/Passes/ConstantFoldStridedSlice.cpp
index 8be47648d..3542e6077 100644
--- a/compiler/moco/pass/src/Passes/ConstantFoldStridedSlice.cpp
+++ b/compiler/moco/pass/src/Passes/ConstantFoldStridedSlice.cpp
@@ -35,10 +35,9 @@ namespace
loco::TensorShape calc_output_shape(moco::TFStridedSlice *node)
{
- auto const_input = dynamic_cast<moco::TFConst *>(node->input());
- auto const_begin = dynamic_cast<moco::TFConst *>(node->begin());
- auto const_end = dynamic_cast<moco::TFConst *>(node->end());
- auto const_strides = dynamic_cast<moco::TFConst *>(node->strides());
+ auto const_input = loco::must_cast<moco::TFConst *>(node->input());
+ auto const_begin = loco::must_cast<moco::TFConst *>(node->begin());
+ auto const_end = loco::must_cast<moco::TFConst *>(node->end());
auto input_rank = const_input->rank();
auto output_rank = input_rank;
loco::TensorShape output_shape_range;
diff --git a/compiler/moco/pass/src/Passes/FuseBinaryIntoPreceding.cpp b/compiler/moco/pass/src/Passes/FuseBinaryIntoPreceding.cpp
index 4a9631ea9..f97546a80 100644
--- a/compiler/moco/pass/src/Passes/FuseBinaryIntoPreceding.cpp
+++ b/compiler/moco/pass/src/Passes/FuseBinaryIntoPreceding.cpp
@@ -471,8 +471,7 @@ bool fuse_to_preceding(loco::Graph *graph, moco::TFAdd *node)
}
// Let's fuse addparam into biasadd bias
- auto biasadd_bias = dynamic_cast<moco::TFConst *>(biasadd->bias());
- assert(biasadd_bias != nullptr);
+ auto biasadd_bias = loco::must_cast<moco::TFConst *>(biasadd->bias());
if (!shape_match(biasadd_bias, addparam))
{
// INFO(l) << "TFBiasAdd bias and TFAdd input shape mismatch";
diff --git a/compiler/moco/service/src/Service/TFShapeInferenceRule.cpp b/compiler/moco/service/src/Service/TFShapeInferenceRule.cpp
index 6d122c863..98434155e 100644
--- a/compiler/moco/service/src/Service/TFShapeInferenceRule.cpp
+++ b/compiler/moco/service/src/Service/TFShapeInferenceRule.cpp
@@ -473,8 +473,7 @@ public:
auto input_shape = node_shape(node->input());
assert(input_shape.domain() == loco::Domain::Tensor);
- auto const_paddings = dynamic_cast<moco::TFConst *>(node->paddings());
- assert(const_paddings);
+ auto const_paddings = loco::must_cast<moco::TFConst *>(node->paddings());
assert(const_paddings->dtype() == loco::DataType::S32);
assert(const_paddings->rank() == 2);
@@ -707,9 +706,9 @@ public:
assert(node->ellipsis_mask() == 0);
assert(node->shrink_axis_mask() == 1);
- auto const_begin = dynamic_cast<moco::TFConst *>(node->begin());
- auto const_end = dynamic_cast<moco::TFConst *>(node->end());
- auto const_strides = dynamic_cast<moco::TFConst *>(node->strides());
+ auto const_begin = loco::must_cast<moco::TFConst *>(node->begin());
+ auto const_end = loco::must_cast<moco::TFConst *>(node->end());
+ auto const_strides = loco::must_cast<moco::TFConst *>(node->strides());
assert(dynamic_cast<moco::TFConst *>(node->input()) != nullptr);
assert(const_begin != nullptr);
@@ -880,7 +879,7 @@ void TFShapeInferenceRule::infer(const Context *ctx, const loco::Node *node, Sin
assert(dynamic_cast<const TFNode *>(node) != nullptr);
ShapeInferenceAlgorithm alg{ctx};
- auto shape = dynamic_cast<const TFNode *>(node)->accept(&alg);
+ auto shape = loco::must_cast<const TFNode *>(node)->accept(&alg);
if (shape.domain() == loco::Domain::Unknown)
sink->fail();
diff --git a/compiler/moco/service/src/Service/TFTypeInferenceRule.cpp b/compiler/moco/service/src/Service/TFTypeInferenceRule.cpp
index 112ab955d..f168c80ff 100644
--- a/compiler/moco/service/src/Service/TFTypeInferenceRule.cpp
+++ b/compiler/moco/service/src/Service/TFTypeInferenceRule.cpp
@@ -98,7 +98,7 @@ bool TFTypeInferenceRule::infer(const loco::Node *node, loco::DataType &dtype) c
#define TENSORFLOW_NODE(OPCODE,CLASS) \
if (dynamic_cast<const moco::CLASS *>(node)) \
{ \
- auto tfnode = dynamic_cast<const moco::CLASS *>(node); \
+ auto tfnode = loco::must_cast<const moco::CLASS *>(node); \
dtype = tfnode->accept(&alg); \
assert(dtype != loco::DataType::Unknown); \
return true; \
diff --git a/compiler/nest/core/src/Block.test.cpp b/compiler/nest/core/src/Block.test.cpp
index b40fbeaac..d8faa0bdb 100644
--- a/compiler/nest/core/src/Block.test.cpp
+++ b/compiler/nest/core/src/Block.test.cpp
@@ -30,12 +30,12 @@ TEST(BLOCK, use_case_1)
{
nest::Block block;
- ASSERT_EQ(block.size(), 0);
+ ASSERT_EQ(0, block.size());
auto stmt = std::make_shared<DummyNode>();
block.append(stmt);
- ASSERT_EQ(block.size(), 1);
- ASSERT_EQ(block.at(0), stmt);
+ ASSERT_EQ(1, block.size());
+ ASSERT_EQ(stmt, block.at(0));
}
diff --git a/compiler/nest/core/src/Bound.test.cpp b/compiler/nest/core/src/Bound.test.cpp
index 7b2f0b62e..a4c3d4d38 100644
--- a/compiler/nest/core/src/Bound.test.cpp
+++ b/compiler/nest/core/src/Bound.test.cpp
@@ -22,6 +22,6 @@ TEST(BOUND, ctor)
{
const nest::Bound b{-10, 20};
- ASSERT_EQ(b.min(), -10);
- ASSERT_EQ(b.max(), 20);
+ ASSERT_EQ(-10, b.min());
+ ASSERT_EQ(20, b.max());
}
diff --git a/compiler/nest/core/src/Closure.test.cpp b/compiler/nest/core/src/Closure.test.cpp
index 1dae849a3..495e2186a 100644
--- a/compiler/nest/core/src/Closure.test.cpp
+++ b/compiler/nest/core/src/Closure.test.cpp
@@ -30,8 +30,8 @@ TEST(Closure, ctor)
nest::DomainID dom_id{0};
nest::Closure closure{dom_id, std::make_shared<DummyNode>()};
- ASSERT_EQ(closure.id().value(), 0);
- ASSERT_EQ(closure.sub().rank(), 1);
+ ASSERT_EQ(0, closure.id().value());
+ ASSERT_EQ(1, closure.sub().rank());
}
TEST(Closure, cast)
diff --git a/compiler/nest/core/src/Domain.test.cpp b/compiler/nest/core/src/Domain.test.cpp
index 5f973ecf7..8d1845905 100644
--- a/compiler/nest/core/src/Domain.test.cpp
+++ b/compiler/nest/core/src/Domain.test.cpp
@@ -36,6 +36,6 @@ TEST(_DOMAIN, base_usecase)
nest::Closure clo = dom(std::make_shared<::expr::DummyNode>());
- ASSERT_EQ(clo.id(), dom_id);
- ASSERT_EQ(clo.sub().rank(), 1);
+ ASSERT_EQ(dom_id, clo.id());
+ ASSERT_EQ(1, clo.sub().rank());
}
diff --git a/compiler/nest/core/src/DomainContext.test.cpp b/compiler/nest/core/src/DomainContext.test.cpp
index 10882df70..cc553eaa0 100644
--- a/compiler/nest/core/src/DomainContext.test.cpp
+++ b/compiler/nest/core/src/DomainContext.test.cpp
@@ -24,30 +24,30 @@ TEST(DOMAIN_CONTEXT, usecase)
auto dom_0 = ctx.make({1, 3, 4});
- ASSERT_EQ(ctx.count(), 1);
+ ASSERT_EQ(1, ctx.count());
auto check_dom_0 = [&](void) {
- ASSERT_EQ(ctx.info(dom_0).rank(), 3);
- ASSERT_EQ(ctx.info(dom_0).dim(0), 1);
- ASSERT_EQ(ctx.info(dom_0).dim(1), 3);
- ASSERT_EQ(ctx.info(dom_0).dim(2), 4);
+ ASSERT_EQ(3, ctx.info(dom_0).rank());
+ ASSERT_EQ(1, ctx.info(dom_0).dim(0));
+ ASSERT_EQ(3, ctx.info(dom_0).dim(1));
+ ASSERT_EQ(4, ctx.info(dom_0).dim(2));
};
check_dom_0();
auto dom_1 = ctx.make({7, 6, 2, 1});
- ASSERT_EQ(ctx.count(), 2);
+ ASSERT_EQ(2, ctx.count());
// Domain ID should be unique for each domain
ASSERT_FALSE(dom_0.id() == dom_1.id());
auto check_dom_1 = [&](void) {
- ASSERT_EQ(ctx.info(dom_1).rank(), 4);
- ASSERT_EQ(ctx.info(dom_1).dim(0), 7);
- ASSERT_EQ(ctx.info(dom_1).dim(1), 6);
- ASSERT_EQ(ctx.info(dom_1).dim(2), 2);
- ASSERT_EQ(ctx.info(dom_1).dim(3), 1);
+ ASSERT_EQ(4, ctx.info(dom_1).rank());
+ ASSERT_EQ(7, ctx.info(dom_1).dim(0));
+ ASSERT_EQ(6, ctx.info(dom_1).dim(1));
+ ASSERT_EQ(2, ctx.info(dom_1).dim(2));
+ ASSERT_EQ(1, ctx.info(dom_1).dim(3));
};
// make() SHOULD NOT affect the existing domain information
diff --git a/compiler/nest/core/src/DomainID.test.cpp b/compiler/nest/core/src/DomainID.test.cpp
index 6b1ce8360..3a7705025 100644
--- a/compiler/nest/core/src/DomainID.test.cpp
+++ b/compiler/nest/core/src/DomainID.test.cpp
@@ -22,7 +22,7 @@ TEST(DOMAIN_ID, ctor)
{
nest::DomainID id{0};
- ASSERT_EQ(id.value(), 0);
+ ASSERT_EQ(0, id.value());
}
TEST(DOMAIN_ID, operator_eq)
diff --git a/compiler/nest/core/src/DomainInfo.test.cpp b/compiler/nest/core/src/DomainInfo.test.cpp
index 7a5d81144..ddee683a4 100644
--- a/compiler/nest/core/src/DomainInfo.test.cpp
+++ b/compiler/nest/core/src/DomainInfo.test.cpp
@@ -22,9 +22,9 @@ TEST(DOMAIN_INFO, ctor)
{
nest::DomainInfo info{1, 2, 3, 4};
- ASSERT_EQ(info.rank(), 4);
- ASSERT_EQ(info.dim(0), 1);
- ASSERT_EQ(info.dim(1), 2);
- ASSERT_EQ(info.dim(2), 3);
- ASSERT_EQ(info.dim(3), 4);
+ ASSERT_EQ(4, info.rank());
+ ASSERT_EQ(1, info.dim(0));
+ ASSERT_EQ(2, info.dim(1));
+ ASSERT_EQ(3, info.dim(2));
+ ASSERT_EQ(4, info.dim(3));
}
diff --git a/compiler/nest/core/src/Expr.test.cpp b/compiler/nest/core/src/Expr.test.cpp
index 0c96f7714..2e26c234a 100644
--- a/compiler/nest/core/src/Expr.test.cpp
+++ b/compiler/nest/core/src/Expr.test.cpp
@@ -38,8 +38,8 @@ TEST(EXPR, operator_sum)
auto add = expr->asAdd();
- ASSERT_EQ(add->lhs().get(), left.get());
- ASSERT_EQ(add->rhs().get(), right.get());
+ ASSERT_EQ(left.get(), add->lhs().get());
+ ASSERT_EQ(right.get(), add->rhs().get());
}
TEST(EXPR, operator_mul)
@@ -53,6 +53,6 @@ TEST(EXPR, operator_mul)
auto add = expr->asMul();
- ASSERT_EQ(add->lhs().get(), left.get());
- ASSERT_EQ(add->rhs().get(), right.get());
+ ASSERT_EQ(left.get(), add->lhs().get());
+ ASSERT_EQ(right.get(), add->rhs().get());
}
diff --git a/compiler/nest/core/src/FV.test.cpp b/compiler/nest/core/src/FV.test.cpp
index 55f5f5877..8bb061cc9 100644
--- a/compiler/nest/core/src/FV.test.cpp
+++ b/compiler/nest/core/src/FV.test.cpp
@@ -27,7 +27,7 @@ TEST(FV, var_expr)
auto fvs = nest::FV::in(var);
- ASSERT_EQ(fvs.size(), 1);
+ ASSERT_EQ(1, fvs.size());
ASSERT_NE(fvs.find(var.id()), fvs.end());
}
@@ -40,7 +40,7 @@ TEST(FV, deref_expr)
auto fvs = nest::FV::in(dom(var));
- ASSERT_EQ(fvs.size(), 1);
+ ASSERT_EQ(1, fvs.size());
ASSERT_NE(fvs.find(var.id()), fvs.end());
}
@@ -53,7 +53,7 @@ TEST(FV, add_expr)
auto fvs = nest::FV::in(v_0 + v_1);
- ASSERT_EQ(fvs.size(), 2);
+ ASSERT_EQ(2, fvs.size());
ASSERT_NE(fvs.find(v_0.id()), fvs.end());
ASSERT_NE(fvs.find(v_1.id()), fvs.end());
}
@@ -69,7 +69,7 @@ TEST(FV, mul_expr)
auto fvs = nest::FV::in(v_0 * v_1);
- ASSERT_EQ(fvs.size(), 2);
+ ASSERT_EQ(2, fvs.size());
ASSERT_NE(fvs.find(v_0.id()), fvs.end());
ASSERT_NE(fvs.find(v_1.id()), fvs.end());
}
diff --git a/compiler/nest/core/src/Level.test.cpp b/compiler/nest/core/src/Level.test.cpp
index b9e203d9d..cc447e8d2 100644
--- a/compiler/nest/core/src/Level.test.cpp
+++ b/compiler/nest/core/src/Level.test.cpp
@@ -22,7 +22,7 @@ TEST(LEVEL, constructor)
{
nest::Level lv{3};
- ASSERT_EQ(lv.value(), 3);
+ ASSERT_EQ(3, lv.value());
}
TEST(LEVEL, operator_eq)
diff --git a/compiler/nest/core/src/Module.test.cpp b/compiler/nest/core/src/Module.test.cpp
index 01e414d25..70a6c473c 100644
--- a/compiler/nest/core/src/Module.test.cpp
+++ b/compiler/nest/core/src/Module.test.cpp
@@ -29,7 +29,7 @@ TEST(MODULE, create_var)
auto check = [](const nest::Module &m) {
// This code will invoke 'const VarContext &var(void) const' method
- ASSERT_EQ(m.var().count(), 1);
+ ASSERT_EQ(1, m.var().count());
};
create(m);
@@ -47,7 +47,7 @@ TEST(MODULE, create_domain)
auto check = [](const nest::Module &m) {
// This code will invoke 'const DomainContext &domain(void) const' method
- ASSERT_EQ(m.domain().count(), 1);
+ ASSERT_EQ(1, m.domain().count());
};
create(m, {1, 3, 3});
@@ -66,7 +66,7 @@ TEST(MODULE, push)
m.push(ifm(var_ch, var_row, var_col));
- ASSERT_EQ(m.block().size(), 1);
+ ASSERT_EQ(1, m.block().size());
ASSERT_NE(m.block().at(0)->asPush(), nullptr);
}
@@ -82,8 +82,8 @@ TEST(MODULE, ret)
m.push(ifm(ind));
m.ret(ofm(ind));
- ASSERT_EQ(m.ret().id(), ofm.id());
- ASSERT_EQ(m.ret().sub().rank(), 1);
+ ASSERT_EQ(ofm.id(), m.ret().id());
+ ASSERT_EQ(1, m.ret().sub().rank());
}
TEST(MODULE, copy)
@@ -95,5 +95,5 @@ TEST(MODULE, copy)
orig.var().make();
- ASSERT_EQ(copy.var().count(), 0);
+ ASSERT_EQ(0, copy.var().count());
}
diff --git a/compiler/nest/core/src/Ret.test.cpp b/compiler/nest/core/src/Ret.test.cpp
index 703f04901..a85223578 100644
--- a/compiler/nest/core/src/Ret.test.cpp
+++ b/compiler/nest/core/src/Ret.test.cpp
@@ -32,8 +32,8 @@ TEST(RET, ctor)
nest::Ret ret{dom_id, sub};
- ASSERT_EQ(ret.id().value(), 0);
- ASSERT_EQ(ret.sub().rank(), 1);
+ ASSERT_EQ(0, ret.id().value());
+ ASSERT_EQ(1, ret.sub().rank());
}
TEST(RET, copy)
@@ -48,11 +48,11 @@ TEST(RET, copy)
nest::Ret dst{dst_id, dst_sub};
- ASSERT_EQ(dst.id().value(), 1);
- ASSERT_EQ(dst.sub().rank(), 2);
+ ASSERT_EQ(1, dst.id().value());
+ ASSERT_EQ(2, dst.sub().rank());
dst = src;
- ASSERT_EQ(dst.id().value(), 0);
- ASSERT_EQ(dst.sub().rank(), 1);
+ ASSERT_EQ(0, dst.id().value());
+ ASSERT_EQ(1, dst.sub().rank());
}
diff --git a/compiler/nest/core/src/Schedule.test.cpp b/compiler/nest/core/src/Schedule.test.cpp
index 8f0ddb23c..7994d7abe 100644
--- a/compiler/nest/core/src/Schedule.test.cpp
+++ b/compiler/nest/core/src/Schedule.test.cpp
@@ -26,7 +26,7 @@ TEST(SCHEDULE, module)
nest::Schedule sch{m};
- ASSERT_EQ(sch.level(var_1).value(), 0);
+ ASSERT_EQ(0, sch.level(var_1).value());
}
TEST(SCHEDULE, module_copy)
@@ -40,5 +40,5 @@ TEST(SCHEDULE, module_copy)
// Update on 'm' does not affect the schedule
m.var().make();
- ASSERT_EQ(sch.var().count(), 1);
+ ASSERT_EQ(1, sch.var().count());
}
diff --git a/compiler/nest/core/src/Var.test.cpp b/compiler/nest/core/src/Var.test.cpp
index 29f879558..aea8c5dde 100644
--- a/compiler/nest/core/src/Var.test.cpp
+++ b/compiler/nest/core/src/Var.test.cpp
@@ -23,7 +23,7 @@ TEST(VAR, ctor)
nest::VarID id{0};
nest::Var var{id};
- ASSERT_EQ(var.id(), id);
+ ASSERT_EQ(id, var.id());
}
TEST(VAR, cast)
@@ -34,5 +34,5 @@ TEST(VAR, cast)
nest::Expr expr = var;
ASSERT_NE(expr->asVar(), nullptr);
- ASSERT_EQ(expr->asVar()->id(), id);
+ ASSERT_EQ(id, expr->asVar()->id());
}
diff --git a/compiler/nest/core/src/VarContext.test.cpp b/compiler/nest/core/src/VarContext.test.cpp
index 169bd6126..d953f2f6c 100644
--- a/compiler/nest/core/src/VarContext.test.cpp
+++ b/compiler/nest/core/src/VarContext.test.cpp
@@ -32,15 +32,15 @@ TEST(VAR_CONTEXT, count)
{
nest::VarContext ctx;
- ASSERT_EQ(ctx.count(), 0);
+ ASSERT_EQ(0, ctx.count());
auto var_0 = ctx.make();
- ASSERT_EQ(ctx.count(), 1);
+ ASSERT_EQ(1, ctx.count());
auto var_1 = ctx.make();
- ASSERT_EQ(ctx.count(), 2);
+ ASSERT_EQ(2, ctx.count());
}
TEST(VAR_CONTEXT, bound_one)
@@ -49,13 +49,13 @@ TEST(VAR_CONTEXT, bound_one)
auto var_0 = ctx.make();
- ASSERT_EQ(ctx.bound(var_0).min(), 0);
- ASSERT_EQ(ctx.bound(var_0).max(), 0);
+ ASSERT_EQ(0, ctx.bound(var_0).min());
+ ASSERT_EQ(0, ctx.bound(var_0).max());
ctx.bound(var_0) = nest::Bound{-3, 5};
- ASSERT_EQ(ctx.bound(var_0).min(), -3);
- ASSERT_EQ(ctx.bound(var_0).max(), 5);
+ ASSERT_EQ(-3, ctx.bound(var_0).min());
+ ASSERT_EQ(5, ctx.bound(var_0).max());
}
TEST(VAR_CONTEXT, bound_independent)
@@ -64,19 +64,19 @@ TEST(VAR_CONTEXT, bound_independent)
auto var_0 = ctx.make();
- ASSERT_EQ(ctx.bound(var_0).min(), 0);
- ASSERT_EQ(ctx.bound(var_0).max(), 0);
+ ASSERT_EQ(0, ctx.bound(var_0).min());
+ ASSERT_EQ(0, ctx.bound(var_0).max());
auto var_1 = ctx.make();
- ASSERT_EQ(ctx.bound(var_1).min(), 0);
- ASSERT_EQ(ctx.bound(var_1).max(), 0);
+ ASSERT_EQ(0, ctx.bound(var_1).min());
+ ASSERT_EQ(0, ctx.bound(var_1).max());
ctx.bound(var_0) = nest::Bound{-3, 5};
- ASSERT_EQ(ctx.bound(var_0).min(), -3);
- ASSERT_EQ(ctx.bound(var_0).max(), 5);
+ ASSERT_EQ(-3, ctx.bound(var_0).min());
+ ASSERT_EQ(5, ctx.bound(var_0).max());
- ASSERT_EQ(ctx.bound(var_1).min(), 0);
- ASSERT_EQ(ctx.bound(var_1).max(), 0);
+ ASSERT_EQ(0, ctx.bound(var_1).min());
+ ASSERT_EQ(0, ctx.bound(var_1).max());
}
diff --git a/compiler/nest/core/src/VarID.test.cpp b/compiler/nest/core/src/VarID.test.cpp
index e4a17a5c1..05e4b9aa9 100644
--- a/compiler/nest/core/src/VarID.test.cpp
+++ b/compiler/nest/core/src/VarID.test.cpp
@@ -22,7 +22,7 @@ TEST(VAR_ID, ctor)
{
nest::VarID id{0};
- ASSERT_EQ(id.value(), 0);
+ ASSERT_EQ(0, id.value());
}
TEST(VAR_ID, operator_eq)
diff --git a/compiler/nest/core/src/expr/AddNode.test.cpp b/compiler/nest/core/src/expr/AddNode.test.cpp
index 5c44c4743..dba6cc826 100644
--- a/compiler/nest/core/src/expr/AddNode.test.cpp
+++ b/compiler/nest/core/src/expr/AddNode.test.cpp
@@ -36,8 +36,8 @@ TEST(ADD_NODE, cast)
std::shared_ptr<nest::expr::Node> base = derived;
ASSERT_NE(derived.get(), nullptr);
- ASSERT_EQ(base->asAdd(), derived.get());
+ ASSERT_EQ(derived.get(), base->asAdd());
- ASSERT_EQ(derived->lhs().get(), left.get());
- ASSERT_EQ(derived->rhs().get(), right.get());
+ ASSERT_EQ(left.get(), derived->lhs().get());
+ ASSERT_EQ(right.get(), derived->rhs().get());
}
diff --git a/compiler/nest/core/src/expr/DerefNode.test.cpp b/compiler/nest/core/src/expr/DerefNode.test.cpp
index e02a7de0b..125d8bf1e 100644
--- a/compiler/nest/core/src/expr/DerefNode.test.cpp
+++ b/compiler/nest/core/src/expr/DerefNode.test.cpp
@@ -35,5 +35,5 @@ TEST(DEREF_NODE, cast)
std::shared_ptr<nest::expr::Node> base = derived;
ASSERT_NE(derived.get(), nullptr);
- ASSERT_EQ(base->asDeref(), derived.get());
+ ASSERT_EQ(derived.get(), base->asDeref());
}
diff --git a/compiler/nest/core/src/expr/MulNode.test.cpp b/compiler/nest/core/src/expr/MulNode.test.cpp
index b2d29471c..85cb5a56e 100644
--- a/compiler/nest/core/src/expr/MulNode.test.cpp
+++ b/compiler/nest/core/src/expr/MulNode.test.cpp
@@ -36,8 +36,8 @@ TEST(MUL_NODE, cast)
std::shared_ptr<nest::expr::Node> base = derived;
ASSERT_NE(derived.get(), nullptr);
- ASSERT_EQ(base->asMul(), derived.get());
+ ASSERT_EQ(derived.get(), base->asMul());
- ASSERT_EQ(derived->lhs().get(), left.get());
- ASSERT_EQ(derived->rhs().get(), right.get());
+ ASSERT_EQ(left.get(), derived->lhs().get());
+ ASSERT_EQ(right.get(), derived->rhs().get());
}
diff --git a/compiler/nest/core/src/expr/Subscript.test.cpp b/compiler/nest/core/src/expr/Subscript.test.cpp
index 2f187b86c..cfcdd4473 100644
--- a/compiler/nest/core/src/expr/Subscript.test.cpp
+++ b/compiler/nest/core/src/expr/Subscript.test.cpp
@@ -31,7 +31,7 @@ TEST(SUBSCRIPT, ctor)
nest::expr::Subscript sub{expr_0, expr_1};
- ASSERT_EQ(sub.rank(), 2);
- ASSERT_EQ(sub.at(0), expr_0);
- ASSERT_EQ(sub.at(1), expr_1);
+ ASSERT_EQ(2, sub.rank());
+ ASSERT_EQ(expr_0, sub.at(0));
+ ASSERT_EQ(expr_1, sub.at(1));
}
diff --git a/compiler/nest/core/src/expr/VarNode.test.cpp b/compiler/nest/core/src/expr/VarNode.test.cpp
index e8b2764e4..9400551b5 100644
--- a/compiler/nest/core/src/expr/VarNode.test.cpp
+++ b/compiler/nest/core/src/expr/VarNode.test.cpp
@@ -31,7 +31,7 @@ TEST(VAR_NODE, ctor)
auto node = make(4);
// NOTE 'id' should be copied
- ASSERT_EQ(node->id().value(), 4);
+ ASSERT_EQ(4, node->id().value());
}
TEST(VAR_NODE, cast)
@@ -43,5 +43,5 @@ TEST(VAR_NODE, cast)
// NOTE Cast method should be overrided
ASSERT_NE(derived.get(), nullptr);
- ASSERT_EQ(base->asVar(), derived.get());
+ ASSERT_EQ(derived.get(), base->asVar());
}
diff --git a/compiler/nest/core/src/stmt/PushNode.test.cpp b/compiler/nest/core/src/stmt/PushNode.test.cpp
index a54efbb54..c02c69220 100644
--- a/compiler/nest/core/src/stmt/PushNode.test.cpp
+++ b/compiler/nest/core/src/stmt/PushNode.test.cpp
@@ -33,5 +33,5 @@ TEST(STMT_PUSH_NODE, cast)
std::shared_ptr<nest::stmt::Node> base = derived;
ASSERT_NE(derived.get(), nullptr);
- ASSERT_EQ(base->asPush(), derived.get());
+ ASSERT_EQ(derived.get(), base->asPush());
}
diff --git a/compiler/nnc/CMakeLists.txt b/compiler/nnc/CMakeLists.txt
index f899ffb95..ab91cd4b6 100644
--- a/compiler/nnc/CMakeLists.txt
+++ b/compiler/nnc/CMakeLists.txt
@@ -14,7 +14,6 @@ configure_file(${NNC_ROOT_SRC_DIR}/include/Definitions.h.in
# target for compiler executable
add_executable(${NNC_TARGET_EXECUTABLE} ${NNC_DRIVER_DIR}/main.cpp ${NNC_DRIVER_DIR}/Driver.cpp ${NNC_DRIVER_DIR}/Options.cpp)
-target_link_libraries(${NNC_TARGET_EXECUTABLE} PRIVATE stdex)
# install compiler
nnc_install_executable(${NNC_TARGET_EXECUTABLE})
diff --git a/compiler/nnc/cmake/config.cmake b/compiler/nnc/cmake/config.cmake
index d9a1288dc..8623c8c11 100644
--- a/compiler/nnc/cmake/config.cmake
+++ b/compiler/nnc/cmake/config.cmake
@@ -21,7 +21,8 @@ set(NNC_INSTALL_LIB_PATH ${NNC_INSTALL_PATH}/lib) # directory that contains othe
#
# find necessary packages
#
-find_package(HDF5 COMPONENTS CXX QUIET)
+nnas_find_package(HDF5 QUIET)
+
# defines if hdf5 package was found
if(HDF5_FOUND)
set(NNC_HDF5_SUPPORTED ON)
diff --git a/compiler/nnc/driver/Driver.cpp b/compiler/nnc/driver/Driver.cpp
index 5b369623e..995fa9bad 100644
--- a/compiler/nnc/driver/Driver.cpp
+++ b/compiler/nnc/driver/Driver.cpp
@@ -49,7 +49,7 @@
#include <ONNXImporterImpl.h>
#endif // NNC_FRONTEND_ONNX_ENABLED
-#include <stdex/Memory.h>
+#include <memory>
namespace nnc
{
@@ -169,19 +169,19 @@ void Driver::registerBackendSpecificPasses()
if (cli::target == NNC_TARGET_ARM_CPP || cli::target == NNC_TARGET_X86_CPP)
{
- _passManager.registerPass(stdex::make_unique<LowerConv2D>());
- _passManager.registerPass(stdex::make_unique<DataFormatSwitcher>(mir::DataFormat::NHWC));
+ _passManager.registerPass(std::make_unique<LowerConv2D>());
+ _passManager.registerPass(std::make_unique<DataFormatSwitcher>(mir::DataFormat::NHWC));
}
else if (cli::target == NNC_TARGET_ARM_GPU_CPP)
{
- _passManager.registerPass(stdex::make_unique<LowerConv2D>());
- _passManager.registerPass(stdex::make_unique<ConstantFoldTranspose>());
+ _passManager.registerPass(std::make_unique<LowerConv2D>());
+ _passManager.registerPass(std::make_unique<ConstantFoldTranspose>());
// TODO Change to DataFormat::NCHW when fix it in ACL
- _passManager.registerPass(stdex::make_unique<DataFormatSwitcher>(mir::DataFormat::NHWC));
+ _passManager.registerPass(std::make_unique<DataFormatSwitcher>(mir::DataFormat::NHWC));
}
else if (cli::target == NNC_TARGET_INTERPRETER)
{
- _passManager.registerPass(stdex::make_unique<DataFormatSwitcher>(mir::DataFormat::NHWC));
+ _passManager.registerPass(std::make_unique<DataFormatSwitcher>(mir::DataFormat::NHWC));
}
else
{
diff --git a/compiler/nnc/requires.cmake b/compiler/nnc/requires.cmake
index 8b460d962..2fcaea4d5 100644
--- a/compiler/nnc/requires.cmake
+++ b/compiler/nnc/requires.cmake
@@ -1,7 +1,3 @@
require("adtidas")
-require("mir-caffe2-importer")
-require("mir-caffe-importer")
-require("mir-onnx-importer")
+require("mir")
require("mir-interpreter")
-require("mir-tflite-importer")
-require("stdex")
diff --git a/compiler/nnc/tests/acl_soft_backend/CMakeLists.txt b/compiler/nnc/tests/acl_soft_backend/CMakeLists.txt
index b33c1e66f..69a3be882 100644
--- a/compiler/nnc/tests/acl_soft_backend/CMakeLists.txt
+++ b/compiler/nnc/tests/acl_soft_backend/CMakeLists.txt
@@ -29,7 +29,8 @@ if(NOT DEFINED ENV{ODROID_H5_DIR})
return()
endif()
-find_package(HDF5 COMPONENTS CXX REQUIRED)
+nnas_find_package(HDF5 QUIET)
+
nnas_find_package(GTest REQUIRED)
# Provide the test suite with the information where to locate executalbes to run etc.
diff --git a/compiler/nnc/tests/acl_soft_backend/artifact_cmake/CMakeLists.txt b/compiler/nnc/tests/acl_soft_backend/artifact_cmake/CMakeLists.txt
index ceecded9e..b0a805a30 100644
--- a/compiler/nnc/tests/acl_soft_backend/artifact_cmake/CMakeLists.txt
+++ b/compiler/nnc/tests/acl_soft_backend/artifact_cmake/CMakeLists.txt
@@ -11,7 +11,8 @@ set(ODROID_ACL_BUILD_DIR ${ODROID_ACL_DIR}/build)
find_library(OPEN_CL OpenCL /usr/lib/arm-linux-gnueabihf)
find_library(ARM_COMPUTE arm_compute PATHS ${ODROID_ACL_BUILD_DIR})
find_library(ARM_COMPUTE_CORE arm_compute_core PATHS ${ODROID_ACL_BUILD_DIR})
-find_package(HDF5 COMPONENTS CXX REQUIRED)
+nnas_find_package(HDF5 QUIET)
+
add_executable(nnc_test main.cpp AclArtifact.cpp)
diff --git a/compiler/nnc/utils/caffe_model_maker/AllFill.sh b/compiler/nnc/utils/caffe_model_maker/AllFill.sh
deleted file mode 100755
index 93e38d1d7..000000000
--- a/compiler/nnc/utils/caffe_model_maker/AllFill.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/sh
-: '
-Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'
-
-
-#Fills all models and writes errors
-usage () {
- echo "Filler.sh should be in the working directory\nusage:
- no args - assumes current directory
- -d=<dir> fills models in <dir>
- Example:
- $(basename $0) -d='./foobar/'"
-}
-
-DIR="./"
-for i in "$@"
-do
- case $i in
- -h|--help|help)
- usage
- exit 1
- ;;
- -d=*)
- DIR=${i#*=}
- ;;
- esac
- shift
-done
-echo $DIR
-if [ $# -eq 0 ]; then
- echo "Assume working directory"
-fi
-for a in `ls $DIR*.prototxt`; do
- ./Filler.sh $a
-done 2>error.log
diff --git a/compiler/nnc/utils/caffe_model_maker/Filler.sh b/compiler/nnc/utils/caffe_model_maker/Filler.sh
deleted file mode 100755
index 963edbfb3..000000000
--- a/compiler/nnc/utils/caffe_model_maker/Filler.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-: '
-Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'
-
-#Fills $1 with random weights
-if [ $# -eq 0 ]
- then
- echo "usage:\n $(basename $0) foo.prototxt"
- exit 1
-fi
-FN=$1
-NOEXT=${FN%%.*} # filename without the extension
-mkdir $NOEXT
-caffegen init < $FN > $NOEXT/filled.prototxt
-caffegen encode < $NOEXT/filled.prototxt > $NOEXT/model.caffemodel
diff --git a/compiler/nnc/utils/caffe_model_maker/GenerateCaffeModels.py b/compiler/nnc/utils/caffe_model_maker/GenerateCaffeModels.py
deleted file mode 100755
index ca8b3776a..000000000
--- a/compiler/nnc/utils/caffe_model_maker/GenerateCaffeModels.py
+++ /dev/null
@@ -1,722 +0,0 @@
-#!/usr/bin/python3
-"""
-Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import caffe
-import numpy as np
-import sys
-import h5py
-from itertools import chain
-from caffe import layers as L
-import random
-import lmdb
-from collections import Counter, OrderedDict
-
-if (len(sys.argv) < 2):
- dest_folder = ''
- print('Using current directory as destination folder')
-else:
- dest_folder = sys.argv[1] + '/'
-
-
-class PH:
- """
- PlaceHolder value
- """
-
- def __init__(self, type, param):
- self.type = type
- self.param = param
-
-
-# Bookkeeping
-LS = 224
-# bynaryProto file for Infogain
-H = np.eye(3, dtype='f4')
-blob = caffe.io.array_to_blobproto(H.reshape((1, 1, 3, 3)))
-with open(dest_folder + 'infogainH.binaryproto', 'wb+') as f:
- f.write(blob.SerializeToString())
-
-# List of hdf5 files
-with open(dest_folder + "in", 'w+') as f:
- f.write('in.hdf5')
-
-#Window File
-with open(dest_folder + "in_winds", 'w+') as f:
- f.write("""# 1
-in.jpg
-3
-224
-224
-2
-1 0.1 50 50 60 70
-1 0.9 30 30 50 50
-# 2
-in.jpg
-3
-224
-224
-2
-1 0.1 50 50 70 70
-1 0.9 30 30 50 50
-""")
-
-# HDF5 file for HDF5DataSet
-h5f = h5py.File(dest_folder + "in.hdf5", "w")
-h5f.create_dataset("data", data=np.random.rand(1, 3, LS, LS))
-h5f.close()
-
-# LMDB file
-env = lmdb.open(dest_folder + 'test-lmdb')
-with env.begin(write=True) as txn:
- img_data = np.random.rand(3, LS, LS)
- datum = caffe.io.array_to_datum(img_data, label=1)
- txn.put('{:0>10d}'.format(1).encode('ascii'), datum.SerializeToString())
-env.close()
-
-# recurring parameters
-losspara = {'ignore_label': True, 'normalization': 1, 'normalize': True}
-softmaxpara = {'engine': 0, 'axis': 1}
-gdfil = {'type': 'gaussian', 'std': 0.001}
-cofil = {'type': 'constant', 'value': 0}
-rp = {
- 'num_output': 1,
- 'weight_filler': gdfil,
- 'bias_filler': cofil,
- 'expose_hidden': True
-}
-
-filler_par = {
- 'type': 'constant',
- 'value': 0,
- 'min': 0,
- 'max': 1,
- 'mean': 0,
- 'std': 1,
- 'sparse': -1, # -1 means no sparsification
- 'variance_norm': 0
-} # 0 = FAN_IN, 1 = FAN_OUT, 2 = AVERAGE
-
-OPS = [
- ('Parameter', {
- 'shape': {
- 'dim': [1]
- },
- "is_data": True
- }), # ok
- (
- 'Data',
- {
- 'source': 'test-lmdb', # FIXME: unknown DB backend
- 'batch_size': 1,
- 'rand_skip': 0,
- 'backend': 1, # 0 = LEVELDB, 1 = LMDB
- 'scale': 1.0, # deprecated in favor of TransformationParameter
- 'mean_file': 'wtf.is_that',
- 'crop_size': 0,
- 'mirror': False,
- 'force_encoded_color': False,
- 'prefetch': 4,
- "is_data": True
- }),
- (
- 'DummyData',
- {
- 'data_filler': cofil, # ok
- #'num' : [1,1,1], # deprecated shape specification
- #'channels' : [2,2,2],
- #'height' : [3,3,3],
- #'width' : [4,4,4]},
- 'shape': {
- 'dim': [1, 3, LS, LS]
- },
- "is_data": True
- }),
- (
- 'ImageData',
- {
- 'source': 'in_imgs', # file with list of imgs
- 'top': 'op2',
- 'batch_size': 1,
- 'rand_skip': 0,
- 'shuffle': False,
- 'new_height': 0,
- 'new_width': 0,
- 'is_color': True,
- 'root_folder': '',
- 'scale': 1.0, # deprecated in favor of TransformationParameter
- 'mirror': False,
- "is_data": True
- }),
- (
- 'WindowData',
- {
- 'source': 'in_winds',
- 'top': 'op2',
- 'batch_size': 1,
- 'mean_file': 'in.jpg',
- 'transform_param': {
- 'scale': 0.8,
- 'crop_size': 24,
- 'mirror': False,
- #'fg_treshold' : 0.5,
- #'bg_treshold' : 0.5,
- #'fg_fraction' : 0.25,
- },
- 'context_pad': 1,
- 'crop_mode': 'warp',
- 'cache_images': True,
- 'root_folder': './',
- "is_data": True
- }),
- (
- 'HDF5Data',
- {
- 'source': 'in', # This is the name of the file WITH HDF5 FILENAMES 0_0
- # Top should have the same name as the dataset in the hdf5 file
- # FIXME Requires Caffegen to be built with Caffe that supports LMDB
- 'batch_size': 1,
- 'shuffle': False,
- "is_data": True
- }),
- ('Input', {
- 'shape': {
- 'dim': [1, 2, 3, 4]
- },
- "is_data": True
- }), # ok
- (
- 'MemoryData',
- {
- 'batch_size': 1, # ok
- 'channels': 2,
- 'height': 3,
- 'width': 4,
- 'top': "foo",
- "is_data": True
- }),
-
- ## Regular OPS
- (
- "Convolution",
- {
- 'num_output': 64, # ok
- 'kernel_size': 9,
- 'stride': 1,
- 'pad': 0,
- 'weight_filler': gdfil,
- 'param': [{
- 'lr_mult': 1
- }, {
- 'lr_mult': 0.1
- }],
- 'bias_filler': cofil
- }),
-
- # Depthvise conv
- (
- "Convolution",
- {
- 'num_output': 12, # ok
- 'kernel_size': 9,
- 'stride': 1,
- 'dilation': 2,
- 'group': 3,
- 'pad': 0,
- 'weight_filler': gdfil,
- 'param': [{
- 'lr_mult': 1
- }, {
- 'lr_mult': 0.1
- }],
- 'bias_filler': cofil
- }),
- (
- "Deconvolution",
- {
- 'convolution_param': # ok
- {
- 'num_output': 4,
- 'kernel_size': 9,
- 'stride': 1,
- 'pad': 0,
- 'weight_filler': gdfil,
- 'bias_filler': cofil
- }
- }),
- # Depthvise deconv
- (
- "Deconvolution",
- {
- 'convolution_param': # ok
- {
- 'num_output': 12,
- 'kernel_size': 9,
- 'stride': 1,
- 'dilation': 2,
- 'group': 3,
- 'pad': 0,
- 'weight_filler': gdfil,
- 'bias_filler': cofil
- }
- }),
- (
- 'BatchNorm',
- {
- 'eps': 1e-5, # ok
- 'moving_average_fraction': 0.999
- }),
- (
- 'LRN',
- {
- 'alpha': 1., # ok
- 'beta': 0.75,
- 'norm_region': 1,
- 'local_size': 5,
- 'k': 1,
- 'engine': 0
- }),
- # local_size[default 5]: the number of channels to sum over
- # alpha[default 1]: the scaling paramete
- # beta[default5]: the exponent
- # norm_region[default ACROSS_CHANNLS]: whether to sum over adjacent channels(ACROSS_CHANNLS) or nearby
- # spatial locations(WITHIN_CHANNLS)
- # `input / (1 + (\alpha/n) \sum_i x_i^2)^\beta`
- (
- "MVN",
- {
- 'normalize_variance': True, # ok
- 'across_channels': False,
- 'eps': 1e-9
- }),
- (
- 'Im2col',
- {
- 'convolution_param': # ok
- {
- 'num_output': 64,
- 'kernel_size': 9,
- 'stride': 1,
- 'pad': 0,
- 'weight_filler': gdfil,
- # 'param' : [{'lr_mult':1},{'lr_mult':0.1}],
- 'bias_filler': cofil
- }
- }),
- ('Dropout', {
- 'dropout_ratio': 0.5
- }), # ok
- ('Split', {}), # ok
- ('Concat', {
- 'axis': 1
- }), # ok
- (
- 'Tile',
- {
- 'axis': 1, # ok
- 'tiles': 2
- }),
- ('Slice', {
- 'axis': 1,
- 'top': 'op2',
- 'slice_point': 1
- }),
- (
- 'Reshape',
- {
- 'shape': {
- 'dim': [1, 0, -1]
- }, # ok
- 'axis': 0,
- 'num_axes': -1
- }),
- # reshapes only [axis, axis + num_axes] if those aren't 0 and -1; axis can be negative
- # 0 in shape means retaining dim size, -1 means auto size
- (
- 'Flatten',
- {
- 'axis': 1, # ok
- 'end_axis': -1
- }),
- (
- 'Pooling',
- {
- 'pool': 0, # ok # pool: 0 = MAX, 1 = AVE, 2 = STOCHASTIC
- 'pad': 0, # can be replaced with pad_w, pad_h
- 'kernel_size': 3, # can be replaced with kernel_w, kernel_h
- 'stride': 1, # can be replaced with stride_w, stride_h
- 'engine': 0,
- 'global_pooling': False
- }),
- # 'round_mode' : 0}), # 0 = CELS, 1 = FLOOR
- (
- 'Reduction',
- {
- 'operation': 1, # ok # 1 = SUM, 2 = ASUM, 3 = SUMSQ, 4 = MEAN # ok
- 'axis': 0,
- 'coeff': 1.0
- }),
- (
- 'SPP',
- {
- 'pyramid_height': 1, # ok
- 'pool': 0,
- 'engine': 0
- }),
- (
- 'InnerProduct',
- {
- 'num_output': 2, # ok
- 'bias_term': True,
- 'weight_filler': filler_par,
- 'bias_filler': filler_par,
- 'axis': 1,
- 'transpose': False
- }),
- (
- 'Embed',
- {
- 'num_output': 2, # ok
- 'input_dim': 1,
- 'bias_term': True,
- 'weight_filler': filler_par,
- 'bias_filler': filler_par
- }),
- (
- 'ArgMax',
- {
- 'out_max_val': False, # ok # if True, outputs pairs (argmax, maxval) # ok
- 'top_k': 1,
- 'axis': -1
- }),
- (
- 'Softmax',
- {
- 'engine': 0, # ok
- 'axis': 1
- }),
- (
- 'ReLU',
- {
- 'negative_slope': 0, # ok
- 'engine': 0
- }),
- (
- 'PReLU',
- {
- 'filler': filler_par, # ok
- 'channel_shared': False
- }),
- ('ELU', {
- 'alpha': 1
- }), # ok
- ('Sigmoid', {
- 'engine': 0
- }), # ok
- ('BNLL', {}), # ok
- ('TanH', {
- 'engine': 0
- }), # ok
- ('Threshold', {
- 'threshold': 0
- }), # ok
- (
- 'Bias',
- {
- 'axis': 0, # ok
- 'num_axes': -1,
- 'filler': filler_par
- }),
- (
- 'Scale',
- {
- 'axis': 0, # ok
- 'num_axes': -1,
- 'filler': filler_par,
- 'bias_term': False,
- 'bias_filler': filler_par
- }),
- ('AbsVal', {}), # ok
- (
- 'Log',
- {
- 'base': -1.0, # ok
- 'scale': 1.0,
- 'shift': PH(float, (2.0, 10.0)),
- 'how_many': 10
- }), # y = ln(shift + scale * x) (log_base() for base > 0)
- (
- 'Power',
- {
- 'power': -1.0, # ok
- 'scale': 1.0,
- 'shift': 0.0
- }), # y = (shift + scale * x) ^ power
- (
- 'Exp',
- {
- 'base': -1.0, # ok
- 'scale': 1.0,
- 'shift': 0.0
- }),
-
- ## TWO INPUTS
- (
- 'Crop',
- {
- 'axis': 2, # ok
- 'offset': [0],
- "inputs": 2
- }), # if one offset - for all dims, more - specifies
- (
- "Eltwise",
- {
- 'operation': 1, # ok
- 'coeff': [3, 3],
- 'stable_prod_grad': True,
- "inputs": 2
- }),
- ("EuclideanLoss", {
- "inputs": 2
- }), # ok
- ("HingeLoss", {
- 'norm': 1,
- "inputs": 2
- }), # L1 = 1; L2 = 2; # ok
- ("SigmoidCrossEntropyLoss", {
- 'loss_param': losspara,
- "inputs": 2
- }), # ok
-
- ## TWO Inputs, special shape
- (
- "Accuracy",
- {
- 'top_k': 1, # FIXME: different bottom shapes needed
- 'axis': 0,
- 'ignore_label': 0,
- "inputs": 2,
- "special_shape": [1, 3, 1, 1]
- }),
- (
- "SoftmaxWithLoss",
- {
- 'loss_param': losspara, # FIXME: different bottom shapes needed
- 'softmax_param': softmaxpara,
- "inputs": 2,
- "special_shape": [1, 1, 1, 1]
- }),
- ("MultinomialLogisticLoss", {
- 'loss_param': losspara,
- "inputs": 2,
- "special_shape": [1, 1, 1, 1]
- }), # FIXME: different bottom shapes needed
- ("Filter", {
- "inputs": 2,
- "special_shape": [1, 1, 1, 1]
- }), # FIXME: different bottom shapes needed
- ('BatchReindex', {
- "inputs": 2,
- "special_shape": [2]
- }), # takes indices as second blob
- ("InfogainLoss", {
- 'source': 'infogainH.binaryproto',
- 'axis': 1,
- "inputs": 2,
- "special_shape": [1, 1, 1, 1]
- }),
- (
- 'Python',
- {
- 'python_param': # Custom Loss layer
- {
- 'module': 'Pyloss', # the module name -- usually the filename -- that needs to be in $PYTHONPATH
- 'layer': 'EuclideanLossLayer', # the layer name -- the class name in the module
- 'share_in_parallel': False
- },
- # set loss weight so Caffe knows this is a loss layer.
- # since PythonLayer inherits directly from Layer, this isn't automatically
- # known to Caffe
- 'loss_weight': 1,
- "inputs": 2,
- "special_shape": [1, 3, 1, 1]
- },
- ),
-
- ## NOTOP OPS
- ('HDF5Output', {
- 'file_name': 'out.hdf5',
- "inputs": 2,
- "is_notop": True
- }), # ok
- ('Silence', {
- "inputs": 2,
- "is_notop": True
- }), # ok, need to remove tops
-
- ## THREE INPUTS
- ("RNN", {
- 'recurrent_param': rp,
- 'top': "out2",
- "inputs": 3
- }), # ok
- ("Recurrent", {
- 'recurrent_param': rp,
- 'top': "out2",
- "inputs": 3
- }), # ok
-
- ## FOUR INPUTS
- ("LSTM", {
- 'recurrent_param': rp,
- 'top': ["out2", "out3"],
- "inputs": 4
- }), # ok
-
- ## Handled explicitly (special case)
- ("ContrastiveLoss", {
- 'margin': 1.0,
- 'legacy_version': False
- }),
-]
-
-#Helper functions
-
-
-def traverse(obj, callback=None):
- """
- walks a nested dict/list recursively
- :param obj:
- :param callback:
- :return:
- """
- if isinstance(obj, dict):
- value = {k: traverse(v, callback) for k, v in obj.items()}
- elif isinstance(obj, list):
- value = [traverse(elem, callback) for elem in obj]
- else:
- value = obj
-
- if callback is None:
- return value
- else:
- return callback(value)
-
-
-def mock(inp):
- if not (isinstance(inp, PH)): return inp
- if inp.type == int:
- return random.randint(*inp.param)
- if inp.type == float:
- return random.uniform(*inp.param)
-
-
-EXTRA_SHAPES = \
- [(), # alredy defined
- [1, 3],
- [1, 3, 1],
- [1, 3, 1]]
-
-
-class Layer:
- """
- Represents a caffe layer
- """
-
- def __init__(self, name, params):
- self.name = name
- self.args = params
- if self.args == None: self.args = dict()
- self.num_inp = self.args.pop("inputs", 1)
- self.num_out = self.args.pop("outputs", 1)
- self.special_shape = self.args.pop("special_shape",
- False) # 2nd input has special shape
- self.is_data = self.args.pop("is_data", False)
- self.is_notop = self.args.pop("is_notop", False)
-
- def make_net(self):
- """
- Creates a protobuf network
- :return:
- """
- net = caffe.NetSpec()
-
- if self.is_data:
- net.data = getattr(L, self.name)(**self.args)
-
- # Very special,
- elif self.name == "ContrastiveLoss":
- net.data = L.Input(shape={'dim': [1, 4]})
- net.data1 = L.DummyData(data_filler=cofil, shape={'dim': [1, 4]})
- net.data2 = L.DummyData(data_filler=cofil, shape={'dim': [1, 1]})
-
- net.op = getattr(L, self.name)(net.data, net.data1, net.data2, **self.args)
-
- # this covers most cases
- else:
- net.data = L.Input(shape={'dim': [1, 3, LS, LS]})
- if self.num_inp == 2:
- net.data1 = L.DummyData(data_filler=cofil, shape={'dim': [1, 3, LS, LS]})
- elif self.num_inp > 2:
- for i in range(1, self.num_inp):
- setattr(
- net, "data" + str(i),
- L.DummyData(data_filler=cofil, shape={'dim': EXTRA_SHAPES[i]}))
- if self.special_shape:
- net.data = L.Input(shape={'dim': [1, 3, 1, 1]})
- net.data1 = L.DummyData(
- data_filler=cofil, shape={'dim': self.special_shape})
-
- net.op = getattr(L, self.name)(
- net.data,
- *[getattr(net, "data" + str(i))
- for i in range(1, self.num_inp)], **self.args)
-
- if self.is_notop:
- net.op.fn.tops = OrderedDict()
- net.op.fn.ntop = 0 # the messing about in question
-
- return net
-
-
-class LayerMaker:
- """
- Factory class for Layer
- """
-
- def __init__(self, params):
- self.name, self.args = params
- self.how_many = self.args.pop("how_many", 1)
-
- def make(self):
- return [Layer(self.name, traverse(self.args, mock)) for i in range(self.how_many)]
-
-
-layer_gen = chain(*map(lambda para: LayerMaker(para).make(), OPS))
-
-filename = dest_folder + '{}_{}.prototxt'
-
-counter = Counter()
-for layer in layer_gen:
- n = layer.make_net()
- counter[layer.name] += 1
-
- with open(filename.format(layer.name, counter[layer.name] - 1), 'w+') as ptxt_file:
- print(n.to_proto(), file=ptxt_file)
-
- if layer.name == "Python": # Special case for python layer
- with open("Python_0.caffemodel", 'wb+') as caffemodelFile:
- caffemodelFile.write(n.to_proto().SerializeToString())
diff --git a/compiler/nnc/utils/caffe_model_maker/Pyloss.py b/compiler/nnc/utils/caffe_model_maker/Pyloss.py
deleted file mode 100755
index e3f781759..000000000
--- a/compiler/nnc/utils/caffe_model_maker/Pyloss.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-COPYRIGHT
-
-All contributions by the University of California:
-Copyright (c) 2014-2017 The Regents of the University of California (Regents)
-All rights reserved.
-
-All other contributions:
-Copyright (c) 2014-2017, the respective contributors
-All rights reserved.
-
-Caffe uses a shared copyright model: each contributor holds copyright over
-their contributions to Caffe. The project versioning records all such
-contribution and copyright details. If a contributor wants to further mark
-their specific copyright on a particular contribution, they should indicate
-their copyright solely in the commit message of the change when it is
-committed.
-
-LICENSE
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-CONTRIBUTION AGREEMENT
-
-By contributing to the BVLC/caffe repository through pull-request, comment,
-or otherwise, the contributor releases their content to the
-license and copyright terms herein.
-"""
-import caffe
-import numpy as np
-
-
-class EuclideanLossLayer(caffe.Layer):
- """
- Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
- to demonstrate the class interface for developing layers in Python.
- """
-
- def setup(self, bottom, top):
- # check input pair
- if len(bottom) != 2:
- raise Exception("Need two inputs to compute distance.")
-
- def reshape(self, bottom, top):
- # check input dimensions match
- if bottom[0].count != bottom[1].count:
- raise Exception("Inputs must have the same dimension.")
- # difference is shape of inputs
- self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
- # loss output is scalar
- top[0].reshape(1)
-
- def forward(self, bottom, top):
- self.diff[...] = bottom[0].data - bottom[1].data
- top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.
-
- def backward(self, top, propagate_down, bottom):
- for i in range(2):
- if not propagate_down[i]:
- continue
- if i == 0:
- sign = 1
- else:
- sign = -1
- bottom[i].diff[...] = sign * self.diff / bottom[i].num
diff --git a/compiler/nnc/utils/caffe_model_maker/README.md b/compiler/nnc/utils/caffe_model_maker/README.md
deleted file mode 100644
index e34a769a0..000000000
--- a/compiler/nnc/utils/caffe_model_maker/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Utils
-Caffe model generation helpers
-
-REQUIRES:
-
-* caffe
-* h5py
-* lmdb
-* numpy
-* caffegen in `$PATH`
-
-`GenerateCaffeModels.py` creates `*.prototxt` files for 1 and 2 layer caffe models
-The generator can create multiple examples of any layer, assuming you add a
-`how_many` field into the layer's dict. You will also need to replace the constants in said dict with `PH(type, param)` values, where `type` is the type of the placeholder variable
-and `params` is a list (or tuple) of paramenters for generating the mock.
-
-For an example of generating multiple instances of a layer see the `Log` layer.
-
-`Filler.sh` fills a single model with random weights by using `caffegen` and creates a dir with a filled `prototxt` and a `caffemodel` binary file. The result directory is located in the same directory as the `prototxt` file
-
-`AllFill.sh` fills all `*.prototxt` files in the current directory or in provided directory
-(-d)
diff --git a/compiler/nnkit-caffe/README.md b/compiler/nnkit-caffe/README.md
new file mode 100644
index 000000000..c5d8bb291
--- /dev/null
+++ b/compiler/nnkit-caffe/README.md
@@ -0,0 +1 @@
+# nnkit-caffe
diff --git a/compiler/nnkit-mocotf/README.md b/compiler/nnkit-mocotf/README.md
new file mode 100644
index 000000000..e81692ca8
--- /dev/null
+++ b/compiler/nnkit-mocotf/README.md
@@ -0,0 +1 @@
+# nnkit-mocotf
diff --git a/compiler/nnkit-onnxrt/README.md b/compiler/nnkit-onnxrt/README.md
new file mode 100644
index 000000000..ccc948ce3
--- /dev/null
+++ b/compiler/nnkit-onnxrt/README.md
@@ -0,0 +1 @@
+# nnkit-onnxrt
diff --git a/compiler/nnkit-tf/README.md b/compiler/nnkit-tf/README.md
new file mode 100644
index 000000000..82d279bb8
--- /dev/null
+++ b/compiler/nnkit-tf/README.md
@@ -0,0 +1 @@
+# nnkit-tf
diff --git a/compiler/nnkit-tflite/CMakeLists.txt b/compiler/nnkit-tflite/CMakeLists.txt
index d1dbbd772..2ca9a13b8 100644
--- a/compiler/nnkit-tflite/CMakeLists.txt
+++ b/compiler/nnkit-tflite/CMakeLists.txt
@@ -1,4 +1,4 @@
-nnas_find_package(TensorFlowLite QUIET EXACT 1.12)
+nnas_find_package(TensorFlowLite QUIET EXACT 1.13.1)
if(NOT TensorFlowLite_FOUND)
return()
diff --git a/compiler/nnkit-tflite/README.md b/compiler/nnkit-tflite/README.md
new file mode 100644
index 000000000..6d294f6b0
--- /dev/null
+++ b/compiler/nnkit-tflite/README.md
@@ -0,0 +1 @@
+# nnkit-tflite
diff --git a/compiler/nnkit-tflite/backend/Backend.cpp b/compiler/nnkit-tflite/backend/Backend.cpp
index 7d766063e..08ba338e8 100644
--- a/compiler/nnkit-tflite/backend/Backend.cpp
+++ b/compiler/nnkit-tflite/backend/Backend.cpp
@@ -16,8 +16,8 @@
#include "nnkit/support/tflite/AbstractBackend.h"
-#include <tensorflow/contrib/lite/kernels/register.h>
-#include <tensorflow/contrib/lite/model.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
#include <stdexcept>
diff --git a/compiler/nnkit-tflite/support/CMakeLists.txt b/compiler/nnkit-tflite/support/CMakeLists.txt
index 0a3e2fbe8..90d694868 100644
--- a/compiler/nnkit-tflite/support/CMakeLists.txt
+++ b/compiler/nnkit-tflite/support/CMakeLists.txt
@@ -1,10 +1,10 @@
file(GLOB_RECURSE SOURCES "src/*.cpp")
-# TODO Rename nnkit_support_tflite-1.12 as nnkit_tflite_support-1.12
-add_library(nnkit_support_tflite-1.12 STATIC ${SOURCES})
-set_target_properties(nnkit_support_tflite-1.12 PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(nnkit_support_tflite-1.12 PUBLIC include)
-target_link_libraries(nnkit_support_tflite-1.12 nnkit_intf_backend)
-target_link_libraries(nnkit_support_tflite-1.12 tensorflowlite-1.12)
+# TODO Rename nnkit_support_tflite-1.13.1 as nnkit_tflite_support-1.13.1
+add_library(nnkit_support_tflite-1.13.1 STATIC ${SOURCES})
+set_target_properties(nnkit_support_tflite-1.13.1 PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(nnkit_support_tflite-1.13.1 PUBLIC include)
+target_link_libraries(nnkit_support_tflite-1.13.1 nnkit_intf_backend)
+target_link_libraries(nnkit_support_tflite-1.13.1 tensorflowlite-1.13.1)
-add_library(nnkit_support_tflite ALIAS nnkit_support_tflite-1.12)
+add_library(nnkit_support_tflite ALIAS nnkit_support_tflite-1.13.1)
diff --git a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/AbstractBackend.h b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/AbstractBackend.h
index d2f6aa9f3..7b89c7f42 100644
--- a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/AbstractBackend.h
+++ b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/AbstractBackend.h
@@ -18,7 +18,7 @@
#define __NNKIT_SUPPORT_TFLITE_ABSTRACT_BACKEND_H__
#include <nnkit/Backend.h>
-#include <tensorflow/contrib/lite/interpreter.h>
+#include <tensorflow/lite/interpreter.h>
namespace nnkit
{
diff --git a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSet.h b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSet.h
index d28ab6e77..4226a3ac4 100644
--- a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSet.h
+++ b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSet.h
@@ -17,7 +17,7 @@
#ifndef __NNKIT_SUPPORT_TFLITE_TENSOR_SET_H__
#define __NNKIT_SUPPORT_TFLITE_TENSOR_SET_H__
-#include <tensorflow/contrib/lite/context.h>
+#include <tensorflow/lite/context.h>
#include <cstdint>
diff --git a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSets.h b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSets.h
index 570803117..13dea981e 100644
--- a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSets.h
+++ b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorSets.h
@@ -19,7 +19,7 @@
#include "nnkit/support/tflite/TensorSet.h"
-#include <tensorflow/contrib/lite/interpreter.h>
+#include <tensorflow/lite/interpreter.h>
namespace nnkit
{
diff --git a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorUtils.h b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorUtils.h
index 05fb7d58c..20b34a3c2 100644
--- a/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorUtils.h
+++ b/compiler/nnkit-tflite/support/include/nnkit/support/tflite/TensorUtils.h
@@ -17,7 +17,7 @@
#ifndef __NNKIT_SUPPORT_TENSOR_UTILS_H__
#define __NNKIT_SUPPORT_TENSOR_UTILS_H__
-#include <tensorflow/contrib/lite/context.h>
+#include <tensorflow/lite/context.h>
#include <nncc/core/ADT/tensor/Shape.h>
namespace nnkit
diff --git a/compiler/nnkit/actions/HDF5/CMakeLists.txt b/compiler/nnkit/actions/HDF5/CMakeLists.txt
index b799f6df1..63d3320c5 100644
--- a/compiler/nnkit/actions/HDF5/CMakeLists.txt
+++ b/compiler/nnkit/actions/HDF5/CMakeLists.txt
@@ -1,4 +1,4 @@
-find_package(HDF5 COMPONENTS CXX QUIET)
+nnas_find_package(HDF5 QUIET)
if(NOT HDF5_FOUND)
return()
diff --git a/compiler/nnop/README.md b/compiler/nnop/README.md
new file mode 100644
index 000000000..89edf81b4
--- /dev/null
+++ b/compiler/nnop/README.md
@@ -0,0 +1 @@
+# nnop
diff --git a/compiler/nnsuite/README.md b/compiler/nnsuite/README.md
new file mode 100644
index 000000000..d3b2828ed
--- /dev/null
+++ b/compiler/nnsuite/README.md
@@ -0,0 +1 @@
+# nnsuite
diff --git a/compiler/one-cmds/CMakeLists.txt b/compiler/one-cmds/CMakeLists.txt
new file mode 100644
index 000000000..7d73d9b23
--- /dev/null
+++ b/compiler/one-cmds/CMakeLists.txt
@@ -0,0 +1,44 @@
+set(ONE_COMMAND_FILES
+ one-import
+ one-import-tf
+ one-import-tflite
+ one-optimize
+ one-quantize
+ one-pack
+ one-codegen
+ one-prepare-venv
+)
+
+foreach(ONE_COMMAND IN ITEMS ${ONE_COMMAND_FILES})
+
+ set(ONE_COMMAND_FILE ${ONE_COMMAND})
+ set(ONE_COMMAND_SRC "${CMAKE_CURRENT_SOURCE_DIR}/${ONE_COMMAND_FILE}")
+ set(ONE_COMMAND_BIN "${CMAKE_CURRENT_BINARY_DIR}/${ONE_COMMAND_FILE}")
+ set(ONE_COMMAND_TARGET "${ONE_COMMAND}_target")
+
+ add_custom_command(OUTPUT ${ONE_COMMAND_BIN}
+ COMMAND ${CMAKE_COMMAND} -E copy "${ONE_COMMAND_SRC}" "${ONE_COMMAND_BIN}"
+ DEPENDS ${ONE_COMMAND_SRC}
+ COMMENT "Generate ${ONE_COMMAND_BIN}"
+ )
+
+ add_custom_target(${ONE_COMMAND_TARGET} ALL DEPENDS ${ONE_COMMAND_BIN})
+
+ install(FILES ${ONE_COMMAND}
+ PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE
+ GROUP_READ GROUP_WRITE GROUP_EXECUTE
+ WORLD_READ WORLD_EXECUTE
+ DESTINATION bin)
+
+endforeach(ONE_COMMAND)
+
+set(ONE_DOCUMENT_FILES
+ how-to-use-one-commands.txt
+ how-to-prepare-virtualenv.txt
+)
+
+foreach(ONE_DOCUMENT IN ITEMS ${ONE_DOCUMENT_FILES})
+
+ install(FILES ${ONE_DOCUMENT} DESTINATION doc)
+
+endforeach(ONE_DOCUMENT)
diff --git a/compiler/one-cmds/README.md b/compiler/one-cmds/README.md
new file mode 100644
index 000000000..2ce6eb42b
--- /dev/null
+++ b/compiler/one-cmds/README.md
@@ -0,0 +1,3 @@
+# one-cmds
+
+_one-cmds_ provides user commands driver
diff --git a/compiler/one-cmds/how-to-prepare-virtualenv.txt b/compiler/one-cmds/how-to-prepare-virtualenv.txt
new file mode 100644
index 000000000..41fff3aaf
--- /dev/null
+++ b/compiler/one-cmds/how-to-prepare-virtualenv.txt
@@ -0,0 +1,37 @@
+About
+-----
+
+Last update: 2020-07-14
+
+This document explains about 'one-prepare-venv' command.
+
+'one-prepare-venv' will prepare python3 virtual environment with tensorflow-cpu
+version 2.3.0rc0, recommanded 2.x version as of now, so that 'one-import-tf'
+command can execute properly.
+
+
+Prerequisite
+------------
+
+Please install these required packages before venv preparation.
+
+$ sudo apt-get update
+$ sudo apt-get upgrade
+$ sudo apt-get install python3-pip python3-venv
+
+
+How to run
+----------
+
+Just run 'one-prepare-venv' command
+
+$ one-prepare-venv
+
+There will be venv folder as of result.
+
+
+Trouble shooting
+----------------
+
+If you have any problems, please click 'New issue' at
+https://github.com/Samsung/ONE/issues page.
diff --git a/compiler/one-cmds/how-to-use-one-commands.txt b/compiler/one-cmds/how-to-use-one-commands.txt
new file mode 100644
index 000000000..6c2176afa
--- /dev/null
+++ b/compiler/one-cmds/how-to-use-one-commands.txt
@@ -0,0 +1,114 @@
+About
+-----
+
+Last update: 2020-07-14
+
+This document briefly explains how to use one-* commands.
+Detailed options are not explained here. Run the command to see options.
+
+Compilation flow for running with onert;
+1) one-import will import model files generated from famous frameworks
+2) one-optimize will optimize models. This step is optional.
+3) one-quantize will quantize models. This step is also optional.
+4) one-pack will pack to nnpkg so that we can run the model with our onert
+ runtime
+
+Compilation flow for NPU
+1) one-import will import model files generated from famous frameworks
+2) one-optimize will optimize models. This step is optional.
+3) one-quantize will quantize models. Depending on the NPUs.
+4) one-codegen will compile to binary codes.
+
+
+one-import
+-----------
+
+one-import will invokes one-import-* commands.
+
+Syntax: one-import [framework] [options]
+
+Currently supported frameworks are 'tf', 'tflite' for TensorFlow and TensorFlow
+lite.
+
+
+one-import-tf
+-------------
+
+This will convert TensorFlow model (.pb) file to our circle model. You can also
+directly call this command. one-import-tf invokes tf2tfliteV2.py script that
+will internally use TensorFlow lite converter and then invoke tflite2circle
+converter to convert tflite model to circle model.
+
+As tf2tfliteV2.py runs TensorFlow lite converter, you need to have TensorFlow
+installed in your system. We recommand to use 2.3.0rc0 for now.
+
+We provide python virtual environment and one-import-tf will enter and leave
+this environment so that you don't need to explictly 'activate' virtual
+environment.
+
+
+one-import-tflite
+-----------------
+
+You can use one-import-tflite to convert TensorFlow lite model (.tflite) file to
+our circle model. Internally this will invoke tflite2circle.
+
+
+one-optimize
+------------
+
+one-optimize provides network or operator transformation shown below.
+
+Current transformation options are
+- fuse_bcq: This enables Binary-Coded-bases Quantized DNNs
+ - read https://arxiv.org/abs/2005.09904 for detailed information
+- fuse_instnorm: This will convert instance normalization related operators to
+ one InstanceNormalization operator that our onert provides for faster
+ execution.
+- resolve_customop_add: This will convert Custom(Add) to normal Add operator
+- resolve_customop_batchmatmul: This will convert Custom(BatchMatMul) to
+ normal BatchMatMul operator
+- resolve_customop_matmul: This will convert Custom(MatMul) to normal MatMul
+ operator
+
+
+one-quantize
+------------
+
+one-quantize will quantize float32 model to uint8 so that the model can benefit
+for speed that our onert runtime and NPU provides. For convolution type
+operators we currently support layer-wise quantization. Later we will support
+int16 and channel-wise quantization.
+
+Internally this calls circle-quantizer and record-minmax tools.
+
+
+one-pack
+--------
+
+one-pack will generate a package from circle model to nnpackage for our onert
+runtime.
+
+Output is a folder with the model(s) and meta information.
+
+ex) if you have a model named '20200719.circle' and want to pack to 'testnnpack'
+
+$ one-pack -i 20200709.circle -o testnnpack
+
+$ tree testnnpack
+testnnpack
+└── 20200709
+ ├── 20200709.circle
+ └── metadata
+ └── MANIFEST
+
+
+one-codegen
+-----------
+
+one-codegen, like one-import will invoke backend code generation commands.
+As of now, our ONE repo does not provide any code generation commands yet.
+
+Syntax: one-codegen [target-backend] [options]
+
+This will invoke [target-backend]-compile command if available.
diff --git a/compiler/one-cmds/one-codegen b/compiler/one-cmds/one-codegen
new file mode 100644
index 000000000..2c80664e2
--- /dev/null
+++ b/compiler/one-cmds/one-codegen
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+function Usage()
+{
+ echo "Usage: $0 [BACKEND] ..."
+ echo "Available BACKEND drivers:"
+ backend_exist=0
+ for file in `find $DRIVER_PATH -name *-compile -type f`;
+ do
+ backend_driver=$(basename $file)
+ sub_length=8
+ driver_length=$(expr ${#backend_driver} - ${sub_length})
+ backend=${backend_driver:0:${driver_length}} # 8 is length of "-compile"
+ echo " $backend"
+ backend_exist=1
+ done
+ if [ $backend_exist == 0 ]; then
+ echo " (There is no available backend drivers)"
+ fi
+}
+
+# Get command from command-line
+BACKEND=$1; shift
+BACKEND_DRIVER="$BACKEND-compile"
+
+if [[ -z "${BACKEND_DRIVER}" ]]; then
+ Usage
+ exit 255
+fi
+
+BACKEND_DRIVER_CMD="${DRIVER_PATH}/${BACKEND_DRIVER}"
+
+if [[ ! -f "${BACKEND_DRIVER_CMD}" ]]; then
+ echo "ERROR: '${BACKEND_DRIVER}' is not supported"
+ Usage
+ exit 255
+fi
+
+"${BACKEND_DRIVER_CMD}" "$@"
diff --git a/compiler/one-cmds/one-import b/compiler/one-cmds/one-import
new file mode 100644
index 000000000..dbf4af534
--- /dev/null
+++ b/compiler/one-cmds/one-import
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+function Usage()
+{
+ echo "Usage: $0 [FRAMEWORK] ..."
+ echo "Available FRAMEWORK drivers:"
+ framework_exist=0
+ for file in "$DRIVER_PATH"/one-import-*;
+ do
+ framework_driver=$(basename $file)
+ framework=${framework_driver:11} # 11 is length of "one-import-"
+ echo " $framework"
+ framework_exist=1
+ done
+ if [ $framework_exist == 0 ]; then
+ echo " (There is no available import drivers)"
+ fi
+}
+
+# Get command from command-line
+FRAMEWORK=$1; shift
+FRAMEWORK_DRIVER="one-import-$FRAMEWORK"
+
+if [[ -z "${FRAMEWORK_DRIVER}" ]]; then
+ Usage
+ exit 255
+fi
+
+FRAMEWORK_DRIVER_CMD="${DRIVER_PATH}/${FRAMEWORK_DRIVER}"
+
+if [[ ! -f "${FRAMEWORK_DRIVER_CMD}" ]]; then
+ echo "ERROR: '${FRAMEWORK_DRIVER}' is not supported"
+ Usage
+ exit 255
+fi
+
+"${FRAMEWORK_DRIVER_CMD}" "$@"
diff --git a/compiler/one-cmds/one-import-tf b/compiler/one-cmds/one-import-tf
new file mode 100644
index 000000000..c048a4e0c
--- /dev/null
+++ b/compiler/one-cmds/one-import-tf
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+usage()
+{
+ echo "Convert TensorFlow model to circle."
+ echo "Usage: one-import-tf"
+ echo " --input_path <path/to/tfmodel>"
+ echo " --output_path <path/to/circle>"
+ echo " --input_arrays <names of the input arrays, comma-separated>"
+ echo " --input_shapes <input shapes, colon-separated>"
+ echo " --output_arrays <names of the output arrays, comma-separated>"
+ exit 0
+}
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--input_path')
+ export INPUT_PATH="$2"
+ shift 2
+ ;;
+ '--output_path')
+ export OUTPUT_PATH="$2"
+ shift 2
+ ;;
+ '--input_arrays')
+ export INPUT_ARRAYS="$2"
+ shift 2
+ ;;
+ '--input_shapes')
+ export INPUT_SHAPES="$2"
+ shift 2
+ ;;
+ '--output_arrays')
+ export OUTPUT_ARRAYS="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown parameter: ${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then
+ echo "Error: input model not found"
+ echo ""
+ usage
+ exit 2
+fi
+
+FILE_BASE=$(basename ${OUTPUT_PATH})
+MODEL_NAME="${FILE_BASE%.*}"
+
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${DRIVER_PATH}/venv/bin/activate"
+VIRTUALENV_WINDOWS="${DRIVER_PATH}/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# remove previous log
+rm -rf "${OUTPUT_PATH}.log"
+
+# generate temporary tflite file
+echo "python" "${DRIVER_PATH}/tf2tfliteV2.py" --v2 --input_path ${INPUT_PATH} \
+--input_arrays ${INPUT_ARRAYS} --input_shapes ${INPUT_SHAPES} \
+--output_path "${TMPDIR}/${MODEL_NAME}.tflite" \
+--output_arrays ${OUTPUT_ARRAYS} > "${OUTPUT_PATH}.log"
+echo " " >> "${OUTPUT_PATH}.log"
+
+python "${DRIVER_PATH}/tf2tfliteV2.py" --v2 --input_path ${INPUT_PATH} \
+--input_arrays ${INPUT_ARRAYS} --input_shapes ${INPUT_SHAPES} \
+--output_path "${TMPDIR}/${MODEL_NAME}.tflite" \
+--output_arrays ${OUTPUT_ARRAYS} >> "${OUTPUT_PATH}.log" 2>&1
+
+# convert .tflite to .circle
+echo " " >> "${OUTPUT_PATH}.log"
+echo "${DRIVER_PATH}/tflite2circle" "${TMPDIR}/${MODEL_NAME}.tflite" \
+"${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log"
+echo " " >> "${OUTPUT_PATH}.log"
+
+"${DRIVER_PATH}/tflite2circle" "${TMPDIR}/${MODEL_NAME}.tflite" \
+"${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1
diff --git a/compiler/one-cmds/one-import-tflite b/compiler/one-cmds/one-import-tflite
new file mode 100644
index 000000000..31ed5af85
--- /dev/null
+++ b/compiler/one-cmds/one-import-tflite
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+usage()
+{
+ echo "Convert TensorFlow lite model to circle."
+ echo "Usage: one-import-tflite"
+ echo " --input_path <path/to/tflitemodel>"
+ echo " --output_path <path/to/circle>"
+ exit 0
+}
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--input_path')
+ export INPUT_PATH="$2"
+ shift 2
+ ;;
+ '--output_path')
+ export OUTPUT_PATH="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown parameter: ${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then
+ echo "Error: input model not found"
+ echo ""
+ usage
+ exit 2
+fi
+
+# remove previous log
+rm -rf "${OUTPUT_PATH}.log"
+
+# convert .tflite to .circle
+echo "${DRIVER_PATH}/tflite2circle" "${INPUT_PATH}" "${OUTPUT_PATH}" > "${OUTPUT_PATH}.log"
+
+"${DRIVER_PATH}/tflite2circle" "${INPUT_PATH}" "${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1
diff --git a/compiler/one-cmds/one-optimize b/compiler/one-cmds/one-optimize
new file mode 100644
index 000000000..95384c10d
--- /dev/null
+++ b/compiler/one-cmds/one-optimize
@@ -0,0 +1,132 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+usage()
+{
+ echo "Optimize circle model."
+ echo "Usage: one-optimize"
+ echo " --all Enable all optimization algorithms"
+ echo " --fuse_bcq Enable FuseBCQ Pass"
+ echo " --fuse_instnorm Enable FuseInstanceNormalization Pass"
+ echo " --resolve_customop_add"
+ echo " Enable ResolveCustomOpAddPass Pass"
+ echo " --resolve_customop_batchmatmul"
+ echo " Enable ResolveCustomOpBatchMatMulPass Pass"
+ echo " --resolve_customop_matmul"
+ echo " Enable ResolveCustomOpMatMulPass Pass"
+ echo " --input_path <path/to/input/circle>"
+ echo " --output_path <path/to/output/circle>"
+ exit 0
+}
+
+OPTIMIZE_all=0
+OPTIMIZE_fuse_bcq=0
+OPTIMIZE_fuse_instnorm=0
+OPTIMIZE_resolve_customop_add=0
+OPTIMIZE_resolve_customop_batchmatmul=0
+OPTIMIZE_resolve_customop_matmul=0
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--all')
+ OPTIMIZE_all=1
+ shift
+ ;;
+ '--fuse_bcq')
+ OPTIMIZE_fuse_bcq=1
+ shift
+ ;;
+ '--fuse_instnorm')
+ OPTIMIZE_fuse_instnorm=1
+ shift
+ ;;
+ '--resolve_customop_add')
+ OPTIMIZE_resolve_customop_add=1
+ shift
+ ;;
+ '--resolve_customop_batchmatmul')
+ OPTIMIZE_resolve_customop_batchmatmul=1
+ shift
+ ;;
+ '--resolve_customop_matmul')
+ OPTIMIZE_resolve_customop_matmul=1
+ shift
+ ;;
+
+ '--input_path')
+ export INPUT_PATH="$2"
+ shift 2
+ ;;
+ '--output_path')
+ export OUTPUT_PATH="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown parameter: ${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then
+ echo "Error: input model not found"
+ echo ""
+ usage
+ exit 2
+fi
+
+OPTIMIZE_OPTIONS=""
+
+if [ $OPTIMIZE_all == 1 ]; then
+ OPTIMIZE_OPTIONS+="--all "
+fi
+if [ $OPTIMIZE_fuse_bcq == 1 ]; then
+ OPTIMIZE_OPTIONS+="--fuse_bcq "
+fi
+if [ $OPTIMIZE_fuse_instnorm == 1 ]; then
+ OPTIMIZE_OPTIONS+="--fuse_instnorm "
+fi
+if [ $OPTIMIZE_resolve_customop_add == 1 ]; then
+ OPTIMIZE_OPTIONS+="--resolve_customop_add "
+fi
+if [ $OPTIMIZE_resolve_customop_batchmatmul == 1 ]; then
+ OPTIMIZE_OPTIONS+="--resolve_customop_batchmatmul "
+fi
+if [ $OPTIMIZE_resolve_customop_matmul == 1 ]; then
+ OPTIMIZE_OPTIONS+="--resolve_customop_matmul "
+fi
+
+# remove previous log
+rm -rf "${OUTPUT_PATH}.log"
+
+# NOTE do not wrap ${OPTIMIZE_OPTIONS} with ""
+# optimize circle
+echo "${DRIVER_PATH}/circle2circle" ${OPTIMIZE_OPTIONS} \
+"${INPUT_PATH}" "${OUTPUT_PATH}" > "${OUTPUT_PATH}.log"
+
+"${DRIVER_PATH}/circle2circle" ${OPTIMIZE_OPTIONS} \
+"${INPUT_PATH}" "${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1
diff --git a/compiler/one-cmds/one-pack b/compiler/one-cmds/one-pack
new file mode 100644
index 000000000..2bc4c601d
--- /dev/null
+++ b/compiler/one-cmds/one-pack
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+usage()
+{
+ echo "Package circle to nnpkg"
+ echo "Usage: one-pack"
+ echo " -i <path/to/circle>"
+ echo " -o <path/to/nnpackage/folder>"
+ exit 0
+}
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '-i')
+ export INPUT_PATH="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_PATH="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown parameter: ${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then
+ echo "Error: input model not found"
+ echo ""
+ usage
+ exit 2
+fi
+
+# remove previous log
+rm -rf "${OUTPUT_PATH}.log"
+
+# Package circle model file to nnpkg
+echo "${DRIVER_PATH}/model2nnpkg.sh" -o "${OUTPUT_PATH}" "${INPUT_PATH}" > "${OUTPUT_PATH}.log"
+
+"${DRIVER_PATH}/model2nnpkg.sh" -o "${OUTPUT_PATH}" "${INPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1
diff --git a/compiler/one-cmds/one-prepare-venv b/compiler/one-cmds/one-prepare-venv
new file mode 100644
index 000000000..fce838d81
--- /dev/null
+++ b/compiler/one-cmds/one-prepare-venv
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+VENV_ACTIVATE=${DRIVER_PATH}/venv/bin/activate
+
+if [ -f ${VENV_ACTIVATE} ]; then
+ echo "Virtual environment is already prepared."
+ exit 0
+fi
+
+# Install prerequisites
+python3 -m pip install -U virtualenv
+
+# Create python virtual enviornment
+python3 -m venv "${DRIVER_PATH}/venv"
+
+# Install tensorflow
+source "${VENV_ACTIVATE}"
+
+python -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \
+ install -U pip setuptools
+python -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \
+ install tensorflow-cpu==2.3.0rc0
diff --git a/compiler/one-cmds/one-quantize b/compiler/one-cmds/one-quantize
new file mode 100644
index 000000000..ff9e26672
--- /dev/null
+++ b/compiler/one-cmds/one-quantize
@@ -0,0 +1,155 @@
+#!/bin/bash
+
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+usage()
+{
+ echo "Quantize circle model."
+ echo "Usage: one-quantize"
+ echo " --input_dtype Input data type (supported: float32, default=float32)"
+ echo " --quantized_dtype Output quantized data type (supported: uint8, default=uint8)"
+ echo " --granularity Quantize granularity (supported: layer, default=layer)"
+ echo " --min_percentile Minimum percentile (0.0~100.0, default=1.0)"
+ echo " --max_percentile Maximum percentile (0.0~100.0, default=99.0)"
+ echo " --mode Record mode (supported: percentile/moving_average, default=percentile)"
+ echo " --input_path <path/to/input/circle>"
+ echo " --input_data <path/to/input/data>"
+ echo " --output_path <path/to/output/circle>"
+ exit 0
+}
+
+INPUT_DTYPE=float32
+QUANTIZED_DTYPE=uint8
+GRANULARITY=layer
+MIN_PERCENTILE=1
+MAX_PERCENTILE=99
+MODE=percentile
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+
+ '--input_dtype')
+ INPUT_DTYPE="$2"
+ shift 2
+ ;;
+ '--quantized_dtype')
+ QUANTIZED_DTYPE="$2"
+ shift 2
+ ;;
+ '--granularity')
+ GRANULARITY="$2"
+ shift 2
+ ;;
+ '--min_percentile')
+ MIN_PERCENTILE="$2"
+ shift 2
+ ;;
+ '--max_percentile')
+ MAX_PERCENTILE="$2"
+ shift 2
+ ;;
+ '--mode')
+ MODE="$2"
+ shift 2
+ ;;
+
+ '--input_path')
+ INPUT_PATH="$2"
+ shift 2
+ ;;
+ '--input_data')
+ INPUT_DATA="$2"
+ shift 2
+ ;;
+ '--output_path')
+ OUTPUT_PATH="$2"
+ shift 2
+ ;;
+
+ *)
+ echo "Unknown parameter: ${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then
+ echo "Error: input model not found"
+ echo ""
+ usage
+ exit 2
+fi
+if [ -z ${INPUT_DATA} ] || [ ! -e ${INPUT_DATA} ]; then
+ echo "Error: input data not found"
+ echo ""
+ usage
+ exit 2
+fi
+
+FILE_BASE=$(basename ${OUTPUT_PATH})
+MODEL_NAME="${FILE_BASE%.*}"
+
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# remove previous log
+rm -rf "${OUTPUT_PATH}.log"
+
+# quantize circle
+echo "${DRIVER_PATH}/circle-quantizer" \
+--quantize_dequantize_weights ${INPUT_DTYPE} ${QUANTIZED_DTYPE} ${GRANULARITY} \
+"${INPUT_PATH}" "${TMPDIR}/${MODEL_NAME}.1.circle" > "${OUTPUT_PATH}.log"
+echo " " >> "${OUTPUT_PATH}.log"
+
+"${DRIVER_PATH}/circle-quantizer" \
+--quantize_dequantize_weights ${INPUT_DTYPE} ${QUANTIZED_DTYPE} ${GRANULARITY} \
+"${INPUT_PATH}" "${TMPDIR}/${MODEL_NAME}.1.circle" >> "${OUTPUT_PATH}.log" 2>&1
+
+echo " " >> "${OUTPUT_PATH}.log"
+echo "${DRIVER_PATH}/record-minmax" \
+--input_model "${TMPDIR}/${MODEL_NAME}.1.circle" \
+--input_data "${INPUT_DATA}" \
+--min_percentile ${MIN_PERCENTILE} --max_percentile ${MAX_PERCENTILE} \
+--mode "${MODE}" \
+--output_model "${TMPDIR}/${MODEL_NAME}.2.circle" >> "${OUTPUT_PATH}.log" 2>&1
+echo " " >> "${OUTPUT_PATH}.log"
+
+"${DRIVER_PATH}/record-minmax" \
+--input_model "${TMPDIR}/${MODEL_NAME}.1.circle" \
+--input_data "${INPUT_DATA}" \
+--min_percentile ${MIN_PERCENTILE} --max_percentile ${MAX_PERCENTILE} \
+--mode "${MODE}" \
+--output_model "${TMPDIR}/${MODEL_NAME}.2.circle" >> "${OUTPUT_PATH}.log" 2>&1
+
+echo " " >> "${OUTPUT_PATH}.log"
+echo "${DRIVER_PATH}/circle-quantizer" \
+--quantize_with_minmax ${INPUT_DTYPE} ${QUANTIZED_DTYPE} ${GRANULARITY} \
+"${TMPDIR}/${MODEL_NAME}.2.circle" "${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1
+echo " " >> "${OUTPUT_PATH}.log"
+
+"${DRIVER_PATH}/circle-quantizer" \
+--quantize_with_minmax ${INPUT_DTYPE} ${QUANTIZED_DTYPE} ${GRANULARITY} \
+"${TMPDIR}/${MODEL_NAME}.2.circle" "${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1
diff --git a/compiler/one-cmds/requires.cmake b/compiler/one-cmds/requires.cmake
new file mode 100644
index 000000000..9b858ad90
--- /dev/null
+++ b/compiler/one-cmds/requires.cmake
@@ -0,0 +1,5 @@
+require("tf2tfliteV2")
+require("tflite2circle")
+require("circle2circle")
+require("circle-quantizer")
+require("record-minmax")
diff --git a/compiler/oneco-value-pbtxt-test/README.md b/compiler/oneco-value-pbtxt-test/README.md
new file mode 100644
index 000000000..6ae4519c1
--- /dev/null
+++ b/compiler/oneco-value-pbtxt-test/README.md
@@ -0,0 +1 @@
+# oneco-value-pbtxt-test
diff --git a/compiler/oneco/README.md b/compiler/oneco/README.md
new file mode 100644
index 000000000..9687fafc4
--- /dev/null
+++ b/compiler/oneco/README.md
@@ -0,0 +1 @@
+# oneco
diff --git a/compiler/onnx2circle/requires.cmake b/compiler/onnx2circle/requires.cmake
index 418148e4f..f52e40416 100644
--- a/compiler/onnx2circle/requires.cmake
+++ b/compiler/onnx2circle/requires.cmake
@@ -1,7 +1,7 @@
require("stdex")
require("hermes-std")
require("mir2loco")
-require("mir-onnx-importer")
+require("mir")
require("exo")
require("locop")
require("loco")
diff --git a/compiler/onnx2tflite-integration-test/README.md b/compiler/onnx2tflite-integration-test/README.md
new file mode 100644
index 000000000..cf160fe67
--- /dev/null
+++ b/compiler/onnx2tflite-integration-test/README.md
@@ -0,0 +1 @@
+# onnx2tflite-integration-test
diff --git a/compiler/onnx2tflite/requires.cmake b/compiler/onnx2tflite/requires.cmake
index cc05edd84..b16a51141 100644
--- a/compiler/onnx2tflite/requires.cmake
+++ b/compiler/onnx2tflite/requires.cmake
@@ -1,3 +1,3 @@
-require("mir-onnx-importer")
+require("mir")
require("mir2loco")
require("exo")
diff --git a/compiler/oops/README.md b/compiler/oops/README.md
new file mode 100644
index 000000000..9794cf8aa
--- /dev/null
+++ b/compiler/oops/README.md
@@ -0,0 +1 @@
+# oops
diff --git a/compiler/pepper-assert/README.md b/compiler/pepper-assert/README.md
new file mode 100644
index 000000000..df41371cc
--- /dev/null
+++ b/compiler/pepper-assert/README.md
@@ -0,0 +1 @@
+# pepper-assert
diff --git a/compiler/pota-quantization-value-test/CMakeLists.txt b/compiler/pota-quantization-value-test/CMakeLists.txt
new file mode 100644
index 000000000..d97ffc123
--- /dev/null
+++ b/compiler/pota-quantization-value-test/CMakeLists.txt
@@ -0,0 +1,69 @@
+unset(QUANTIZATION_VALUE_TEST)
+unset(QUANTIZATION_VALUE_TEST_WITH_PARAM)
+
+macro(addTest NAME GRANULARITY DTYPE)
+ list(APPEND QUANTIZATION_VALUE_TEST ${NAME})
+ list(APPEND QUANTIZATION_VALUE_TEST_WITH_PARAM ${NAME} ${GRANULARITY} ${DTYPE})
+endmacro(addTest)
+
+# Read "test.lst"
+include("test.lst")
+# Read "test.local.lst" if exists
+include("test.local.lst" OPTIONAL)
+
+unset(TEST_DEPS)
+
+get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR)
+
+set(VIRTUALENV "${NNCC_OVERLAY_DIR}/venv_1_13_2")
+
+###
+### Generate test.config
+###
+set(TEST_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/test.config")
+
+add_custom_command(
+ OUTPUT ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E remove -f ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'RECORD_MINMAX_PATH=\"$<TARGET_FILE:record-minmax>\"' >> ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'CIRCLE_QUANTIZER_PATH=\"$<TARGET_FILE:circle-quantizer>\"' >> ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'CIRCLE_TENSORDUMP_PATH=\"$<TARGET_FILE:circle-tensordump>\"' >> ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'VIRTUALENV=\"${VIRTUALENV}\"' >> ${TEST_CONFIG}
+ DEPENDS record-minmax
+ DEPENDS circle-quantizer
+ DEPENDS circle-tensordump
+ COMMENT "Generate test configuration"
+)
+
+list(APPEND TEST_DEPS "${TEST_CONFIG}")
+
+# This enforces CMake to generate all the dependencies during "build" phase
+add_custom_target(pota_quantization_value_test_deps ALL DEPENDS ${TEST_DEPS})
+
+# Run tests
+add_test(
+ NAME pota_fake_wquant_test
+ COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_fake_wquant.sh"
+ "${TEST_CONFIG}"
+ "${ARTIFACTS_BIN_PATH}"
+ ${QUANTIZATION_VALUE_TEST_WITH_PARAM}
+)
+
+#add_test(
+# NAME pota_record_minmax_test
+# COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_record_minmax.sh"
+# "${TEST_CONFIG}"
+# "${ARTIFACTS_BIN_PATH}"
+# ${QUANTIZATION_VALUE_TEST_WITH_PARAM}
+#)
+
+#add_test(
+# NAME pota_quantization_test
+# COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_quantization.sh"
+# "${TEST_CONFIG}"
+# "${ARTIFACTS_BIN_PATH}"
+# ${QUANTIZATION_VALUE_TEST_WITH_PARAM}
+#)
+
+#set_tests_properties(pota_record_minmax_test PROPERTIES DEPENDS pota_fake_wquant_test)
+#set_tests_properties(pota_quantization_test PROPERTIES DEPENDS pota_record_minmax_test)
diff --git a/compiler/pota-quantization-value-test/README.md b/compiler/pota-quantization-value-test/README.md
new file mode 100644
index 000000000..e3359ae4f
--- /dev/null
+++ b/compiler/pota-quantization-value-test/README.md
@@ -0,0 +1,41 @@
+# pota-quantization-value-test
+
+`pota-quantization-value-test` checks whether a Circle model listed in `test.lst` is correctly quantized (`pota` denotes post-training affine). The circle models are generated from the recipes saved in `res/TensorFlowLiteRecipes`.
+
+Write `test.local.lst` for local test.
+
+### Test process
+
+#### Step 1. Fake quantization
+
+Run `circle-quantizer` with `--quantize_dequantize_weights` option.
+
+Dump the fake-quantized model with `circle-tensordump`.
+
+Compare the dumped model with the expected output in "expected_outputs/<model_name>/\<granularity\>/<quantized_type>/fake_quantization/<tensor_name>.json"
+
+The expected output should include
+ (1) values of weights (only for conv, transposed_conv, depthwise_conv, and fc layers)
+
+#### Step 2. Record moving avg of min and moving avg of max for activations
+
+Run `record-minmax` with the fake-quantized model (input data is saved in "test_inputs/<model_name>/\<granularity\>/<quantized_type>/<record_number>.txt")
+
+Dump the minmax-recorded model with `circle-tensordump`.
+Compare the dumped model with the expected output in "expected_outputs/<model_name>/\<granularity\>/<quantized_type>/record_minmax/<tensor_name>.json"
+
+The expected output should include
+ (1) min/max of activations
+
+#### Step 3. Quantization
+
+Run `circle-quantizer` with `--quantize_with_minmax` option.
+
+Dump the quantized model with `circle-tensordump`.
+
+Compare the dumped model with the expected output in "expected_outputs/<model_name>/\<granularity\>/<quantized_type>/quantization/<tensor_name>.json"
+
+The expected output should include
+ (1) scale, zero point of activations
+ (2) scale, zero point, values of weights
+ (3) scale, values (weights) of bias
diff --git a/compiler/pota-quantization-value-test/compare_tensors.py b/compiler/pota-quantization-value-test/compare_tensors.py
new file mode 100755
index 000000000..258d46dc9
--- /dev/null
+++ b/compiler/pota-quantization-value-test/compare_tensors.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+import h5py as h5
+import numpy as np
+import argparse
+import os.path
+import json
+import sys
+
+#
+# This script checks if the min/max values recorded in the circle model are the same with the expected values
+#
+# Basic usage:
+#   compare_tensors.py --input_h5 <path/to/input/h5> --expect_dir <path/to/expect/dir> --mode <compare_mode>
+#   ex: compare_tensors.py --input_h5 Add_000.h5 --expect_dir expected_outputs/Add_000 --mode fake_quantization
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--input_h5', type=str, required=True)
+parser.add_argument('--expect_dir', type=str, required=True)
+parser.add_argument('--mode', type=str, required=True)
+args = parser.parse_args()
+
+supported_modes = ["fake_quantization", "record_minmax", "quantization"]
+
+model = args.input_h5
+expect_dir = args.expect_dir
+mode = args.mode
+
+failed_cases = 0
+
+if mode not in supported_modes:
+ raise SystemExit("Unsupported mode. --mode should be one of " + str(supported_modes))
+
+
+def compare_fake_quantization(tensor, tensor_name, expect_dir):
+ global failed_cases
+ with open(expect_dir + "/" + tensor_name + ".json", "r") as expect_file:
+ json_load = json.load(expect_file)
+ expected_weights = np.array(json_load["weights"])
+ input_weights = tensor["weights"][:]
+ if np.allclose(input_weights, expected_weights, rtol=1.e-5, atol=1.e-5) == False:
+ print("Fake-quantized weights of " + tensor_name + " (" + str(input_weights) +
+ ") do not match with expected value (" + str(expected_weights) + ").")
+ failed_cases += 1
+
+
+def compare_record_minmax(tensor, tensor_name, expect_dir):
+ global failed_cases
+ with open(expect_dir + "/" + tensor_name + ".json", "r") as expect_file:
+ json_load = json.load(expect_file)
+ expected_min = np.array(json_load["min"])
+ expected_max = np.array(json_load["max"])
+ input_min = tensor["min"][:]
+ input_max = tensor["max"][:]
+ if np.allclose(input_min, expected_min, rtol=1.e-5, atol=1.e-5) == False:
+ print("Recorded min of " + tensor_name + " (" + str(input_min) +
+ ") does not match with expected value (" + str(expected_min) + ").")
+ failed_cases += 1
+ if np.allclose(input_max, expected_max, rtol=1.e-5, atol=1.e-5) == False:
+ print("Recorded max of " + tensor_name + " (" + str(input_max) +
+ ") does not match with expected value (" + str(expected_max) + ").")
+ failed_cases += 1
+
+
+def compare_quantization(tensor, tensor_name, expect_dir):
+ global failed_cases
+ with open(expect_dir + "/" + tensor_name + ".json", "r") as expect_file:
+ json_load = json.load(expect_file)
+ for key in json_load:
+ if key == "weights":
+ expected_weights = np.array(json_load["weights"])
+ input_weights = tensor["weights"][:]
+ if np.allclose(input_weights, expected_weights, rtol=0, atol=0) == False:
+ print("Quantized weights of " + tensor_name + " (" + str(input_weights) +
+ ") do not match with expected value (" + str(expected_weights) +
+ ").")
+ failed_cases += 1
+
+ if key == "scale":
+ expected_scale = np.array(json_load["scale"])
+ input_scale = tensor["scale"][:]
+ if np.allclose(input_scale, expected_scale, rtol=1.e-5, atol=1.e-5) == False:
+ print("Quantized scale of " + tensor_name + " (" + str(input_scale) +
+ ") do not match with expected value (" + str(expected_scale) + ").")
+ failed_cases += 1
+
+ if key == "zero_point":
+ expected_zero_point = np.array(json_load["zero_point"])
+ input_zero_point = tensor["zero_point"][:]
+ if np.allclose(
+ input_zero_point, expected_zero_point, rtol=0, atol=0) == False:
+ print("Quantized zero_point of " + tensor_name + " (" +
+ str(input_zero_point) + ") do not match with expected value (" +
+ str(expected_zero_point) + ").")
+ failed_cases += 1
+
+
+with h5.File(model, "r") as input:
+ for tensor_name in input.keys():
+ # We only check the given golden data
+ if os.path.isfile(expect_dir + "/" + tensor_name + ".json"):
+ print("Compare " + tensor_name)
+ if mode == "fake_quantization":
+ compare_fake_quantization(input[tensor_name], tensor_name, expect_dir)
+ elif mode == "record_minmax":
+ compare_record_minmax(input[tensor_name], tensor_name, expect_dir)
+ elif mode == "quantization":
+ compare_quantization(input[tensor_name], tensor_name, expect_dir)
+ else:
+        raise SystemExit("Unsupported mode.")
+
+sys.exit(failed_cases)
diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json
new file mode 100644
index 000000000..21b8ecad7
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json
@@ -0,0 +1,48 @@
+{
+ "weights": [
+ [
+ [
+ [
+ 1.003921627998352,
+ 2.007843255996704
+ ],
+ [
+ -3.0117647647857666,
+ -4.015686511993408
+ ]
+ ],
+ [
+ [
+ -5.019608020782471,
+ 6.023529529571533
+ ],
+ [
+ -7.027451038360596,
+ 7.968627452850342
+ ]
+ ]
+ ],
+ [
+ [
+ [
+ 4.015686511993408,
+ -2.007843255996704
+ ],
+ [
+ 3.0117647647857666,
+ -1.003921627998352
+ ]
+ ],
+ [
+ [
+ -7.968627452850342,
+ -6.023529529571533
+ ],
+ [
+ 7.027451038360596,
+ 5.019608020782471
+ ]
+ ]
+ ]
+ ]
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json
new file mode 100644
index 000000000..462d0d3e3
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json
@@ -0,0 +1,7 @@
+ {
+ "scale": 0.0059054209919261825,
+ "weights": [
+ 169.0,
+ 339.0
+ ]
+ }
diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json
new file mode 100644
index 000000000..107117b80
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json
@@ -0,0 +1,4 @@
+{
+ "scale": 0.09411764705882353,
+ "zero_point": 0.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json
new file mode 100644
index 000000000..3a6e171a1
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json
@@ -0,0 +1,52 @@
+{
+ "max": 7.968627450980392,
+ "scale": 0.06274509803921569,
+ "weights": [
+ [
+ [
+ [
+ 144,
+ 160
+ ],
+ [
+ 80,
+ 64
+ ]
+ ],
+ [
+ [
+ 48,
+ 224
+ ],
+ [
+ 16,
+ 255
+ ]
+ ]
+ ],
+ [
+ [
+ [
+ 192,
+ 96
+ ],
+ [
+ 176,
+ 112
+ ]
+ ],
+ [
+ [
+ 1,
+ 32
+ ],
+ [
+ 240,
+ 208
+ ]
+ ]
+ ]
+ ],
+ "min": -8.031372549019608,
+ "zero_point": 128.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json
new file mode 100644
index 000000000..2374639b1
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json
@@ -0,0 +1,4 @@
+{
+ "scale": 0.17836222929113052,
+ "zero_point": 0.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json
new file mode 100644
index 000000000..563c0424f
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json
@@ -0,0 +1,4 @@
+{
+ "max": 24.0,
+ "min": 1.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json
new file mode 100644
index 000000000..fd0c6dc86
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json
@@ -0,0 +1,4 @@
+{
+ "max": 45.48236846923828,
+ "min": 0.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json
new file mode 100644
index 000000000..11e91ca42
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json
@@ -0,0 +1,34 @@
+{
+ "weights": [
+ [
+ [
+ [
+ 0.9725490212440491,
+ 1.9450980424880981,
+ 3.0392158031463623,
+ 4.0117645263671875
+ ],
+ [
+ -8.996078491210938,
+ 9.968626976013184,
+ -10.941176414489746,
+ 12.035294532775879
+ ]
+ ],
+ [
+ [
+ 4.984313488006592,
+ 5.956862926483154,
+ 7.050980567932129,
+ 8.023529052734375
+ ],
+ [
+ 13.007843017578125,
+ -13.980392456054688,
+ 14.952940940856934,
+ -16.04705810546875
+ ]
+ ]
+ ]
+ ]
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json
new file mode 100644
index 000000000..df7cb14c4
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json
@@ -0,0 +1,9 @@
+{
+ "scale": 0.007627835447904652,
+ "weights": [
+ 131.0,
+ 262.0,
+ 393.0,
+ 524.0
+ ]
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json
new file mode 100644
index 000000000..254ce899a
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json
@@ -0,0 +1,4 @@
+{
+ "scale": 0.06274509803921569,
+ "zero_point": 0.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json
new file mode 100644
index 000000000..3d14da173
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json
@@ -0,0 +1,38 @@
+{
+ "max": 14.952941176470588,
+ "scale": 0.12156862745098039,
+ "weights": [
+ [
+ [
+ [
+ 140,
+ 148,
+ 157,
+ 165
+ ],
+ [
+ 58,
+ 214,
+ 42,
+ 231
+ ]
+ ],
+ [
+ [
+ 173,
+ 181,
+ 190,
+ 198
+ ],
+ [
+ 239,
+ 17,
+ 255,
+ 0
+ ]
+ ]
+ ]
+ ],
+ "min": -16.04705882352941,
+ "zero_point": 132.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json
new file mode 100644
index 000000000..85dd4d9ae
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json
@@ -0,0 +1,4 @@
+{
+ "scale": 0.893733185412837,
+ "zero_point": 0.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json
new file mode 100644
index 000000000..9aee7bcb0
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json
@@ -0,0 +1,4 @@
+{
+ "max": 16.0,
+ "min": 1.0
+}
diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json
new file mode 100644
index 000000000..aa42a6614
--- /dev/null
+++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json
@@ -0,0 +1,4 @@
+{
+ "max": 227.90196228027344,
+ "min": 0.0
+}
diff --git a/compiler/pota-quantization-value-test/gen_h5_explicit_inputs.py b/compiler/pota-quantization-value-test/gen_h5_explicit_inputs.py
new file mode 100755
index 000000000..9863c807a
--- /dev/null
+++ b/compiler/pota-quantization-value-test/gen_h5_explicit_inputs.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+import h5py as h5
+import numpy as np
+import tensorflow as tf
+import argparse
+import glob
+
+#
+# This script generates a pack of input data (.h5) from the explicit values expected by the input tflite model
+#
+# Basic usage:
+# gen_h5_explicit_inputs.py --model <path/to/model/file> --input <path/to/input/directory> --output <path/to/output/file>
+# ex: gen_h5_explicit_inputs.py --model Add_000.tflite --input Add_000 --output Add_000.input.h5
+# (This will create Add_000.input.h5)
+#
+# The input directory should be organized as follows
+# <input_directory>/
+# -> <record_index>.txt
+# ...
+# Each txt file has the explicit values of inputs
+# Example. if the model has two inputs whose shapes are both (1, 3),
+# the first record file name is 0.txt, and its contents is something like below
+# 1, 2, 3
+# 4, 5, 6
+#
+parser = argparse.ArgumentParser()
+parser.add_argument('--model', type=str, required=True)
+parser.add_argument('--input', type=str, required=True)
+parser.add_argument('--output', type=str, required=True)
+args = parser.parse_args()
+
+model = args.model
+input = args.input
+output = args.output
+
+# Build TFLite interpreter. (to get the information of model input)
+interpreter = tf.lite.Interpreter(model)
+input_details = interpreter.get_input_details()
+
+# Create h5 file
+h5_file = h5.File(output, 'w')
+group = h5_file.create_group("value")
+group.attrs['desc'] = "Input data for " + model
+
+# Input files
+records = sorted(glob.glob(input + "/*.txt"))
+for i, record in enumerate(records):
+ sample = group.create_group(str(i))
+ sample.attrs['desc'] = "Input data " + str(i)
+ with open(record, 'r') as f:
+ lines = f.readlines()
+ for j, line in enumerate(lines):
+ data = np.array(line.split(','))
+ input_detail = input_details[j]
+ input_data = np.array(
+ data.reshape(input_detail["shape"]), input_detail["dtype"])
+ sample.create_dataset(str(j), data=input_data)
+
+h5_file.close()
diff --git a/compiler/pota-quantization-value-test/requires.cmake b/compiler/pota-quantization-value-test/requires.cmake
new file mode 100644
index 000000000..883a925df
--- /dev/null
+++ b/compiler/pota-quantization-value-test/requires.cmake
@@ -0,0 +1,4 @@
+require("record-minmax")
+require("circle-quantizer")
+require("circle-tensordump")
+require("common-artifacts")
diff --git a/compiler/pota-quantization-value-test/test.lst b/compiler/pota-quantization-value-test/test.lst
new file mode 100644
index 000000000..65613ff8f
--- /dev/null
+++ b/compiler/pota-quantization-value-test/test.lst
@@ -0,0 +1,2 @@
+addTest(Conv2D_004 layer uint8)
+addTest(DepthwiseConv2D_002 layer uint8)
diff --git a/compiler/pota-quantization-value-test/test_fake_wquant.sh b/compiler/pota-quantization-value-test/test_fake_wquant.sh
new file mode 100755
index 000000000..1331703ee
--- /dev/null
+++ b/compiler/pota-quantization-value-test/test_fake_wquant.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+# This script tests fake quantization of weights (circle-quantizer --quantize_dequantize_weights)
+#
+# HOW TO USE
+#
+# ./test_fake_wquant.sh <path/to/test.config> <path/to/work_dir> <TEST 1> <TEST 2> ...
+# test.config : set ${CIRCLE_QUANTIZER_PATH} and ${CIRCLE_TENSORDUMP_PATH}
+# work_dir : build directory of pota-quantization-value-test (ex: build/compiler/pota-quantization-value-test)
+
+SOURCE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+COMPARE_SCRIPT_PATH="${SOURCE_PATH}/compare_tensors.py"
+CONFIG_PATH="$1"; shift
+BIN_PATH=$(dirname "${CONFIG_PATH}")
+TEST_INPUT_PATH="${SOURCE_PATH}/test_inputs"
+WORKDIR="$1"; shift
+
+source "${CONFIG_PATH}"
+
+echo "-- Found CIRCLE_QUANTIZER: ${CIRCLE_QUANTIZER_PATH}"
+echo "-- Found CIRCLE_TENSORDUMP: ${CIRCLE_TENSORDUMP_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+while [ "$1" != "" ]; do
+ MODELNAME=$1; shift
+ GRANULARITY=$1; shift
+ DTYPE=$1; shift
+ TESTCASE="${MODELNAME}.${GRANULARITY}.${DTYPE}"
+
+ TESTED+=("${TESTCASE}")
+
+ TESTCASE_FILE="${WORKDIR}/${TESTCASE}"
+ TEST_RESULT_FILE="${BIN_PATH}/${TESTCASE}"
+
+ PASSED_TAG="${TEST_RESULT_FILE}.fake_quantized.passed"
+ rm -f "${PASSED_TAG}"
+
+ cat > "${TEST_RESULT_FILE}_fake_quantization.log" <(
+ exec 2>&1
+ set -ex
+
+ # Run circle-quantizer with --quantize_dequantize_weights
+ "${CIRCLE_QUANTIZER_PATH}" \
+ --quantize_dequantize_weights float32 "${DTYPE}" "${GRANULARITY}" \
+ "${WORKDIR}/${MODELNAME}.circle" \
+ "${TEST_RESULT_FILE}.fake_quantized.circle"
+
+ # Dump weights values (circle-tensordump)
+ "${CIRCLE_TENSORDUMP_PATH}" \
+ "${TEST_RESULT_FILE}.fake_quantized.circle" \
+ --tensors_to_hdf5 "${TEST_RESULT_FILE}.fake_quantized.circle.h5"
+
+ # Compare result
+ "${VIRTUALENV}/bin/python" "${COMPARE_SCRIPT_PATH}" \
+ --input_h5 "${TEST_RESULT_FILE}.fake_quantized.circle.h5" \
+ --expect_dir "${SOURCE_PATH}/expected_outputs/${MODELNAME}/${GRANULARITY}/${DTYPE}/fake_quantization" \
+ --mode fake_quantization
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$TESTCASE")
+ else
+ FAILED+=("$TESTCASE")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt
new file mode 100644
index 000000000..8803cb178
--- /dev/null
+++ b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt
@@ -0,0 +1 @@
+1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24
diff --git a/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt
new file mode 100644
index 000000000..c210774d2
--- /dev/null
+++ b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt
@@ -0,0 +1 @@
+1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12, 13, 14, 15, 16
diff --git a/compiler/pota-quantization-value-test/test_quantization.sh b/compiler/pota-quantization-value-test/test_quantization.sh
new file mode 100755
index 000000000..5ebd72601
--- /dev/null
+++ b/compiler/pota-quantization-value-test/test_quantization.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+# This script tests quantization of a minmax-recorded model (circle-quantizer --quantize_with_minmax)
+#
+# HOW TO USE
+#
+# ./test_quantization.sh <path/to/test.config> <path/to/work_dir> <TEST 1> <TEST 2> ...
+# test.config : set ${CIRCLE_QUANTIZER_PATH} and ${CIRCLE_TENSORDUMP_PATH}
+# work_dir : build directory of pota-quantization-value-test (ex: build/compiler/pota-quantization-value-test)
+
+SOURCE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+COMPARE_SCRIPT_PATH="${SOURCE_PATH}/compare_tensors.py"
+CONFIG_PATH="$1"; shift
+BIN_PATH=$(dirname "${CONFIG_PATH}")
+TEST_INPUT_PATH="${SOURCE_PATH}/test_inputs"
+WORKDIR="$1"; shift
+
+source "${CONFIG_PATH}"
+
+echo "-- Found CIRCLE_QUANTIZER: ${CIRCLE_QUANTIZER_PATH}"
+echo "-- Found CIRCLE_TENSORDUMP: ${CIRCLE_TENSORDUMP_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+while [ "$1" != "" ]; do
+ MODELNAME=$1; shift
+ GRANULARITY=$1; shift
+ DTYPE=$1; shift
+ TESTCASE="${MODELNAME}.${GRANULARITY}.${DTYPE}"
+
+ TESTED+=("${TESTCASE}")
+
+ TESTCASE_FILE="${WORKDIR}/${TESTCASE}"
+ TEST_RESULT_FILE="${BIN_PATH}/${TESTCASE}"
+
+ PASSED_TAG="${TEST_RESULT_FILE}.quantization.passed"
+ rm -f "${PASSED_TAG}"
+
+ cat > "${TEST_RESULT_FILE}_quantization.log" <(
+ exec 2>&1
+ set -ex
+
+ # Run circle-quantizer with --quantize_with_minmax
+ "${CIRCLE_QUANTIZER_PATH}" \
+ --quantize_with_minmax float32 "${DTYPE}" "${GRANULARITY}" \
+ "${TEST_RESULT_FILE}.minmax_recorded.circle" \
+ "${TEST_RESULT_FILE}.quantized.circle"
+
+ # Dump scale, zp, weights values (circle-tensordump)
+ "${CIRCLE_TENSORDUMP_PATH}" \
+ "${TEST_RESULT_FILE}.quantized.circle" \
+ --tensors_to_hdf5 "${TEST_RESULT_FILE}.quantized.circle.h5"
+
+ # Compare result
+ "${VIRTUALENV}/bin/python" "${COMPARE_SCRIPT_PATH}" \
+ --input_h5 "${TEST_RESULT_FILE}.quantized.circle.h5" \
+ --expect_dir "${SOURCE_PATH}/expected_outputs/${MODELNAME}/${GRANULARITY}/${DTYPE}/quantization" \
+ --mode quantization
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$TESTCASE")
+ else
+ FAILED+=("$TESTCASE")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/pota-quantization-value-test/test_record_minmax.sh b/compiler/pota-quantization-value-test/test_record_minmax.sh
new file mode 100755
index 000000000..eaa462d0c
--- /dev/null
+++ b/compiler/pota-quantization-value-test/test_record_minmax.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+# This script tests the basic behavior of record-minmax
+#
+# HOW TO USE
+#
+# ./test_record_minmax.sh <path/to/test.config> <path/to/work_dir> <TEST 1> <TEST 2> ...
+# test.config : set ${RECORD_MINMAX_PATH} and ${CIRCLE_TENSORDUMP_PATH}
+# work_dir : build directory of pota-quantization-value-test (ex: build/compiler/pota-quantization-value-test)
+
+SOURCE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+GEN_SCRIPT_PATH="${SOURCE_PATH}/gen_h5_explicit_inputs.py"
+COMPARE_SCRIPT_PATH="${SOURCE_PATH}/compare_tensors.py"
+CONFIG_PATH="$1"; shift
+BIN_PATH=$(dirname "${CONFIG_PATH}")
+TEST_INPUT_PATH="${SOURCE_PATH}/test_inputs"
+WORKDIR="$1"; shift
+
+source "${CONFIG_PATH}"
+
+echo "-- Found RECORD-MINMAX: ${RECORD_MINMAX_PATH}"
+echo "-- Found CIRCLE_TENSORDUMP: ${CIRCLE_TENSORDUMP_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+while [ "$1" != "" ]; do
+ MODELNAME=$1; shift
+ GRANULARITY=$1; shift
+ DTYPE=$1; shift
+ TESTCASE="${MODELNAME}.${GRANULARITY}.${DTYPE}"
+
+ TESTED+=("${TESTCASE}")
+
+ TESTCASE_FILE="${WORKDIR}/${TESTCASE}"
+ TEST_RESULT_FILE="${BIN_PATH}/${TESTCASE}"
+
+ PASSED_TAG="${TEST_RESULT_FILE}.record_minmax.passed"
+ rm -f "${PASSED_TAG}"
+
+ cat > "${TEST_RESULT_FILE}_record_minmax.log" <(
+ exec 2>&1
+ set -ex
+
+ # Generate h5 input data
+ source "${VIRTUALENV}/bin/activate"
+ "${VIRTUALENV}/bin/python" "${GEN_SCRIPT_PATH}" \
+ --model "${WORKDIR}/${MODELNAME}.tflite" \
+ --input "${TEST_INPUT_PATH}/${MODELNAME}/${GRANULARITY}/${DTYPE}" \
+ --output "${TESTCASE_FILE}.input.h5"
+
+ if [[ $? -ne 0 ]]; then
+ echo "FAILED TO GENERATE INPUT"
+ continue
+ fi
+
+ # Run record-minmax
+ "${RECORD_MINMAX_PATH}" \
+ "${TEST_RESULT_FILE}.fake_quantized.circle" \
+ "${TEST_RESULT_FILE}.input.h5" \
+ "${TEST_RESULT_FILE}.minmax_recorded.circle"
+
+ # Dump min/max values (circle-tensordump)
+ "${CIRCLE_TENSORDUMP_PATH}" \
+ "${TEST_RESULT_FILE}.minmax_recorded.circle" \
+ --tensors_to_hdf5 "${TEST_RESULT_FILE}.minmax_recorded.circle.h5"
+
+ # Compare result
+ "${VIRTUALENV}/bin/python" "${COMPARE_SCRIPT_PATH}" \
+ --input_h5 "${TEST_RESULT_FILE}.minmax_recorded.circle.h5" \
+ --expect_dir "${SOURCE_PATH}/expected_outputs/${MODELNAME}/${GRANULARITY}/${DTYPE}/record_minmax" \
+ --mode record_minmax
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$TESTCASE")
+ else
+ FAILED+=("$TESTCASE")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/pp/src/LinearDocument.cpp b/compiler/pp/src/LinearDocument.cpp
index 2bc5f260c..2cd35ef98 100644
--- a/compiler/pp/src/LinearDocument.cpp
+++ b/compiler/pp/src/LinearDocument.cpp
@@ -63,9 +63,9 @@ const std::string &LinearDocument::line(uint32_t n) const
{
return _lines.at(lines() - n - 1);
}
+ default:
+ throw std::runtime_error{"Not supported Direction"};
}
-
- throw std::runtime_error{"unreachable"};
}
} // namespace pp
diff --git a/compiler/record-minmax-conversion-test/CMakeLists.txt b/compiler/record-minmax-conversion-test/CMakeLists.txt
new file mode 100644
index 000000000..2221e1702
--- /dev/null
+++ b/compiler/record-minmax-conversion-test/CMakeLists.txt
@@ -0,0 +1,42 @@
+unset(RECORD_MINMAX_CONVERSION_TEST)
+
+macro(addTest NAME)
+ list(APPEND RECORD_MINMAX_CONVERSION_TEST ${NAME})
+endmacro(addTest)
+
+# Read "test.lst"
+include("test.lst")
+# Read "test.local.lst" if exists
+include("test.local.lst" OPTIONAL)
+
+unset(TEST_DEPS)
+
+get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR)
+
+###
+### Generate test.config
+###
+set(TEST_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/test.config")
+
+add_custom_command(
+ OUTPUT ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E remove -f ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'RECORD_MINMAX_PATH=\"$<TARGET_FILE:record-minmax>\"' >> ${TEST_CONFIG}
+ DEPENDS record-minmax
+ COMMENT "Generate test configuration"
+)
+
+list(APPEND TEST_DEPS "${TEST_CONFIG}")
+
+# This enforces CMake to generate all the dependencies during "build" phase
+add_custom_target(record_minmax_conversion_test_deps ALL DEPENDS ${TEST_DEPS})
+
+# Run tests
+add_test(
+ NAME record_minmax_conversion_test
+ COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/testall.sh"
+ "${TEST_CONFIG}"
+ "${ARTIFACTS_BIN_PATH}"
+ "${NNCC_OVERLAY_DIR}/venv_1_13_2"
+ ${RECORD_MINMAX_CONVERSION_TEST}
+)
diff --git a/compiler/record-minmax-conversion-test/README.md b/compiler/record-minmax-conversion-test/README.md
new file mode 100644
index 000000000..cc06c1542
--- /dev/null
+++ b/compiler/record-minmax-conversion-test/README.md
@@ -0,0 +1,5 @@
+# record-minmax-conversion-test
+
+Run `record-minmax` with random input data and Circle models listed in `test.lst`. This test checks whether a given Circle model can be converted to a Circle model embedded with min/max values without failure.
+
+Write `test.local.lst` for local test.
diff --git a/compiler/record-minmax-conversion-test/gen_h5_random_inputs.py b/compiler/record-minmax-conversion-test/gen_h5_random_inputs.py
new file mode 100755
index 000000000..b7709812c
--- /dev/null
+++ b/compiler/record-minmax-conversion-test/gen_h5_random_inputs.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+import h5py as h5
+import numpy as np
+import tensorflow as tf
+import argparse
+
+#
+# This script generates a pack of random input data (.h5) expected by the input tflite model
+#
+# Basic usage:
+# gen_h5_random_inputs.py --model <path/to/tflite/model> --num_data <number/of/data> --output <path/to/output/data>
+# ex: gen_h5_random_inputs.py --model add.tflite --num_data 3 --output add.tflite.input.h5
+# (This will create add.tflite.input.h5 composed of three random inputs in the same directory as the model)
+parser = argparse.ArgumentParser()
+parser.add_argument('--model', type=str, required=True)
+parser.add_argument('--num_data', type=int, required=True)
+parser.add_argument('--output', type=str, required=True)
+args = parser.parse_args()
+
+model = args.model
+
+num_data = args.num_data
+
+output_path = args.output
+
+# Build TFLite interpreter. (to get the information of model input)
+interpreter = tf.lite.Interpreter(model)
+input_details = interpreter.get_input_details()
+
+# Create h5 file
+h5_file = h5.File(output_path, 'w')
+group = h5_file.create_group("value")
+group.attrs['desc'] = "Input data for " + model
+
+# Generate random data
+for i in range(num_data):
+ sample = group.create_group(str(i))
+ sample.attrs['desc'] = "Input data " + str(i)
+
+ for j in range(len(input_details)):
+ input_detail = input_details[j]
+ input_data = np.array(
+ np.random.random_sample(input_detail["shape"]), input_detail["dtype"])
+ sample.create_dataset(str(j), data=input_data)
+
+h5_file.close()
diff --git a/compiler/record-minmax-conversion-test/requires.cmake b/compiler/record-minmax-conversion-test/requires.cmake
new file mode 100644
index 000000000..9105c3e2e
--- /dev/null
+++ b/compiler/record-minmax-conversion-test/requires.cmake
@@ -0,0 +1,2 @@
+require("common-artifacts")
+require("record-minmax")
diff --git a/compiler/record-minmax-conversion-test/test.lst b/compiler/record-minmax-conversion-test/test.lst
new file mode 100644
index 000000000..771c3bd66
--- /dev/null
+++ b/compiler/record-minmax-conversion-test/test.lst
@@ -0,0 +1,16 @@
+addTest(Add_000)
+addTest(AveragePool2D_000)
+addTest(Concatenation_000)
+addTest(Conv2D_000)
+addTest(Conv2D_001)
+addTest(Conv2D_002)
+addTest(DepthwiseConv2D_000)
+addTest(FullyConnected_000)
+addTest(FullyConnected_001)
+addTest(MaxPool2D_000)
+addTest(Mul_000)
+addTest(Pad_000)
+addTest(Reshape_000)
+addTest(Reshape_001)
+addTest(Reshape_002)
+addTest(Softmax_000)
diff --git a/compiler/record-minmax-conversion-test/testall.sh b/compiler/record-minmax-conversion-test/testall.sh
new file mode 100755
index 000000000..29c9ed3d1
--- /dev/null
+++ b/compiler/record-minmax-conversion-test/testall.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+# This script tests the basic behavior of record-minmax
+#
+# HOW TO USE
+#
+# ./testall.sh <path/to/test.config> <path/to/work_dir> <path/to/venv> <TEST 1> <TEST 2> ...
+# test.config : set ${RECORD_MINMAX_PATH}
+# work_dir : artifacts directory where the test models (*.tflite, *.circle) exist (ex: build/compiler/common-artifacts)
+
+GEN_SOURCE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+GEN_SCRIPT_PATH="${GEN_SOURCE_PATH}/gen_h5_random_inputs.py"
+CONFIG_PATH="$1"; shift
+BIN_PATH=$(dirname "$CONFIG_PATH")
+WORKDIR="$1"; shift
+VIRTUALENV="$1"; shift
+
+source "${CONFIG_PATH}"
+
+echo "-- Found RECORD-MINMAX: ${RECORD_MINMAX_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+for TESTCASE in "$@"; do
+ TESTED+=("${TESTCASE}")
+
+ TESTCASE_FILE="${WORKDIR}/${TESTCASE}"
+
+ PASSED_TAG="${BIN_PATH}/${TESTCASE}.passed"
+ rm -f "${PASSED_TAG}"
+
+ cat > "${BIN_PATH}/${TESTCASE}.log" <(
+ exec 2>&1
+ set -ex
+
+ # Generate h5 input data
+ source "${VIRTUALENV}/bin/activate"
+ "${VIRTUALENV}/bin/python" "${GEN_SCRIPT_PATH}" \
+ --model "${TESTCASE_FILE}.tflite" \
+ --num_data 3 \
+ --output "${BIN_PATH}/${TESTCASE}.tflite.input.h5"
+
+ if [[ $? -ne 0 ]]; then
+ echo "FAILED TO GENERATE INPUT"
+ continue
+ fi
+
+ # Run record-minmax
+ "${RECORD_MINMAX_PATH}" \
+ --input_model "${TESTCASE_FILE}.circle" \
+ --input_data "${BIN_PATH}/${TESTCASE}.tflite.input.h5" \
+ --output_model "${BIN_PATH}/${TESTCASE}.out.circle"
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$TESTCASE")
+ else
+ FAILED+=("$TESTCASE")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/record-minmax/CMakeLists.txt b/compiler/record-minmax/CMakeLists.txt
new file mode 100644
index 000000000..862660e06
--- /dev/null
+++ b/compiler/record-minmax/CMakeLists.txt
@@ -0,0 +1,27 @@
+nnas_find_package(HDF5 COMPONENTS STATIC QUIET)
+
+if(NOT HDF5_FOUND)
+ message(STATUS "Build record-minmax: FAILED (missing HDF5)")
+ return()
+endif(NOT HDF5_FOUND)
+
+set(DRIVER "driver/Driver.cpp")
+
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_executable(record-minmax ${DRIVER} ${SOURCES})
+target_include_directories(record-minmax PRIVATE include)
+target_include_directories(record-minmax PRIVATE ${HDF5_INCLUDE_DIRS})
+
+target_link_libraries(record-minmax ${HDF5_CXX_LIBRARIES})
+target_link_libraries(record-minmax arser)
+target_link_libraries(record-minmax safemain)
+target_link_libraries(record-minmax luci_import)
+target_link_libraries(record-minmax luci_export)
+target_link_libraries(record-minmax luci_interpreter)
+
+install(TARGETS record-minmax DESTINATION bin)
+
+nnas_find_package(GTest REQUIRED)
+GTest_AddTest(record_minmax_function_test "${CMAKE_CURRENT_SOURCE_DIR}/tests/RecordFunction.test.cpp")
+target_include_directories(record_minmax_function_test PRIVATE include)
diff --git a/compiler/record-minmax/README.md b/compiler/record-minmax/README.md
new file mode 100644
index 000000000..6a491f279
--- /dev/null
+++ b/compiler/record-minmax/README.md
@@ -0,0 +1,18 @@
+# record-minmax
+
+_record-minmax_ is a tool to embed min/max values of activations to the circle model for post-training quantization.
+
+## Usage
+
+This will run with the path to the input model (.circle), a pack of input data (.h5), and the output model (.circle).
+
+```
+$ ./record-minmax --input_model <path_to_input_model> --input_data <path_to_input_data> --output_model <path_to_output_model>
+```
+
+For example,
+```
+$ ./record-minmax --input_model input.circle --input_data input.h5 --output_model out.circle
+```
+
+Output is a circle model where min/max values of activation tensors are saved in QuantizationParameters.
diff --git a/compiler/record-minmax/driver/Driver.cpp b/compiler/record-minmax/driver/Driver.cpp
new file mode 100644
index 000000000..ae4fcb7c7
--- /dev/null
+++ b/compiler/record-minmax/driver/Driver.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RecordMinMax.h"
+
+#include <arser/arser.h>
+
+int entry(const int argc, char **argv)
+{
+ using namespace record_minmax;
+
+ arser::Arser arser(
+ "Embedding min/max values of activations to the circle model for post-training quantization");
+
+ arser.add_argument("--input_model")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .required(true)
+ .help("Input model filepath");
+
+ arser.add_argument("--input_data")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .required(true)
+ .help("Input data filepath");
+
+ arser.add_argument("--output_model")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .required(true)
+ .help("Output model filepath");
+
+ arser.add_argument("--min_percentile")
+ .nargs(1)
+ .type(arser::DataType::FLOAT)
+ .help("Record n'th percentile of min");
+
+ arser.add_argument("--max_percentile")
+ .nargs(1)
+ .type(arser::DataType::FLOAT)
+ .help("Record n'th percentile of max");
+
+ arser.add_argument("--mode")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .help("Record mode. percentile (default) or moving_average");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
+ }
+
+ auto input_model_path = arser.get<std::string>("--input_model");
+ auto input_data_path = arser.get<std::string>("--input_data");
+ auto output_model_path = arser.get<std::string>("--output_model");
+
+ // Default values
+ std::string mode("percentile");
+ float min_percentile = 1.0;
+ float max_percentile = 99.0;
+
+ if (arser["--min_percentile"])
+ min_percentile = arser.get<float>("--min_percentile");
+
+ if (arser["--max_percentile"])
+ max_percentile = arser.get<float>("--max_percentile");
+
+ if (arser["--mode"])
+ mode = arser.get<std::string>("--mode");
+
+ if (mode != "percentile" && mode != "moving_average")
+ throw std::runtime_error("Unsupported mode");
+
+ RecordMinMax rmm;
+
+ // Initialize interpreter and observer
+ rmm.initialize(input_model_path);
+
+ // Profile min/max while executing the given input data
+ rmm.profileData(mode, input_data_path, min_percentile, max_percentile);
+
+ // Save profiled values to the model
+ rmm.saveModel(output_model_path);
+
+ return EXIT_SUCCESS;
+}
diff --git a/compiler/record-minmax/include/MinMaxObserver.h b/compiler/record-minmax/include/MinMaxObserver.h
new file mode 100644
index 000000000..ce63438ac
--- /dev/null
+++ b/compiler/record-minmax/include/MinMaxObserver.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RECORD_MINMAX_MINMAXOBSERVER_H__
+#define __RECORD_MINMAX_MINMAXOBSERVER_H__
+
+#include <luci_interpreter/Interpreter.h>
+#include <luci_interpreter/core/Tensor.h>
+
+#include <vector>
+#include <unordered_map>
+
+namespace record_minmax
+{
+
+struct MinMaxVectors
+{
+ std::vector<float> min_vector;
+ std::vector<float> max_vector;
+};
+
+class MinMaxMap
+{
+public:
+ // Record min/max of node
+ void recordMinMax(const luci::CircleNode *node, float min, float max)
+ {
+ MinMaxVectors &vectors = _minmax_map[node];
+ vectors.min_vector.push_back(min);
+ vectors.max_vector.push_back(max);
+ }
+
+ const std::unordered_map<const luci::CircleNode *, MinMaxVectors> *getMap() const
+ {
+ return &_minmax_map;
+ }
+
+private:
+ std::unordered_map<const luci::CircleNode *, MinMaxVectors> _minmax_map;
+};
+
+class MinMaxObserver : public luci_interpreter::ExecutionObserver
+{
+public:
+ MinMaxObserver()
+ {
+ // Do nothing
+ }
+
+ void postTensorWrite(const luci::CircleNode *node,
+ const luci_interpreter::Tensor *tensor) override;
+
+ const MinMaxMap *minMaxData() { return &_minmax_data; }
+
+private:
+ MinMaxMap _minmax_data;
+};
+
+} // namespace record_minmax
+
+#endif // __RECORD_MINMAX_MINMAXOBSERVER_H__
diff --git a/compiler/record-minmax/include/RecordFunction.h b/compiler/record-minmax/include/RecordFunction.h
new file mode 100644
index 000000000..b570c6a0a
--- /dev/null
+++ b/compiler/record-minmax/include/RecordFunction.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <cassert>
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+#include <stdexcept>
+
+namespace record_minmax
+{
+
+/**
+ * @brief getNthPercentile calculates the n-th percentile (0.0 <= n <= 100.0) of the input vector with linear interpolation
+ *        NOTE(review): the 0.0/100.0 shortcuts return front()/back() of the UNSORTED input and run before the empty-vector check -- verify
+ */
+float getNthPercentile(std::vector<float> &vector, float percentile)
+{
+ if (percentile < 0 || percentile > 100)
+ throw std::runtime_error("Percentile must be ranged from 0 to 100");
+
+ if (percentile == 0.0)
+ return vector.front();
+
+ if (percentile == 100.0)
+ return vector.back();
+
+ if (vector.empty())
+ throw std::runtime_error("Percentile must take a non-empty vector as an argument");
+
+ if (vector.size() == 1)
+ return vector[0];
+
+ std::vector<float> copy;
+ copy.assign(vector.begin(), vector.end());
+ std::sort(copy.begin(), copy.end());
+
+ int index = static_cast<int>(std::floor((copy.size() - 1) * percentile / 100.0));
+
+ float percent_i = static_cast<float>(index) / static_cast<float>(copy.size() - 1);
+ float fraction =
+ (percentile / 100.0 - percent_i) / ((index + 1.0) / (copy.size() - 1.0) - percent_i);
+ float res = copy[index] + fraction * (copy[index + 1] - copy[index]);
+ return res;
+}
+
+/**
+ * @brief getMovingAverage calculates the weighted moving average of input vector
+ * The initial value is the minimum (or maximum) value of the first batch of the vector
+ */
+float getMovingAverage(const std::vector<float> &vector, const float alpha,
+ const uint8_t batch_size, bool is_min)
+{
+ assert(!vector.empty());
+ assert(alpha >= 0.0 && alpha <= 1.0);
+ assert(batch_size > 0);
+
+ auto getBatchMinOrMax = [&](int start_index) {
+ assert(start_index >= 0 && start_index < vector.size());
+
+ float res = is_min ? std::numeric_limits<float>::max() : std::numeric_limits<float>::lowest();
+ for (int offset = 0; offset < batch_size; offset++)
+ {
+ int index = start_index + offset;
+ if (index >= vector.size())
+ break;
+
+ if (is_min)
+ {
+ res = vector[index] < res ? vector[index] : res;
+ }
+ else
+ {
+ res = vector[index] > res ? vector[index] : res;
+ }
+ }
+ return res;
+ };
+
+ float curr_avg = getBatchMinOrMax(0);
+ for (size_t i = batch_size; i < vector.size(); i += batch_size)
+ {
+ curr_avg = curr_avg * alpha + getBatchMinOrMax(i) * (1.0 - alpha);
+ }
+ return curr_avg;
+}
+
+} // namespace record_minmax
diff --git a/compiler/record-minmax/include/RecordMinMax.h b/compiler/record-minmax/include/RecordMinMax.h
new file mode 100644
index 000000000..ffdb17aec
--- /dev/null
+++ b/compiler/record-minmax/include/RecordMinMax.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RECORD_MINMAX_H__
+#define __RECORD_MINMAX_H__
+
+#include <luci/IR/Module.h>
+#include <luci_interpreter/Interpreter.h>
+
+#include "MinMaxObserver.h"
+
+#include <memory>
+
+namespace record_minmax
+{
+
+class RecordMinMax
+{
+public:
+ explicit RecordMinMax() = default;
+
+ ~RecordMinMax() = default;
+
+ void initialize(const std::string &input_model_path);
+
+ void profileData(const std::string &mode, const std::string &input_data_path,
+ float min_percentile, float max_percentile);
+
+ void saveModel(const std::string &output_model_path);
+
+private:
+ std::unique_ptr<luci::Module> _module;
+ std::unique_ptr<luci_interpreter::Interpreter> _interpreter;
+ std::unique_ptr<MinMaxObserver> _observer;
+};
+
+} // namespace record_minmax
+
+#endif // __RECORD_MINMAX_H__
diff --git a/compiler/record-minmax/requires.cmake b/compiler/record-minmax/requires.cmake
new file mode 100644
index 000000000..054503539
--- /dev/null
+++ b/compiler/record-minmax/requires.cmake
@@ -0,0 +1,3 @@
+require("luci")
+require("safemain")
+require("arser")
diff --git a/compiler/record-minmax/src/CircleExpContract.cpp b/compiler/record-minmax/src/CircleExpContract.cpp
new file mode 100644
index 000000000..b703250bd
--- /dev/null
+++ b/compiler/record-minmax/src/CircleExpContract.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CircleExpContract.h"
+
+#include <oops/InternalExn.h>
+
+#include <fstream>
+#include <iostream>
+
+namespace record_minmax
+{
+
+bool CircleExpContract::store(const char *ptr, const size_t size) const
+{
+ if (!ptr)
+ INTERNAL_EXN("Graph was not serialized by FlatBuffer for some reason");
+
+ std::ofstream fs(_filepath, std::ofstream::binary);
+ fs.write(ptr, size);
+
+ return fs.good();
+}
+
+} // namespace record_minmax
diff --git a/compiler/record-minmax/src/CircleExpContract.h b/compiler/record-minmax/src/CircleExpContract.h
new file mode 100644
index 000000000..ab00fa860
--- /dev/null
+++ b/compiler/record-minmax/src/CircleExpContract.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RECORD_MINMAX_CIRCLEXPCONTRACT_H__
+#define __RECORD_MINMAX_CIRCLEXPCONTRACT_H__
+
+#include <loco.h>
+#include <luci/CircleExporter.h>
+#include <luci/IR/Module.h>
+
+#include <string>
+
+namespace record_minmax
+{
+
+struct CircleExpContract : public luci::CircleExporter::Contract
+{
+public:
+ CircleExpContract(luci::Module *module, const std::string &filename)
+ : _module(module), _filepath(filename)
+ {
+ // NOTHING TO DO
+ }
+ virtual ~CircleExpContract() = default;
+
+public:
+ loco::Graph *graph(void) const final { return nullptr; }
+ luci::Module *module(void) const final { return _module; };
+
+public:
+ bool store(const char *ptr, const size_t size) const final;
+
+private:
+ luci::Module *_module;
+ const std::string _filepath;
+};
+
+} // namespace record_minmax
+
+#endif // __RECORD_MINMAX_CIRCLEXPCONTRACT_H__
diff --git a/compiler/record-minmax/src/HDF5Importer.cpp b/compiler/record-minmax/src/HDF5Importer.cpp
new file mode 100644
index 000000000..cf30cd863
--- /dev/null
+++ b/compiler/record-minmax/src/HDF5Importer.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "HDF5Importer.h"
+
+#include <H5Cpp.h>
+
+#include <string>
+#include <cassert>
+
+using Shape = luci_interpreter::Shape;
+using DataType = luci_interpreter::DataType;
+
+namespace
+{
+
+Shape toInternalShape(const H5::DataSpace &dataspace)
+{
+ int rank = dataspace.getSimpleExtentNdims();
+
+ std::vector<hsize_t> dims;
+ dims.resize(rank, 0);
+ dataspace.getSimpleExtentDims(dims.data());
+
+ Shape res(rank);
+ for (int axis = 0; axis < rank; ++axis)
+ {
+ res.dim(axis) = dims[axis];
+ }
+
+ return res;
+}
+
+DataType toInternalDtype(const H5::DataType &h5_type)
+{
+ if (h5_type == H5::PredType::IEEE_F32BE || h5_type == H5::PredType::IEEE_F32LE)
+ {
+ return DataType::FLOAT32;
+ }
+ if (h5_type == H5::PredType::STD_I32BE || h5_type == H5::PredType::STD_I32LE)
+ {
+ return DataType::S32;
+ }
+ if (h5_type == H5::PredType::STD_I64BE || h5_type == H5::PredType::STD_I64LE)
+ {
+ return DataType::S64;
+ }
+ // Only support three datatypes for now
+ return DataType::Unknown;
+}
+
+void readTensorData(H5::DataSet &tensor, uint8_t *buffer)
+{
+ tensor.read(buffer, H5::PredType::NATIVE_UINT8);
+}
+
+void readTensorData(H5::DataSet &tensor, float *buffer)
+{
+ tensor.read(buffer, H5::PredType::NATIVE_FLOAT);
+}
+
+void readTensorData(H5::DataSet &tensor, int32_t *buffer)
+{
+ tensor.read(buffer, H5::PredType::NATIVE_INT);
+}
+
+void readTensorData(H5::DataSet &tensor, int64_t *buffer)
+{
+ tensor.read(buffer, H5::PredType::NATIVE_LONG);
+}
+
+} // namespace
+
+namespace record_minmax
+{
+
+int32_t HDF5Importer::numInputs(int32_t record_idx)
+{
+ auto records = _value_grp.openGroup(std::to_string(record_idx));
+ return records.getNumObjs();
+}
+
+void HDF5Importer::readTensor(int32_t record_idx, int32_t input_idx, void *buffer)
+{
+ auto record = _value_grp.openGroup(std::to_string(record_idx));
+ auto tensor = record.openDataSet(std::to_string(input_idx));
+
+ readTensorData(tensor, static_cast<uint8_t *>(buffer));
+}
+
+void HDF5Importer::readTensor(int32_t record_idx, int32_t input_idx, DataType *dtype, Shape *shape,
+ void *buffer)
+{
+ auto record = _value_grp.openGroup(std::to_string(record_idx));
+ auto tensor = record.openDataSet(std::to_string(input_idx));
+
+ auto tensor_dtype = tensor.getDataType();
+ *dtype = toInternalDtype(tensor_dtype);
+
+ auto tensor_shape = tensor.getSpace();
+ *shape = toInternalShape(tensor_shape);
+
+ switch (*dtype)
+ {
+ case DataType::FLOAT32:
+ readTensorData(tensor, static_cast<float *>(buffer));
+ break;
+ case DataType::S32:
+ readTensorData(tensor, static_cast<int32_t *>(buffer));
+ break;
+ case DataType::S64:
+ readTensorData(tensor, static_cast<int64_t *>(buffer));
+ break;
+ default:
+ throw std::runtime_error{"Unsupported data type for input data (.h5)"};
+ }
+}
+
+} // namespace record_minmax
diff --git a/compiler/record-minmax/src/HDF5Importer.h b/compiler/record-minmax/src/HDF5Importer.h
new file mode 100644
index 000000000..cf6526685
--- /dev/null
+++ b/compiler/record-minmax/src/HDF5Importer.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RECORD_MINMAX_HDF5IMPORTER_H__
+#define __RECORD_MINMAX_HDF5IMPORTER_H__
+
+#include <luci_interpreter/core/Tensor.h>
+
+#include <H5Cpp.h>
+
+using Shape = luci_interpreter::Shape;
+using DataType = luci_interpreter::DataType;
+
+namespace record_minmax
+{
+
+// HDF5Importer reads input data saved in an HDF5 file at the given path
+// The hierarchy of the hdf5 file is as follows.
+// Group "/"
+// > Group "value"
+// > Group <record_idx>
+// > Dataset <input_idx>
+// record_idx : index of the record (dataset file can contain multiple records)
+// input_idx : index of the input (DNN model can have multiple inputs)
+// Ex: the j'th input of the i'th record can be accessed by "/value/i/j"
+class HDF5Importer
+{
+public:
+ explicit HDF5Importer(const std::string &path) : _file{path, H5F_ACC_RDONLY}
+ {
+ // Do nothing
+ }
+
+public:
+ /**
+ * @brief importGroup has to be called before readTensor is called
+ * Otherwise, readTensor will throw an exception
+ */
+ void importGroup() { _value_grp = _file.openGroup("value"); }
+
+ /**
+ * @brief Read tensor data from file and store it into buffer
+ * @details A tensor in the file can be retrieved with (record_idx, input_idx)
+ * @param record_idx : index of the record
+ * @param input_idx : index of the input
+ * @param dtype : pointer to write the tensor's data type
+ * @param shape : pointer to write the tensor's shape
+ * @param buffer : pointer to write the tensor's data
+ */
+ void readTensor(int32_t record_idx, int32_t input_idx, DataType *dtype, Shape *shape,
+ void *buffer);
+
+ // Read a raw tensor (no type/shape is specified)
+ void readTensor(int32_t record_idx, int32_t input_idx, void *buffer);
+
+ bool isRawData() { return _value_grp.attrExists("rawData"); }
+
+ int32_t numRecords() { return _value_grp.getNumObjs(); }
+
+ int32_t numInputs(int32_t record_idx);
+
+private:
+ H5::H5File _file;
+ H5::Group _value_grp;
+};
+
+} // namespace record_minmax
+
+#endif // __RECORD_MINMAX_HDF5IMPORTER_H__
diff --git a/compiler/record-minmax/src/MinMaxObserver.cpp b/compiler/record-minmax/src/MinMaxObserver.cpp
new file mode 100644
index 000000000..45f0197c8
--- /dev/null
+++ b/compiler/record-minmax/src/MinMaxObserver.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MinMaxObserver.h"
+
+#include <luci/IR/CircleOpcode.h>
+
+using DataType = luci_interpreter::DataType;
+
+namespace record_minmax
+{
+
+// postTensorWrite is only called for a node producing a tensor
+void MinMaxObserver::postTensorWrite(const luci::CircleNode *node,
+ const luci_interpreter::Tensor *tensor)
+{
+ // CircleOutput does not produce a tensor
+ assert(node->opcode() != luci::CircleOpcode::CIRCLEOUTPUT);
+
+ // Operators with multiple outputs
+ assert(node->opcode() != luci::CircleOpcode::IF);
+ assert(node->opcode() != luci::CircleOpcode::SPLIT);
+ assert(node->opcode() != luci::CircleOpcode::SPLIT_V);
+ assert(node->opcode() != luci::CircleOpcode::TOPK_V2);
+ assert(node->opcode() != luci::CircleOpcode::UNPACK);
+ assert(node->opcode() != luci::CircleOpcode::WHILE);
+
+ if (node->opcode() == luci::CircleOpcode::CONST)
+ {
+ // node is not activation. Do nothing.
+ return;
+ }
+
+ if (node->opcode() == luci::CircleOpcode::ARG_MAX)
+ {
+ // Output of arg_max is the index of the largest value across axes of a tensor
+ // this should not be quantized
+ return;
+ }
+
+ // Only support recording of float32 values
+ if (tensor->element_type() != DataType::FLOAT32)
+ throw std::runtime_error("Tensor's data type is not float");
+
+ const auto data = tensor->data<float>();
+ const auto num_elements = tensor->shape().num_elements();
+
+ std::vector<float> buf(data, data + num_elements);
+ auto minmax = std::minmax_element(buf.begin(), buf.end());
+ float min = *minmax.first;
+ float max = *minmax.second;
+
+ _minmax_data.recordMinMax(node, min, max);
+}
+
+} // namespace record_minmax
diff --git a/compiler/record-minmax/src/RecordMinMax.cpp b/compiler/record-minmax/src/RecordMinMax.cpp
new file mode 100644
index 000000000..d12a0d3ae
--- /dev/null
+++ b/compiler/record-minmax/src/RecordMinMax.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RecordMinMax.h"
+#include "RecordFunction.h"
+#include "CircleExpContract.h"
+#include "MinMaxObserver.h"
+#include "HDF5Importer.h"
+
+#include <luci/Importer.h>
+#include <luci/CircleExporter.h>
+#include <luci/IR/CircleQuantParam.h>
+
+#include <algorithm>
+#include <cmath>
+#include <fstream>
+#include <numeric>
+#include <stdexcept>
+#include <iostream>
+
+using Shape = luci_interpreter::Shape;
+using DataType = luci_interpreter::DataType;
+
+namespace
+{
+
+/**
+ * @brief getTensorSize will return size in bytes
+ */
+template <typename NodeT> size_t getTensorSize(const NodeT *node)
+{
+ uint32_t tensor_size = loco::size(node->dtype());
+ for (uint32_t i = 0; i < node->rank(); ++i)
+ tensor_size *= node->dim(i).value();
+ return tensor_size;
+}
+
+/**
+ * @brief verifyTypeShape checks the type and the shape of CircleInput
+ * This throws an exception if type or shape does not match
+ */
+void verifyTypeShape(const luci::CircleInput *input_node, const DataType &dtype, const Shape &shape)
+{
+ // Type check
+ if (dtype != input_node->dtype())
+ throw std::runtime_error("Wrong input type.");
+
+ if (shape.num_dims() != input_node->rank())
+ throw std::runtime_error("Input rank mismatch.");
+
+ for (uint32_t i = 0; i < shape.num_dims(); i++)
+ {
+ if (shape.dim(i) != input_node->dim(i).value())
+ throw std::runtime_error("Input shape mismatch.");
+ }
+}
+
+} // namespace
+
+namespace record_minmax
+{
+
+void RecordMinMax::initialize(const std::string &input_model_path)
+{
+ // Load model from the file
+ std::ifstream fs(input_model_path, std::ifstream::binary);
+ if (fs.fail())
+ {
+ throw std::runtime_error("Cannot open model file \"" + input_model_path + "\".\n");
+ }
+ std::vector<char> model_data((std::istreambuf_iterator<char>(fs)),
+ std::istreambuf_iterator<char>());
+ _module = luci::Importer().importModule(circle::GetModel(model_data.data()));
+
+ if (_module == nullptr)
+ {
+ throw std::runtime_error("ERROR: Failed to load '" + input_model_path + "'");
+ }
+
+ // Initialize interpreter
+ _interpreter = std::make_unique<luci_interpreter::Interpreter>(_module.get());
+
+ _observer = std::make_unique<MinMaxObserver>();
+
+ _interpreter->attachObserver(_observer.get());
+}
+
+void RecordMinMax::profileData(const std::string &mode, const std::string &input_data_path,
+ float min_percentile, float max_percentile)
+{
+ HDF5Importer importer(input_data_path);
+ importer.importGroup();
+
+ bool is_raw_data = importer.isRawData();
+
+ const auto num_records = importer.numRecords();
+ if (num_records == 0)
+ throw std::runtime_error("The input data file does not contain any record.");
+
+ const auto input_nodes = loco::input_nodes(_module->graph());
+ const auto num_inputs = input_nodes.size();
+
+ for (int32_t record_idx = 0; record_idx < num_records; record_idx++)
+ {
+ if (num_inputs != importer.numInputs(record_idx))
+ throw std::runtime_error("Wrong number of inputs.");
+
+ if (record_idx % 100 == 0)
+ std::cout << "Recording " << record_idx << "'th data" << std::endl;
+
+ for (int32_t input_idx = 0; input_idx < num_inputs; input_idx++)
+ {
+ const auto *input_node = loco::must_cast<const luci::CircleInput *>(input_nodes[input_idx]);
+ assert(input_node->index() == input_idx);
+ std::vector<char> input_data(getTensorSize(input_node));
+
+ if (!is_raw_data)
+ {
+ DataType dtype;
+ Shape shape(input_node->rank());
+ importer.readTensor(record_idx, input_idx, &dtype, &shape, input_data.data());
+
+ // Check the type and the shape of the input data is valid
+ verifyTypeShape(input_node, dtype, shape);
+ }
+ else
+ {
+ // Skip type/shape check for raw data
+ importer.readTensor(record_idx, input_idx, input_data.data());
+ }
+
+ // TODO: Input data is copied twice (file -> buffer (input_data) -> interpreter inputs)
+ // We can reduce the copy by directly writing data from file to interpreter inputs
+ _interpreter->writeInputTensor(input_node, input_data.data(), input_data.size());
+ }
+
+ _interpreter->interpret();
+ }
+
+ std::cout << "Recording finished. Number of recorded data: " << num_records << std::endl;
+
+ auto minmax_map = _observer->minMaxData()->getMap();
+ for (auto iter = minmax_map->begin(); iter != minmax_map->end(); ++iter)
+ {
+ auto node = iter->first;
+ auto minmax = iter->second;
+
+ float min, max;
+ if (mode == "percentile")
+ {
+ min = getNthPercentile(minmax.min_vector, min_percentile);
+ max = getNthPercentile(minmax.max_vector, max_percentile);
+ }
+ else if (mode == "moving_average")
+ {
+ min = getMovingAverage(minmax.min_vector, 0.9, 16, true);
+ max = getMovingAverage(minmax.max_vector, 0.9, 16, false);
+ }
+ assert(mode == "percentile" || mode == "moving_average");
+ auto quantparam = std::make_unique<luci::CircleQuantParam>();
+ quantparam->min.push_back(min);
+ quantparam->max.push_back(max);
+
+ assert(node->quantparam() == nullptr);
+
+ auto mutable_node = const_cast<luci::CircleNode *>(node);
+ mutable_node->quantparam(std::move(quantparam));
+ }
+}
+
+void RecordMinMax::saveModel(const std::string &output_model_path)
+{
+ // Export to output Circle file
+ luci::CircleExporter exporter;
+ CircleExpContract contract(_module.get(), output_model_path);
+
+ if (!exporter.invoke(&contract))
+ {
+ throw std::runtime_error("ERROR: Failed to export '" + output_model_path + "'");
+ }
+}
+
+} // namespace record_minmax
diff --git a/compiler/record-minmax/tests/RecordFunction.test.cpp b/compiler/record-minmax/tests/RecordFunction.test.cpp
new file mode 100644
index 000000000..13b464db9
--- /dev/null
+++ b/compiler/record-minmax/tests/RecordFunction.test.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RecordFunction.h"
+
+#include <vector>
+#include <cmath>
+
+#include <gtest/gtest.h>
+
+namespace record_minmax
+{
+
+#define EXPECT_FLOAT_NEAR(exp, val) EXPECT_NEAR(exp, val, 1e-5 + 1e-5 * std::abs(exp))
+
+TEST(GetNthPercentileTest, Edge)
+{
+ std::vector<float> input{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ EXPECT_FLOAT_NEAR(0, getNthPercentile(input, 0));
+ EXPECT_FLOAT_NEAR(9, getNthPercentile(input, 100));
+}
+
+TEST(GetNthPercentileTest, Simple)
+{
+ std::vector<float> input{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ for (float i = 1; i <= 99; i++)
+ {
+ EXPECT_FLOAT_NEAR(0.09 * i, getNthPercentile(input, i));
+ }
+
+ for (float i = 0.5; i <= 99.5; i++)
+ {
+ EXPECT_FLOAT_NEAR(0.09 * std::floor(i) + 0.045, getNthPercentile(input, i));
+ }
+}
+
+TEST(GetNthPercentileTest, Float)
+{
+ std::vector<float> input{8.48424583, 89.39998456, 65.83323245, 87.85243858, 68.85414866,
+ 98.40591775, 16.74266565, 25.09415131, 74.54084952, 29.70536481,
+ 49.26803928, 79.49602425, 53.69395631, 73.73140271, 99.81245733,
+ 46.76997646, 78.37688474, 10.43076744, 30.39480496, 14.30875609,
+ 86.72073486, 17.97364969, 14.66724564, 0.47818459, 17.77138025,
+ 85.68981239, 22.18322696, 78.81541331, 93.04085581, 40.2147895};
+
+ EXPECT_FLOAT_NEAR(2.799942346802177, getNthPercentile(input, 1));
+ EXPECT_FLOAT_NEAR(7.768503955476342, getNthPercentile(input, 3.14));
+ EXPECT_FLOAT_NEAR(99.40456084968194, getNthPercentile(input, 99));
+}
+
+TEST(GetNthPercentileTest, FloatWithNegative)
+{
+ std::vector<float> input{-41.51575417, 39.39998456, 15.83323245, 37.85243858, 18.85414866,
+ 48.40591775, -33.25733435, -24.90584869, 24.54084952, -20.29463519,
+ -0.73196072, 29.49602425, 3.69395631, 23.73140271, 49.81245733,
+ -3.23002354, 28.37688474, -39.56923256, -19.60519504, -35.69124391,
+ 36.72073486, -32.02635031, -35.33275436, -49.52181541, -32.22861975,
+ 35.68981239, -27.81677304, 28.81541331, 43.04085581, -9.7852105};
+
+ EXPECT_FLOAT_NEAR(-47.20005765319782, getNthPercentile(input, 1));
+ EXPECT_FLOAT_NEAR(-42.23149604452366, getNthPercentile(input, 3.14));
+ EXPECT_FLOAT_NEAR(49.40456084968194, getNthPercentile(input, 99));
+}
+
+TEST(GetNthPercentileTest, SigleElement)
+{
+ std::vector<float> input{33};
+
+ EXPECT_FLOAT_NEAR(33, getNthPercentile(input, 0));
+ EXPECT_FLOAT_NEAR(33, getNthPercentile(input, 50));
+ EXPECT_FLOAT_NEAR(33, getNthPercentile(input, 100));
+}
+
+TEST(GetNthPercentileTest, OutOfBoundary_NEG)
+{
+ std::vector<float> input{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+ EXPECT_THROW(getNthPercentile(input, -1), std::runtime_error);
+ EXPECT_THROW(getNthPercentile(input, 101), std::runtime_error);
+}
+
+TEST(GetNthPercentileTest, EmptyVector_NEG)
+{
+ std::vector<float> input;
+
+ EXPECT_THROW(getNthPercentile(input, 10), std::runtime_error);
+}
+
+} // namespace record_minmax
diff --git a/compiler/safemain/README.md b/compiler/safemain/README.md
new file mode 100644
index 000000000..18447048e
--- /dev/null
+++ b/compiler/safemain/README.md
@@ -0,0 +1 @@
+# safemain
diff --git a/compiler/souschef/CMakeLists.txt b/compiler/souschef/CMakeLists.txt
new file mode 100644
index 000000000..5a307be16
--- /dev/null
+++ b/compiler/souschef/CMakeLists.txt
@@ -0,0 +1,5 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(souschef STATIC ${SOURCES})
+set_target_properties(souschef PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(souschef PUBLIC include)
diff --git a/compiler/souschef/README.md b/compiler/souschef/README.md
new file mode 100644
index 000000000..a475ce418
--- /dev/null
+++ b/compiler/souschef/README.md
@@ -0,0 +1,3 @@
+# souschef
+
+_souschef_ is a common library for _tflchef_ and _circlechef_
diff --git a/compiler/souschef/include/souschef/Arguments.h b/compiler/souschef/include/souschef/Arguments.h
new file mode 100644
index 000000000..4556ce797
--- /dev/null
+++ b/compiler/souschef/include/souschef/Arguments.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_ARGUMENTS_H__
+#define __SOUSCHEF_ARGUMENTS_H__
+
+#include <cstdint>
+#include <string>
+
+namespace souschef
+{
+
+/**
+ * @brief Read-only string sequence view
+ */
+struct Arguments
+{
+ virtual ~Arguments() = default;
+
+ virtual uint32_t count(void) const = 0;
+ virtual const std::string &value(uint32_t n) const = 0;
+};
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_ARGUMENTS_H__
diff --git a/compiler/souschef/include/souschef/Data/Constant.h b/compiler/souschef/include/souschef/Data/Constant.h
new file mode 100644
index 000000000..6bb7bbab1
--- /dev/null
+++ b/compiler/souschef/include/souschef/Data/Constant.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_DATA_CONSTANT_H__
+#define __SOUSCHEF_DATA_CONSTANT_H__
+
+#include "souschef/DataChef.h"
+#include "souschef/LexicalCast.h"
+
+namespace souschef
+{
+
+template <typename T> class ConstantDataChef final : public DataChef
+{
+public:
+ ConstantDataChef(const T &value) : _value{value}
+ {
+ // DO NOTHING
+ }
+
+public:
+ std::vector<uint8_t> generate(int32_t count) const override
+ {
+ std::vector<uint8_t> res;
+
+ for (uint32_t n = 0; n < count; ++n)
+ {
+ const uint8_t *arr = reinterpret_cast<const uint8_t *>(&_value);
+
+ for (uint32_t b = 0; b < sizeof(T); ++b)
+ {
+ res.emplace_back(arr[b]);
+ }
+ }
+
+ return res;
+ }
+
+private:
+ T _value;
+};
+
+template <typename T> struct ConstantDataChefFactory : public DataChefFactory
+{
+ std::unique_ptr<DataChef> create(const Arguments &args) const
+ {
+ auto const value = to_number<T>(args.value(0));
+ return std::unique_ptr<DataChef>{new ConstantDataChef<T>{value}};
+ }
+};
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_DATA_CONSTANT_H__
diff --git a/compiler/souschef/include/souschef/Data/Explicit.h b/compiler/souschef/include/souschef/Data/Explicit.h
new file mode 100644
index 000000000..6e5ee819e
--- /dev/null
+++ b/compiler/souschef/include/souschef/Data/Explicit.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_DATA_EXPLICIT_H__
+#define __SOUSCHEF_DATA_EXPLICIT_H__
+
+#include "souschef/DataChef.h"
+#include "souschef/LexicalCast.h"
+
+#include <vector>
+
+namespace souschef
+{
+
+template <typename T> class ExplicitDataChef final : public DataChef
+{
+public:
+ ExplicitDataChef()
+ {
+ // DO NOTHING
+ }
+
+public:
+ std::vector<uint8_t> generate(int32_t count) const override
+ {
+ std::vector<uint8_t> res;
+
+ for (uint32_t n = 0; n < count; ++n)
+ {
+ T const value = (n < _values.size()) ? _values.at(n) : T{};
+ const uint8_t *arr = reinterpret_cast<const uint8_t *>(&value);
+
+ for (uint32_t b = 0; b < sizeof(T); ++b)
+ {
+ res.emplace_back(arr[b]);
+ }
+ }
+
+ return res;
+ }
+
+public:
+ void insert(const T &value) { _values.emplace_back(value); }
+
+private:
+ std::vector<T> _values;
+};
+
+template <typename T> struct ExplicitDataChefFactory : public DataChefFactory
+{
+ std::unique_ptr<DataChef> create(const Arguments &args) const
+ {
+ std::unique_ptr<ExplicitDataChef<T>> res{new ExplicitDataChef<T>};
+
+ for (uint32_t n = 0; n < args.count(); ++n)
+ {
+ auto const value = to_number<T>(args.value(n));
+ res->insert(value);
+ }
+
+ return std::move(res);
+ }
+};
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_DATA_EXPLICIT_H__
diff --git a/compiler/souschef/include/souschef/Data/Gaussian.h b/compiler/souschef/include/souschef/Data/Gaussian.h
new file mode 100644
index 000000000..75570e0b8
--- /dev/null
+++ b/compiler/souschef/include/souschef/Data/Gaussian.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_DATA_GAUSSIAN_H__
+#define __SOUSCHEF_DATA_GAUSSIAN_H__
+
+#include "souschef/DataChef.h"
+
+namespace souschef
+{
+
+/**
+ * @brief Generate a sequence of random values according to the gaussian(=normal) distribution
+ */
+class GaussianFloat32DataChef final : public DataChef
+{
+public:
+ GaussianFloat32DataChef(float mean, float stddev) : _mean{mean}, _stddev{stddev}
+ {
+ // DO NOTHING
+ }
+
+public:
+ std::vector<uint8_t> generate(int32_t count) const override;
+
+private:
+ float _mean;
+ float _stddev;
+};
+
+class GaussianInt32DataChef final : public DataChef
+{
+public:
+ GaussianInt32DataChef(float mean, float stddev) : _mean{mean}, _stddev{stddev}
+ {
+ // DO NOTHING
+ }
+
+public:
+ std::vector<uint8_t> generate(int32_t count) const override;
+
+private:
+ float _mean;
+ float _stddev;
+};
+
+class GaussianUint8DataChef final : public DataChef
+{
+public:
+ GaussianUint8DataChef(float mean, float stddev) : _mean{mean}, _stddev{stddev}
+ {
+ // DO NOTHING
+ }
+
+public:
+ std::vector<uint8_t> generate(int32_t count) const override;
+
+private:
+ float _mean;
+ float _stddev;
+};
+
+struct GaussianFloat32DataChefFactory : public DataChefFactory
+{
+ std::unique_ptr<DataChef> create(const Arguments &args) const;
+};
+
+struct GaussianInt32DataChefFactory : public DataChefFactory
+{
+ std::unique_ptr<DataChef> create(const Arguments &args) const;
+};
+
+struct GaussianUint8DataChefFactory : public DataChefFactory
+{
+ std::unique_ptr<DataChef> create(const Arguments &args) const;
+};
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_DATA_GAUSSIAN_H__
diff --git a/compiler/souschef/include/souschef/DataChef.def b/compiler/souschef/include/souschef/DataChef.def
new file mode 100644
index 000000000..28901db18
--- /dev/null
+++ b/compiler/souschef/include/souschef/DataChef.def
@@ -0,0 +1,19 @@
+#ifndef DATA_CHEF
+#error "Define DATA_CHEF first"
+#endif // DATA_CHEF
+
+// DATA_CHEF(TYPE, NAME, FACTORY_CLASS)
+// "TYPE" SHOULD BE an enum tag of tflchef::TensorType
+DATA_CHEF(FLOAT32, constant, ConstantDataChefFactory<float>)
+DATA_CHEF(BOOL, constant, ConstantDataChefFactory<bool>)
+DATA_CHEF(UINT8, constant, ConstantDataChefFactory<uint8_t>)
+DATA_CHEF(INT32, constant, ConstantDataChefFactory<int32_t>)
+DATA_CHEF(INT64, constant, ConstantDataChefFactory<int64_t>)
+DATA_CHEF(INT64, explicit, ExplicitDataChefFactory<int64_t>)
+DATA_CHEF(INT32, explicit, ExplicitDataChefFactory<int32_t>)
+DATA_CHEF(UINT8, explicit, ExplicitDataChefFactory<uint8_t>)
+DATA_CHEF(BOOL, explicit, ExplicitDataChefFactory<bool>)
+DATA_CHEF(FLOAT32, explicit, ExplicitDataChefFactory<float>)
+DATA_CHEF(FLOAT32, gaussian, GaussianFloat32DataChefFactory)
+DATA_CHEF(INT32, gaussian, GaussianInt32DataChefFactory)
+DATA_CHEF(UINT8, gaussian, GaussianUint8DataChefFactory)
diff --git a/compiler/souschef/include/souschef/DataChef.h b/compiler/souschef/include/souschef/DataChef.h
new file mode 100644
index 000000000..4a65dfc08
--- /dev/null
+++ b/compiler/souschef/include/souschef/DataChef.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_DATA_CHEF_H__
+#define __SOUSCHEF_DATA_CHEF_H__
+
+#include "Arguments.h"
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace souschef
+{
+
+using Data = std::vector<uint8_t>;
+
+/**
+ * @brief Data Generator
+ */
+struct DataChef
+{
+ virtual ~DataChef() = default;
+
+ // TODO Allow users to query the type of elements that this DataChef generates
+
+ /**
+ * @brief Generate a sequence of 'count' elements as a byte sequence
+ *
+ * Let D be the return value of generate(N).
+ * Then, D.size() == N * sizeof(T) where T is the element type.
+ */
+ virtual Data generate(int32_t count) const = 0;
+};
+
+/**
+ * @brief Data Generator Factory
+ */
+struct DataChefFactory
+{
+ virtual ~DataChefFactory() = default;
+
+ virtual std::unique_ptr<DataChef> create(const Arguments &args) const = 0;
+};
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_DATA_CHEF_H__
diff --git a/compiler/souschef/include/souschef/DataChefs.h b/compiler/souschef/include/souschef/DataChefs.h
new file mode 100644
index 000000000..7a86a2c2e
--- /dev/null
+++ b/compiler/souschef/include/souschef/DataChefs.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_DATA_CHEFS_H__
+#define __SOUSCHEF_DATA_CHEFS_H__
+
+#include "Data/Constant.h"
+#include "Data/Explicit.h"
+#include "Data/Gaussian.h"
+
+#endif // __SOUSCHEF_DATA_CHEFS_H__
diff --git a/compiler/souschef/include/souschef/Dataset.h b/compiler/souschef/include/souschef/Dataset.h
new file mode 100644
index 000000000..46a12e424
--- /dev/null
+++ b/compiler/souschef/include/souschef/Dataset.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_DATASET_H__
+#define __SOUSCHEF_DATASET_H__
+
+#include <vector>
+
+namespace souschef
+{
+
+template <typename T> class Dataset
+{
+public:
+ Dataset(const std::vector<T> &vec) : _vec{vec}
+ {
+ // DO NOTHING
+ }
+
+public:
+ Dataset(std::vector<T> &&vec) : _vec{std::move(vec)}
+ {
+ // DO NOTHING
+ }
+
+public:
+ template <typename Func> auto map(Func f) const -> Dataset<decltype(f(std::declval<T>()))>
+ {
+ using U = decltype(f(std::declval<T>()));
+ std::vector<U> res;
+
+ for (const auto &elem : _vec)
+ {
+ res.emplace_back(f(elem));
+ }
+
+ return Dataset<U>(std::move(res));
+ }
+
+public:
+ const std::vector<T> &vectorize(void) const { return _vec; }
+
+private:
+ std::vector<T> _vec;
+};
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_DATASET_H__
diff --git a/compiler/souschef/include/souschef/LexicalCast.h b/compiler/souschef/include/souschef/LexicalCast.h
new file mode 100644
index 000000000..d83cb2ab4
--- /dev/null
+++ b/compiler/souschef/include/souschef/LexicalCast.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @brief This file provides string <-> number cast helpers
+ */
+#ifndef __SOUSCHEF_LEXICAL_CAST_H__
+#define __SOUSCHEF_LEXICAL_CAST_H__
+
+#include <string>
+
+namespace souschef
+{
+
+/**
+ * @brief Return a numeric value that corresponds to a given string
+ *
+ * @note This function will throw an exception on casting failure
+ */
+template <typename Number> Number to_number(const std::string &s);
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_LEXICAL_CAST_H__
diff --git a/compiler/souschef/include/souschef/RangedArguments.h b/compiler/souschef/include/souschef/RangedArguments.h
new file mode 100644
index 000000000..dd50f593e
--- /dev/null
+++ b/compiler/souschef/include/souschef/RangedArguments.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_RANGED_ARGUMENTS_H__
+#define __SOUSCHEF_RANGED_ARGUMENTS_H__
+
+#include "Arguments.h"
+
+#include <string>
+
+namespace souschef
+{
+
+template <typename InputIt> class RangedArguments : public Arguments
+{
+public:
+ RangedArguments(InputIt beg, InputIt end) : _beg{beg}, _end{end}
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint32_t count(void) const override { return _end - _beg; }
+
+public:
+ const std::string &value(uint32_t n) const override { return *(_beg + n); }
+
+private:
+ InputIt _beg;
+ InputIt _end;
+};
+
+template <typename InputIt> RangedArguments<InputIt> ranged_arguments(InputIt beg, InputIt end)
+{
+ return RangedArguments<InputIt>{beg, end};
+}
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_RANGED_ARGUMENTS_H__
diff --git a/compiler/souschef/include/souschef/Registry.h b/compiler/souschef/include/souschef/Registry.h
new file mode 100644
index 000000000..9457b6a0f
--- /dev/null
+++ b/compiler/souschef/include/souschef/Registry.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SOUSCHEF_REGISTRY_H__
+#define __SOUSCHEF_REGISTRY_H__
+
+#include <map>
+#include <memory>
+#include <string>
+
+namespace souschef
+{
+
+template <typename T> class Registry
+{
+public:
+ void add(const std::string &name, std::unique_ptr<T> &&entry)
+ {
+ _content[name] = std::move(entry);
+ }
+
+ const T &lookup(const std::string &name) const { return *(_content.at(name)); }
+
+private:
+ std::map<std::string, std::unique_ptr<T>> _content;
+};
+
+} // namespace souschef
+
+#endif // __SOUSCHEF_REGISTRY_H__
diff --git a/compiler/souschef/src/Gaussian.cpp b/compiler/souschef/src/Gaussian.cpp
new file mode 100644
index 000000000..4a5083d8e
--- /dev/null
+++ b/compiler/souschef/src/Gaussian.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "souschef/Data/Gaussian.h"
+#include "souschef/LexicalCast.h"
+
+#include <random>
+#include <chrono>
+
+#include <cassert>
+#include <stdexcept>
+
+namespace souschef
+{
+
+std::vector<uint8_t> GaussianFloat32DataChef::generate(int32_t count) const
+{
+ // TODO Support seed value override
+ auto seed = std::chrono::system_clock::now().time_since_epoch().count();
+
+ std::minstd_rand rand{static_cast<std::minstd_rand::result_type>(seed)};
+ std::normal_distribution<float> dist{_mean, _stddev};
+
+ std::vector<uint8_t> res;
+
+ for (uint32_t n = 0; n < count; ++n)
+ {
+ auto const value = dist(rand);
+ auto const arr = reinterpret_cast<const uint8_t *>(&value);
+
+ for (uint32_t b = 0; b < sizeof(float); ++b)
+ {
+ res.emplace_back(arr[b]);
+ }
+ }
+
+ return res;
+}
+
+std::vector<uint8_t> GaussianInt32DataChef::generate(int32_t count) const
+{
+ // TODO Support seed value override
+ auto seed = std::chrono::system_clock::now().time_since_epoch().count();
+
+ std::minstd_rand rand{static_cast<std::minstd_rand::result_type>(seed)};
+ std::normal_distribution<float> dist{_mean, _stddev};
+
+ std::vector<uint8_t> res;
+
+ for (uint32_t n = 0; n < count; ++n)
+ {
+ auto const value = static_cast<int32_t>(dist(rand));
+ auto const arr = reinterpret_cast<const uint8_t *>(&value);
+
+ for (uint32_t b = 0; b < sizeof(int32_t); ++b)
+ {
+ res.emplace_back(arr[b]);
+ }
+ }
+
+ return res;
+}
+
+std::vector<uint8_t> GaussianUint8DataChef::generate(int32_t count) const
+{
+ // TODO Support seed value override
+ auto seed = std::chrono::system_clock::now().time_since_epoch().count();
+
+ std::minstd_rand rand{static_cast<std::minstd_rand::result_type>(seed)};
+ std::normal_distribution<float> dist{_mean, _stddev};
+
+ std::vector<uint8_t> res;
+
+ for (uint32_t n = 0; n < count; ++n)
+ {
+ auto const value = static_cast<uint8_t>(dist(rand)); // uint8_t for data type
+ auto const arr = reinterpret_cast<const uint8_t *>(&value); // uint8_t for byte streaming
+
+ for (uint32_t b = 0; b < sizeof(uint8_t); ++b)
+ {
+ res.emplace_back(arr[b]);
+ }
+ }
+
+ return res;
+}
+
+std::unique_ptr<DataChef> GaussianFloat32DataChefFactory::create(const Arguments &args) const
+{
+ if (args.count() != 2)
+ {
+ throw std::runtime_error{"invalid argument count: two arguments (mean/stddev) are expected"};
+ }
+
+ auto const mean = to_number<float>(args.value(0));
+ auto const stddev = to_number<float>(args.value(1));
+
+ return std::unique_ptr<DataChef>{new GaussianFloat32DataChef{mean, stddev}};
+}
+
+std::unique_ptr<DataChef> GaussianInt32DataChefFactory::create(const Arguments &args) const
+{
+ if (args.count() != 2)
+ {
+ throw std::runtime_error{"invalid argument count: two arguments (mean/stddev) are expected"};
+ }
+
+ auto const mean = to_number<float>(args.value(0));
+ auto const stddev = to_number<float>(args.value(1));
+
+ return std::unique_ptr<DataChef>{new GaussianInt32DataChef{mean, stddev}};
+}
+
+std::unique_ptr<DataChef> GaussianUint8DataChefFactory::create(const Arguments &args) const
+{
+ if (args.count() != 2)
+ {
+ throw std::runtime_error{"invalid argument count: two arguments (mean/stddev) are expected"};
+ }
+
+ auto const mean = to_number<float>(args.value(0));
+ auto const stddev = to_number<float>(args.value(1));
+
+ return std::unique_ptr<DataChef>{new GaussianUint8DataChef{mean, stddev}};
+}
+
+} // namespace souschef
diff --git a/compiler/souschef/src/LexicalCast.cpp b/compiler/souschef/src/LexicalCast.cpp
new file mode 100644
index 000000000..8e3d4cbbb
--- /dev/null
+++ b/compiler/souschef/src/LexicalCast.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "souschef/LexicalCast.h"
+
+#include <cassert>
+#include <limits>
+
+namespace souschef
+{
+
+template <> float to_number(const std::string &s) { return std::stof(s); }
+template <> int to_number(const std::string &s) { return std::stoi(s); }
+template <> int64_t to_number(const std::string &s) { return std::stoll(s); }
+template <> uint8_t to_number(const std::string &s)
+{
+ int temp = std::stoi(s);
+ assert(temp >= 0);
+ assert(temp <= std::numeric_limits<uint8_t>::max());
+ return static_cast<uint8_t>(temp);
+}
+template <> bool to_number(const std::string &s)
+{
+ if (std::stoi(s) || s == "T" || s == "t" || s == "TRUE" || s == "true")
+ return true;
+ return false;
+}
+
+} // namespace souschef
diff --git a/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt b/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt
index f5292a0d1..64dcc28fd 100644
--- a/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt
+++ b/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt
@@ -141,7 +141,7 @@ add_custom_command(
COMMAND ${CMAKE_COMMAND} -E echo 'HDF5_EXPORT_ACTION_PATH=\"$<TARGET_FILE:nnkit_HDF5_export_action>\"' >> ${TEST_CONFIG}
COMMAND ${CMAKE_COMMAND} -E echo 'HDF5_IMPORT_ACTION_PATH=\"$<TARGET_FILE:nnkit_HDF5_import_action>\"' >> ${TEST_CONFIG}
COMMAND ${CMAKE_COMMAND} -E echo 'MODEL2NNPKG_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'NNPKG_TEST_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/tools/nnpackage_tool/nnpkg_test/nnpkg_test.sh\"' >> ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'NNPKG_TEST_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/tests/scripts/nnpkg_test.sh\"' >> ${TEST_CONFIG}
COMMAND ${CMAKE_COMMAND} -E echo 'RUNTIME_LIBRARY_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/Product/out/\"' >> ${TEST_CONFIG}
DEPENDS
nnkit-run
diff --git a/compiler/tf2nnpackage-value-remote-test/CMakeLists.txt b/compiler/tf2nnpackage-value-remote-test/CMakeLists.txt
new file mode 100644
index 000000000..4a59e8849
--- /dev/null
+++ b/compiler/tf2nnpackage-value-remote-test/CMakeLists.txt
@@ -0,0 +1,94 @@
+include("test.lst")
+
+# Do not make test if there are no remote machine information
+if(NOT REMOTE_IP)
+ message(STATUS "tf2nnpackage-value-remote-test: need to set REMOTE IP")
+ return()
+endif(NOT REMOTE_IP)
+
+if(NOT REMOTE_USER)
+ message(STATUS "tf2nnpackage-value-remote-test: need to set REMOTE_USER")
+ return()
+endif(NOT REMOTE_USER)
+
+nnas_include(TargetRequire)
+
+unset(REQUIRED_TARGETS)
+list(APPEND REQUIRED_TARGETS testDataGenerator)
+TargetRequire_Return(${REQUIRED_TARGETS})
+
+message(STATUS "tf2nnpackage-value-remote-test: run tests")
+
+unset(TEST_NAMES)
+
+nncc_find_resource(TensorFlowLiteRecipes)
+set(TFLITE_RECIPE_REPO "${TensorFlowLiteRecipes_DIR}")
+
+file(GLOB SUBDIR RELATIVE ${TFLITE_RECIPE_REPO} ${TFLITE_RECIPE_REPO}/*)
+foreach(DIR IN ITEMS ${SUBDIR})
+ if(IS_DIRECTORY ${TFLITE_RECIPE_REPO}/${DIR})
+ list(APPEND TEST_NAMES ${DIR})
+ endif()
+endforeach()
+
+get_target_property(ARTIFACTS_SRC_PATH testDataGenerator SOURCE_DIR)
+
+# In this test, only the runtime test is performed because the test from tf to
+# nnpackage is done in common-artifacts, and for this runtime test, generation of
+# test data is required. And, tcgenerate in ${ARTIFACTS_SRC_PATH}/exclude.lst
+# means it won't generate test data, which is why below "tcgenerate" macro excludes
+# specific operators from runtime test.
+# Also, since the circlize and optimize macros included in the `exclude.lst` file
+# are only needed in common-artifacts, they have no function here.
+macro(circlize)
+endmacro()
+macro(optimize)
+endmacro()
+
+macro(tcgenerate NAME)
+ list(REMOVE_ITEM TEST_NAMES ${NAME})
+endmacro()
+
+include("${ARTIFACTS_SRC_PATH}/exclude.lst")
+
+# Copy testall
+set(TEST_RUNNER_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/testall.sh")
+set(TEST_RUNNER "${CMAKE_CURRENT_BINARY_DIR}/testall.sh")
+
+add_custom_command(
+ OUTPUT ${TEST_RUNNER}
+ COMMAND ${CMAKE_COMMAND} -E copy "${TEST_RUNNER_SOURCE}" "${TEST_RUNNER}"
+ DEPENDS ${TEST_RUNNER_SOURCE}
+ COMMENT "Generate test runner"
+)
+
+list(APPEND TEST_DEPS "${TEST_RUNNER}")
+
+get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR)
+
+# Generate test.config
+set(TEST_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/test.config")
+
+add_custom_command(
+ OUTPUT ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E remove -f ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'NNPKG_TEST_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/tests/scripts/nnpkg_test.sh\"' >> ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'RUNTIME_LIBRARY_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/Product/out/\"' >> ${TEST_CONFIG}
+ COMMENT "Generate test configuration"
+)
+
+list(APPEND TEST_DEPS "${TEST_CONFIG}")
+
+# This "tf2nnpackage_value_remote_test_deps" target enforces CMake to generate all the dependencies during "build" phase
+add_custom_target(tf2nnpackage_value_remote_test_deps ALL DEPENDS ${TEST_DEPS})
+
+# Run tests
+add_test(
+ NAME tf2nnpackage_value_remote_test
+ COMMAND "${TEST_RUNNER}"
+ "${TEST_CONFIG}"
+ "${ARTIFACTS_BIN_PATH}"
+ "${REMOTE_IP}"
+ "${REMOTE_USER}"
+ ${TEST_NAMES}
+)
diff --git a/compiler/tf2nnpackage-value-remote-test/README.md b/compiler/tf2nnpackage-value-remote-test/README.md
new file mode 100644
index 000000000..36436fc6b
--- /dev/null
+++ b/compiler/tf2nnpackage-value-remote-test/README.md
@@ -0,0 +1,60 @@
+# tf2nnpackage-value-remote-test
+
+`tf2nnpackage-value-remote-test` does random value test for nnpackage file using remote machine, normally Odroid, which `onert` runs on.
+
+### Prerequisites
+
+1. Runtime Library and Binary files
+ - Detailed information is located in [here](../../docs/howto/how-to-cross-build-runtime-for-arm.md)
+ - If you build runtime, related files will be produced in `Product/out`. Do not rename or move it.
+1. Remote machine information and test list
+ - You should create `test.lst` file first as shown below.
+ - Set IP address and username of remote machine using `set` command.
+ ```cmake
+ #--------------- Remote Machine Setting ---------------#
+ set(REMOTE_IP "xxx.xxx.xxx.xxx")
+ set(REMOTE_USER "remote_username")
+ ```
+  - If any recipe is added, or if `REMOTE_IP` and `REMOTE_USER` are not given, `tf2nnpackage-value-remote-test` will not be created.
+1. (Optional) ssh authentication
+  - This test uses `ssh` and `scp` commands, and those commands require a password of the remote machine whenever they are called. This means that you would have to enter the password every time `ssh` and `scp` require it.
+ - This test resolves the problem by using `ssh-copy-id`, which copies the public key of host machine to `authorized_keys` of remote machine. Because of that, this test will ask the password of remote machine only once, at the first time. This is the only user interaction while running this test.
+ - If you do not want to interact with system, just do `ssh-copy-id ${REMOTE_USER}@${REMOTE_IP}` in advance, before running this test. Once `ssh-copy-id` is done, there will be no user-interaction action while running the test.
+
+### Running
+
+- If you finished prerequisites properly, configuring -> building -> testing steps create cmake test automatically.
+- All the related materials will be sent to `REMOTE_WORKDIR` in remote machine. Default value of `REMOTE_WORKDIR` is `CVT_YYMMDD_hhmmss`, which means Circle Value Test done on YY/MM/DD at hh:mm:ss.
+- `REMOTE_WORKDIR` will not be removed automatically after this test finishes.
+ ```sh
+ $ ./nncc configure && ./nncc build
+
+ # Default REMOTE_WORKDIR is CVT_YYMMDD_hhmmss folder
+ $ ./nncc test -R tf2nnpackage_value_remote_test
+
+ # You can set REMOTE_WORKDIR where you have write privilege
+  $ REMOTE_WORKDIR=/path/you/want/ ./nncc test -R tf2nnpackage_value_remote_test
+ ```
+
+### Generated Files While Running
+
+- All related files (`pb`, `circle`, `h5`, etc.) are taken from the `build/compiler/common-artifacts` folder.
+- `nnpkg_test.sh`, runtime products and each nnpackage are sent to `REMOTE_WORKDIR` in remote machine.
+- Each test result is generated in `build/compiler/tf2nnpackage-value-remote-test` with the name `${RECIPE}.log`
+
+### Check Test Result
+
+- Summary of test result will be created as csv file in host.
+ ```sh
+ # Result_latest is symbolic link to the latest csv result file
+ # Print the latest test result
+  $ cat build/compiler/tf2nnpackage-value-remote-test/Result_latest
+  TEST_NAME, CIRCLE_VALUE_TEST
+  UNIT_Add_000, TRUE
+ ...
+
+ # List all result csv files
+  $ ls build/compiler/tf2nnpackage-value-remote-test/Result_*.csv
+ Result_20191119_212521.csv
+ ...
+ ```
diff --git a/compiler/tf2nnpackage-value-remote-test/requires.cmake b/compiler/tf2nnpackage-value-remote-test/requires.cmake
new file mode 100644
index 000000000..06a4a8a6a
--- /dev/null
+++ b/compiler/tf2nnpackage-value-remote-test/requires.cmake
@@ -0,0 +1 @@
+require("common-artifacts")
diff --git a/compiler/tf2nnpackage-value-remote-test/test.lst b/compiler/tf2nnpackage-value-remote-test/test.lst
new file mode 100644
index 000000000..10eb52d92
--- /dev/null
+++ b/compiler/tf2nnpackage-value-remote-test/test.lst
@@ -0,0 +1,3 @@
+#--------------- Remote Machine Setting ---------------#
+# set(REMOTE_IP "xxx.xxx.xxx.xxx")
+# set(REMOTE_USER "remote_username")
diff --git a/compiler/tf2nnpackage-value-remote-test/testall.sh b/compiler/tf2nnpackage-value-remote-test/testall.sh
new file mode 100755
index 000000000..f1c9789b3
--- /dev/null
+++ b/compiler/tf2nnpackage-value-remote-test/testall.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+# Need at least 4 arguments
+if [[ $# -lt 4 ]]; then
+ echo "USAGE: $0 ..."
+ echo
+ echo "ARGUMENTS:"
+ echo " [test.config path]"
+ echo " [WORKDIR]"
+ echo " [REMOTE_IP]"
+ echo " [REMOTE_USER]"
+ echo " [Prefix1]"
+ echo " [Prefix2]"
+ echo " ..."
+ exit 255
+fi
+
+CONFIG_PATH="$1"; shift
+WORKDIR="$1"; shift
+REMOTE_IP="$1"; shift
+REMOTE_USER="$1"; shift
+
+BINDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+CURRENT_DATETIME=$(date +'%Y%m%d_%H%M%S')
+REMOTE_WORKDIR=${REMOTE_WORKDIR:-"CVT_${CURRENT_DATETIME}"}
+RESULT_CSV="${BINDIR}/Result_${CURRENT_DATETIME}.csv"
+
+source "${CONFIG_PATH}"
+
+echo "-- Found nnpkg_test: ${NNPKG_TEST_PATH}"
+echo "-- Found Runtime library: ${RUNTIME_LIBRARY_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+if [ -z ${NNPKG_TEST_PATH} ] || [ ! -f ${NNPKG_TEST_PATH} ]; then
+ echo "nnpkg_test is not found"
+ exit 4
+fi
+
+# Register remote machine ssh information
+cat /dev/zero | ssh-keygen -q -N ""
+ssh-copy-id -o ConnectTimeout=5 "${REMOTE_USER}@${REMOTE_IP}"
+
+# Odroid IP address validation
+if [[ $? -ne 0 ]]; then
+ echo "Cannot reach to given remote machine. Check IP address or username."
+ exit 5
+fi
+
+# Send runtime library files
+ssh "${REMOTE_USER}@${REMOTE_IP}" "mkdir -p ${REMOTE_WORKDIR}/Product/"
+scp -r "${RUNTIME_LIBRARY_PATH}" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/Product/"
+
+# Send nnpkg_test.sh
+scp "${NNPKG_TEST_PATH}" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/"
+
+TESTED=()
+PASSED=()
+FAILED=()
+echo "TEST_NAME, CIRCLE_VALUE_TEST" >> ${RESULT_CSV}
+
+pushd "${WORKDIR}"
+while [[ $# -ne 0 ]]; do
+ PREFIX="$1"; shift
+
+ TESTED+=("${PREFIX}")
+
+ PASSED_TAG="${PREFIX}.passed"
+
+ rm -f "${BINDIR}/${PASSED_TAG}"
+
+ # Information to be recorded
+ CIRCLE_VALUE_PASSED=FALSE
+
+ cat > "${BINDIR}/${PREFIX}.log" <(
+ exec 2>&1
+
+ # Exit immediately if any command fails
+ set -e
+ # Show commands
+ set -x
+
+ # Run nnpkg_test in remote machine
+ if [ ! -d "${PREFIX}" ] ; then
+ PREFIX=${PREFIX}.opt ;
+ fi
+ scp -r "${PREFIX}/" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/${PREFIX}/"
+ ssh "${REMOTE_USER}@${REMOTE_IP}" "cd ${REMOTE_WORKDIR}; ./nnpkg_test.sh ${PREFIX}"
+
+ if [[ $? -eq 0 ]]; then
+ touch "${BINDIR}/${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${BINDIR}/${PASSED_TAG}" ]]; then
+ PASSED+=("$PREFIX")
+ CIRCLE_VALUE_PASSED=TRUE
+ else
+ FAILED+=("$PREFIX")
+ CIRCLE_VALUE_PASSED=FALSE
+ fi
+
+ echo "${PREFIX}, ${CIRCLE_VALUE_PASSED}" >> ${RESULT_CSV}
+done
+popd
+
+rm -f Result_latest
+ln -s ${RESULT_CSV} Result_latest
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/tf2nnpkg/README.md b/compiler/tf2nnpkg/README.md
new file mode 100644
index 000000000..f8ec2c90f
--- /dev/null
+++ b/compiler/tf2nnpkg/README.md
@@ -0,0 +1 @@
+# tf2nnpkg
diff --git a/compiler/tf2tflite-dredd-pbtxt-test/README.md b/compiler/tf2tflite-dredd-pbtxt-test/README.md
new file mode 100644
index 000000000..091867987
--- /dev/null
+++ b/compiler/tf2tflite-dredd-pbtxt-test/README.md
@@ -0,0 +1 @@
+# tf2tflite-dredd-pbtxt-test
diff --git a/compiler/tf2tfliteV2-conversion-test/CMakeLists.txt b/compiler/tf2tfliteV2-conversion-test/CMakeLists.txt
new file mode 100644
index 000000000..3e7e57747
--- /dev/null
+++ b/compiler/tf2tfliteV2-conversion-test/CMakeLists.txt
@@ -0,0 +1,109 @@
+nncc_find_resource(TensorFlowTests)
+
+#
+# Copy [PREFIX]/test.pbtxt to PREFIX.pbtxt in binary folder
+# Copy [PREFIX]/test.info to PREFIX.info in binary folder
+# Encode PREFIX.pbtxt to PREFIX.pb
+#
+set(TEST_REPO "${TensorFlowTests_DIR}")
+set(TEST_PBTXT_FILENAME "test.pbtxt")
+set(TEST_INFO_FILENAME "test.info")
+
+unset(TESTCASES)
+
+macro(add NAME)
+ list(APPEND TESTCASES ${NAME})
+endmacro(add)
+
+# Read "test.lst"
+include("test.lst")
+# Read "test.local.lst" if exists
+include("test.local.lst" OPTIONAL)
+
+unset(TEST_DEPS)
+unset(TEST_NAMES)
+
+foreach(PREFIX IN ITEMS ${TESTCASES})
+ if(NOT IS_DIRECTORY "${TEST_REPO}/${PREFIX}")
+ message(FATAL_ERROR "Missing '${PREFIX}' test")
+ endif()
+
+ set(PBTXT_SOURCE_PATH "${TEST_REPO}/${PREFIX}/${TEST_PBTXT_FILENAME}")
+ set(INFO_SOURCE_PATH "${TEST_REPO}/${PREFIX}/${TEST_INFO_FILENAME}")
+
+ set(PBTXT_FILE "${PREFIX}.pbtxt")
+ set(PBTXT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${PBTXT_FILE}")
+
+ set(INFO_FILE "${PREFIX}.info")
+ set(INFO_PATH "${CMAKE_CURRENT_BINARY_DIR}/${INFO_FILE}")
+
+ # Copy .pbtxt
+ add_custom_command(OUTPUT ${PBTXT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${PBTXT_SOURCE_PATH}" "${PBTXT_PATH}"
+ DEPENDS ${PBTXT_SOURCE_PATH}
+ COMMENT "Generate ${PBTXT_FILE}"
+ )
+
+ # Copy .info
+ add_custom_command(OUTPUT ${INFO_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${INFO_SOURCE_PATH}" "${INFO_PATH}"
+ DEPENDS ${INFO_SOURCE_PATH}
+ COMMENT "Generate ${INFO_FILE}"
+ )
+
+ list(APPEND TEST_DEPS ${INFO_PATH} ${PBTXT_PATH})
+ list(APPEND TEST_NAMES ${PREFIX})
+endforeach(PREFIX)
+
+##
+## Copy testall
+##
+set(TEST_RUNNER "${CMAKE_CURRENT_BINARY_DIR}/testall.sh")
+set(TEST_RUNNER_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/testall.sh")
+
+add_custom_command(
+ OUTPUT ${TEST_RUNNER}
+ COMMAND ${CMAKE_COMMAND} -E copy "${TEST_RUNNER_SOURCE}" "${TEST_RUNNER}"
+ DEPENDS ${TEST_RUNNER_SOURCE}
+ COMMENT "Generate test runner"
+)
+
+list(APPEND TEST_DEPS "${TEST_RUNNER}")
+
+get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR)
+
+set(VIRTUALENV "${NNCC_OVERLAY_DIR}/venv_1_13_2")
+
+###
+### Generate test.config
+###
+set(TEST_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/test.config")
+
+# Get tf2tfliteV2 binary path
+get_target_property(TF2TFLITEV2_BIN_DIR tf2tfliteV2 BINARY_DIR)
+set(TF2TFLITEV2_PATH "${TF2TFLITEV2_BIN_DIR}/tf2tfliteV2.py")
+
+add_custom_command(
+ OUTPUT ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E remove -f ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'TF2TFLITEV2_PATH=\"${TF2TFLITEV2_PATH}\"' >> ${TEST_CONFIG}
+ COMMAND ${CMAKE_COMMAND} -E echo 'VIRTUALENV=\"${VIRTUALENV}\"' >> ${TEST_CONFIG}
+ DEPENDS
+ tf2tfliteV2
+ COMMENT "Generate test configuration"
+)
+
+list(APPEND TEST_DEPS "${TEST_CONFIG}")
+
+# This "tf2tfliteV2_conversion_test_deps" target enforces CMake to generate all the dependencies during "build" phase
+add_custom_target(tf2tfliteV2_conversion_test_deps ALL DEPENDS ${TEST_DEPS})
+
+# TODO This test takes a long time and will only be tested once a day.
+# Run tests
+# add_test(
+# NAME tf2tfliteV2_conversion_test
+# COMMAND "${TEST_RUNNER}"
+# "${TEST_CONFIG}"
+# "${CMAKE_CURRENT_BINARY_DIR}"
+# ${TEST_NAMES}
+# )
diff --git a/compiler/tf2tfliteV2-conversion-test/README.md b/compiler/tf2tfliteV2-conversion-test/README.md
new file mode 100644
index 000000000..d4d2a975f
--- /dev/null
+++ b/compiler/tf2tfliteV2-conversion-test/README.md
@@ -0,0 +1,2 @@
+# tf2tfliteV2-conversion-test
+
diff --git a/compiler/tf2tfliteV2-conversion-test/requires.cmake b/compiler/tf2tfliteV2-conversion-test/requires.cmake
new file mode 100644
index 000000000..31b335471
--- /dev/null
+++ b/compiler/tf2tfliteV2-conversion-test/requires.cmake
@@ -0,0 +1,2 @@
+require("common-artifacts")
+require("tf2tfliteV2")
diff --git a/compiler/tf2tfliteV2-conversion-test/test.lst b/compiler/tf2tfliteV2-conversion-test/test.lst
new file mode 100644
index 000000000..2f771ba31
--- /dev/null
+++ b/compiler/tf2tfliteV2-conversion-test/test.lst
@@ -0,0 +1,124 @@
+# TODO Enable skipped tests
+
+add(NET_0000)
+add(NET_0001)
+add(NET_0002)
+add(NET_0003)
+add(NET_0004)
+add(NET_0005)
+add(NET_0006)
+add(NET_0007)
+add(NET_0008)
+add(NET_0009)
+add(NET_0010)
+add(NET_0011)
+add(NET_0012)
+add(NET_0013)
+add(NET_0014)
+add(NET_0015)
+add(NET_0016)
+add(NET_0017)
+add(NET_0018)
+add(NET_0019)
+add(NET_0020)
+add(NET_0021)
+add(NET_0022)
+#add(NET_0023)
+add(NET_0024)
+add(NET_0025)
+add(NET_0026)
+add(NET_0027)
+add(NET_0028)
+add(NET_0029)
+add(NET_0030)
+add(NET_0031)
+add(NET_0032)
+add(NET_0033)
+add(NET_0034)
+add(NET_0035)
+add(NET_0036)
+add(NET_0037)
+add(NET_0038)
+add(NET_0039)
+add(NET_0040)
+add(NET_0041)
+#add(NET_0042)
+add(REGRESSION_0000)
+add(REGRESSION_0001)
+add(REGRESSION_0002)
+add(UNIT_Add_000)
+add(UNIT_Add_001)
+add(UNIT_Add_002)
+#add(UNIT_Add_003)
+add(UNIT_Add_004)
+add(UNIT_Add_005)
+add(UNIT_AvgPool_000)
+add(UNIT_AvgPool_001)
+#add(UNIT_BiasAdd_000)
+#add(UNIT_BiasAdd_001)
+add(UNIT_BiasAdd_002)
+#add(UNIT_ConcatV2_000)
+#add(UNIT_ConcatV2_001)
+add(UNIT_ConcatV2_002)
+#add(UNIT_Const_000)
+#add(UNIT_Const_001)
+add(UNIT_Conv2D_000)
+add(UNIT_Conv2D_001)
+add(UNIT_Conv2D_002)
+add(UNIT_Conv2DBackpropInput_000)
+add(UNIT_Conv2DBackpropInput_001)
+add(UNIT_Conv2DBackpropInput_002)
+add(UNIT_CustomOp_000)
+add(UNIT_CustomOp_001)
+add(UNIT_DepthwiseConv2dNative_000)
+add(UNIT_DepthwiseConv2dNative_001)
+add(UNIT_FusedBatchNorm_000)
+add(UNIT_FusedBatchNorm_001)
+add(UNIT_Maximum_000)
+add(UNIT_Maximum_001)
+add(UNIT_Maximum_002)
+add(UNIT_MaxPool_000)
+add(UNIT_MaxPool_001)
+add(UNIT_Mean_000)
+add(UNIT_Mean_001)
+add(UNIT_Mean_002)
+add(UNIT_Mean_003)
+add(UNIT_MirrorPad_000)
+add(UNIT_Mul_000)
+add(UNIT_Mul_001)
+add(UNIT_Mul_002)
+#add(UNIT_Pack_000)
+#add(UNIT_Pack_001)
+#add(UNIT_Pack_002)
+#add(UNIT_Pack_003)
+#add(UNIT_Pack_004)
+add(UNIT_Pad_000)
+add(UNIT_PadV2_000)
+#add(UNIT_Placeholder_000)
+#add(UNIT_Placeholder_001)
+#add(UNIT_Placeholder_002)
+#add(UNIT_Placeholder_003)
+add(UNIT_RealDiv_000)
+add(UNIT_RealDiv_001)
+add(UNIT_Relu_000)
+add(UNIT_Relu6_000)
+add(UNIT_Reshape_000)
+add(UNIT_Rsqrt_000)
+#add(UNIT_Shape_000)
+add(UNIT_Softmax_000)
+add(UNIT_Softmax_001)
+add(UNIT_Softmax_002)
+add(UNIT_Softmax_003)
+add(UNIT_Sqrt_000)
+add(UNIT_SquaredDifference_000)
+add(UNIT_SquaredDifference_001)
+add(UNIT_Squeeze_000)
+add(UNIT_Squeeze_001)
+add(UNIT_Squeeze_002)
+add(UNIT_Squeeze_003)
+#add(UNIT_StopGradient_000)
+#add(UNIT_StopGradient_001)
+#add(UNIT_StridedSlice_000)
+add(UNIT_Sub_000)
+add(UNIT_Sub_001)
+add(UNIT_Tanh_000)
diff --git a/compiler/tf2tfliteV2-conversion-test/testall.sh b/compiler/tf2tfliteV2-conversion-test/testall.sh
new file mode 100755
index 000000000..2d9a423c5
--- /dev/null
+++ b/compiler/tf2tfliteV2-conversion-test/testall.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+# Need at least 2 arguments
+if [[ $# -lt 2 ]]; then
+ echo "USAGE: $0 ..."
+ echo
+ echo "ARGUMENTS:"
+ echo " [test.config path]"
+ echo " [WORKDIR]"
+ echo " [Prefix1]"
+ echo " [Prefix2]"
+ echo " ..."
+ exit 255
+fi
+
+CONFIG_PATH="$1"; shift
+WORKDIR="$1"; shift
+
+source "${CONFIG_PATH}"
+
+echo "-- Found TF2TFLITEV2: ${TF2TFLITEV2_PATH}"
+echo "-- Found python virtualenv: ${VIRTUALENV}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+while [[ $# -ne 0 ]]; do
+ PREFIX="$1"; shift
+
+ TESTED+=("${PREFIX}")
+
+ PASSED_TAG="${PREFIX}.passed"
+
+ rm -f "${PASSED_TAG}"
+
+ cat > "${PREFIX}.log" <(
+ exec 2>&1
+
+ echo "-- Found pbtxt: ${PREFIX}.pbtxt"
+
+ # Exit immediately if any command fails
+ set -e
+ # Show commands
+ set -x
+
+ # Generate tflite
+ source "${VIRTUALENV}/bin/activate"
+ "${VIRTUALENV}/bin/python" "${TF2TFLITEV2_PATH}" \
+ --v1 \
+ --input_path "${WORKDIR}/${PREFIX}.pbtxt" \
+ --input_arrays "$(awk -F, '/^input/ { print $2 }' ${PREFIX}.info | cut -d: -f1 | tr -d ' ' | paste -d, -s)" \
+ --input_shapes "$(cat ${PREFIX}.info | grep '^input' | cut -d '[' -f2 | cut -d ']' -f1 | tr -d ' ' | xargs | tr ' ' ':')" \
+ --output_path "${WORKDIR}/${PREFIX}.tflite" \
+ --output_arrays "$(awk -F, '/^output/ { print $2 }' ${PREFIX}.info | cut -d: -f1 | tr -d ' ' | paste -d, -s)"
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$PREFIX")
+ else
+ FAILED+=("$PREFIX")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/tf2tfliteV2-value-pbtxt-test/CMakeLists.txt b/compiler/tf2tfliteV2-value-pbtxt-test/CMakeLists.txt
deleted file mode 100644
index 2526db561..000000000
--- a/compiler/tf2tfliteV2-value-pbtxt-test/CMakeLists.txt
+++ /dev/null
@@ -1,183 +0,0 @@
-find_package(PythonInterp 3 QUIET)
-find_package(PythonLibs 3 QUIET)
-
-if(NOT ${PYTHONINTERP_FOUND})
- message("Build tf2tfliteV2-value-pbtxt-test: FALSE (Python3 is missing)")
- return()
-endif()
-
-if(${PYTHON_VERSION_MINOR} LESS 3)
- message("Build tf2tfliteV2-value-pbtxt-test: FALSE (You need to install Python version higher than 3.3)")
- return()
-endif()
-
-nnas_include(TargetRequire)
-
-unset(REQUIRED_TARGETS)
-list(APPEND REQUIRED_TARGETS tfkit)
-list(APPEND REQUIRED_TARGETS tf2tfliteV2)
-list(APPEND REQUIRED_TARGETS nnkit-run)
-list(APPEND REQUIRED_TARGETS nnkit_tf_backend)
-list(APPEND REQUIRED_TARGETS nnkit_tflite_backend)
-list(APPEND REQUIRED_TARGETS nnkit_randomize_action)
-list(APPEND REQUIRED_TARGETS nnkit_HDF5_export_action)
-list(APPEND REQUIRED_TARGETS nnkit_HDF5_import_action)
-list(APPEND REQUIRED_TARGETS i5diff)
-TargetRequire_Return(${REQUIRED_TARGETS})
-
-message(STATUS "tf2tfliteV2-value-pbtxt-test: run tests")
-
-# Create python virtual environment
-set(VIRTUALENV "${CMAKE_CURRENT_BINARY_DIR}/venv")
-
-add_custom_command(
- OUTPUT ${VIRTUALENV}
- COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUALENV}
-)
-
-# Copy requirements.txt and install required pip packages
-set(REQUIREMENTS_FILE "requirements.txt")
-set(REQUIREMENTS_SRC_PATH "${CMAKE_CURRENT_SOURCE_DIR}/${REQUIREMENTS_FILE}")
-set(REQUIREMENTS_BIN_PATH "${CMAKE_CURRENT_BINARY_DIR}/${REQUIREMENTS_FILE}")
-
-add_custom_command(
- OUTPUT ${REQUIREMENTS_BIN_PATH}
- COMMAND ${CMAKE_COMMAND} -E copy ${REQUIREMENTS_SRC_PATH} ${REQUIREMENTS_BIN_PATH}
- COMMAND ${VIRTUALENV}/bin/python -m pip install --upgrade pip setuptools --timeout 100
- COMMAND ${VIRTUALENV}/bin/python -m pip install -r requirements.txt --upgrade --timeout 100
- DEPENDS ${VIRTUALENV} ${REQUIREMENTS_SRC_PATH}
-)
-
-add_custom_target(tf2tfliteV2_value_pbtxt_python_deps ALL
- DEPENDS ${VIRTUALENV} ${REQUIREMENTS_BIN_PATH} #${TF2TFLITEV2_BIN_PATH}
-)
-
-nncc_find_resource(TensorFlowTests)
-
-#
-# Copy [PREFIX]/test.pbtxt to PREFIX.pbtxt in binary folder
-# Copy [PREFIX]/test.info to PREFIX.info in binary folder
-# Encode PREFIX.pbtxt to PREFIX.pb
-#
-set(TEST_REPO "${TensorFlowTests_DIR}")
-set(TEST_PBTXT_FILENAME "test.pbtxt")
-set(TEST_INFO_FILENAME "test.info")
-
-unset(TESTCASES)
-
-macro(add NAME)
- list(APPEND TESTCASES ${NAME})
-endmacro(add)
-
-# Read "test.lst"
-include("test.lst")
-# Read "test.local.lst" if exists
-include("test.local.lst" OPTIONAL)
-
-unset(TEST_DEPS)
-unset(TEST_NAMES)
-
-foreach(PREFIX IN ITEMS ${TESTCASES})
- if(NOT IS_DIRECTORY "${TEST_REPO}/${PREFIX}")
- message(FATAL_ERROR "Missing '${PREFIX}' test")
- endif()
-
- set(PBTXT_SOURCE_PATH "${TEST_REPO}/${PREFIX}/${TEST_PBTXT_FILENAME}")
- set(INFO_SOURCE_PATH "${TEST_REPO}/${PREFIX}/${TEST_INFO_FILENAME}")
-
- set(PBTXT_FILE "${PREFIX}.pbtxt")
- set(PBTXT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${PBTXT_FILE}")
-
- set(INFO_FILE "${PREFIX}.info")
- set(INFO_PATH "${CMAKE_CURRENT_BINARY_DIR}/${INFO_FILE}")
-
- set(PB_FILE "${PREFIX}.pb")
- set(PB_PATH "${CMAKE_CURRENT_BINARY_DIR}/${PB_FILE}")
-
- # Copy .pbtxt
- add_custom_command(OUTPUT ${PBTXT_PATH}
- COMMAND ${CMAKE_COMMAND} -E copy "${PBTXT_SOURCE_PATH}" "${PBTXT_PATH}"
- DEPENDS ${PBTXT_SOURCE_PATH}
- COMMENT "Generate ${PBTXT_FILE}"
- )
-
- # Copy .info
- add_custom_command(OUTPUT ${INFO_PATH}
- COMMAND ${CMAKE_COMMAND} -E copy "${INFO_SOURCE_PATH}" "${INFO_PATH}"
- DEPENDS ${INFO_SOURCE_PATH}
- COMMENT "Generate ${INFO_FILE}"
- )
-
- # Generate .pb from .pbtxt
- add_custom_command(OUTPUT ${PB_PATH}
- COMMAND $<TARGET_FILE:tfkit> encode ${PBTXT_PATH} ${PB_PATH}
- DEPENDS ${PBTXT_PATH}
- COMMENT "Generate ${PB_FILE}"
- )
-
- list(APPEND TEST_DEPS ${INFO_PATH} ${PB_PATH})
- list(APPEND TEST_NAMES ${PREFIX})
-endforeach(PREFIX)
-
-##
-## Copy testall
-##
-set(TEST_RUNNER "${CMAKE_CURRENT_BINARY_DIR}/testall.sh")
-set(TEST_RUNNER_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/testall.sh")
-
-add_custom_command(
- OUTPUT ${TEST_RUNNER}
- COMMAND ${CMAKE_COMMAND} -E copy "${TEST_RUNNER_SOURCE}" "${TEST_RUNNER}"
- DEPENDS ${TEST_RUNNER_SOURCE}
- COMMENT "Generate test runner"
-)
-
-list(APPEND TEST_DEPS "${TEST_RUNNER}")
-
-###
-### Generate test.config
-###
-set(TEST_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/test.config")
-
-# Get tf2tfliteV2 binary path
-get_target_property(TF2TFLITEV2_BIN_DIR tf2tfliteV2 BINARY_DIR)
-set(TF2TFLITEV2_PATH "${TF2TFLITEV2_BIN_DIR}/tf2tfliteV2.py")
-
-add_custom_command(
- OUTPUT ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E remove -f ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'NNKIT_RUN_PATH=\"$<TARGET_FILE:nnkit-run>\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'TF_BACKEND_PATH=\"$<TARGET_FILE:nnkit_tf_backend>\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'TFLITE_BACKEND_PATH=\"$<TARGET_FILE:nnkit_tflite_backend>\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'RANDOMIZE_ACTION_PATH=\"$<TARGET_FILE:nnkit_randomize_action>\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'HDF5_EXPORT_ACTION_PATH=\"$<TARGET_FILE:nnkit_HDF5_export_action>\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'HDF5_IMPORT_ACTION_PATH=\"$<TARGET_FILE:nnkit_HDF5_import_action>\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'I5DIFF_PATH=\"$<TARGET_FILE:i5diff>\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'TF2TFLITEV2_PATH=\"${TF2TFLITEV2_PATH}\"' >> ${TEST_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E echo 'VIRTUALENV=\"${VIRTUALENV}\"' >> ${TEST_CONFIG}
- DEPENDS
- i5diff
- nnkit-run
- nnkit_tf_backend
- nnkit_tflite_backend
- nnkit_randomize_action
- nnkit_HDF5_export_action
- nnkit_HDF5_import_action
- tf2tfliteV2
- tf2tfliteV2_value_pbtxt_python_deps
- COMMENT "Generate test configuration"
-)
-
-list(APPEND TEST_DEPS "${TEST_CONFIG}")
-
-# This "tf2tfliteV2_value_pbtxt_test_deps" target enforces CMake to generate all the dependencies during "build" phase
-add_custom_target(tf2tfliteV2_value_pbtxt_test_deps ALL DEPENDS ${TEST_DEPS})
-
-# Run tests
-add_test(
- NAME tf2tfliteV2_value_pbtxt_test
- COMMAND "${TEST_RUNNER}"
- "${TEST_CONFIG}"
- "${CMAKE_CURRENT_BINARY_DIR}"
- ${TEST_NAMES}
-)
diff --git a/compiler/tf2tfliteV2-value-pbtxt-test/requirements.txt b/compiler/tf2tfliteV2-value-pbtxt-test/requirements.txt
deleted file mode 100644
index 2eef5dbb4..000000000
--- a/compiler/tf2tfliteV2-value-pbtxt-test/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-# TODO : Handling TF v2
-tensorflow==1.13.1
diff --git a/compiler/tf2tfliteV2-value-pbtxt-test/requires.cmake b/compiler/tf2tfliteV2-value-pbtxt-test/requires.cmake
deleted file mode 100644
index a51236119..000000000
--- a/compiler/tf2tfliteV2-value-pbtxt-test/requires.cmake
+++ /dev/null
@@ -1,4 +0,0 @@
-require("nnkit")
-require("tfkit")
-require("i5diff")
-require("tf2tfliteV2")
diff --git a/compiler/tf2tfliteV2-value-pbtxt-test/test.lst b/compiler/tf2tfliteV2-value-pbtxt-test/test.lst
deleted file mode 100644
index 328366b16..000000000
--- a/compiler/tf2tfliteV2-value-pbtxt-test/test.lst
+++ /dev/null
@@ -1,101 +0,0 @@
-# TODO Enable skipped tests
-
-add(NET_0000)
-add(NET_0001)
-add(NET_0002)
-add(NET_0003)
-add(NET_0004)
-add(NET_0005)
-add(NET_0006)
-add(NET_0007)
-add(NET_0008)
-add(NET_0009)
-add(NET_0010)
-add(NET_0011)
-add(NET_0012)
-add(NET_0013)
-add(NET_0014)
-add(NET_0015)
-add(NET_0016)
-add(NET_0017)
-add(NET_0018)
-add(NET_0019)
-add(NET_0020)
-add(NET_0021)
-add(NET_0022)
-#add(NET_0023)
-add(NET_0024)
-add(NET_0025)
-#add(NET_0028)
-add(NET_0029)
-add(NET_0030)
-add(NET_0031)
-add(NET_0032)
-add(NET_0033)
-add(NET_0034)
-add(NET_0035)
-add(NET_0036)
-add(NET_0037)
-add(NET_0038)
-add(NET_0039)
-add(NET_0040)
-add(NET_0041)
-add(REGRESSION_0000)
-add(REGRESSION_0001)
-add(REGRESSION_0002)
-add(UNIT_Add_000)
-add(UNIT_Add_001)
-add(UNIT_Add_002)
-add(UNIT_Add_004)
-add(UNIT_Add_005)
-add(UNIT_AvgPool_000)
-add(UNIT_AvgPool_001)
-#add(UNIT_BiasAdd_000)
-#add(UNIT_BiasAdd_001)
-add(UNIT_BiasAdd_002)
-#add(UNIT_ConcatV2_000)
-#add(UNIT_ConcatV2_001)
-add(UNIT_ConcatV2_002)
-#add(UNIT_Const_000)
-#add(UNIT_Const_001)
-add(UNIT_Conv2D_000)
-add(UNIT_Conv2DBackpropInput_000)
-add(UNIT_Conv2DBackpropInput_001)
-add(UNIT_DepthwiseConv2dNative_000)
-add(UNIT_DepthwiseConv2dNative_001)
-add(UNIT_Maximum_000)
-add(UNIT_Maximum_001)
-add(UNIT_Maximum_002)
-add(UNIT_MaxPool_000)
-add(UNIT_MaxPool_001)
-add(UNIT_Mean_000)
-add(UNIT_Mean_001)
-add(UNIT_Mean_002)
-add(UNIT_Mean_003)
-add(UNIT_Mul_000)
-add(UNIT_Mul_001)
-add(UNIT_Mul_002)
-add(UNIT_Pad_000)
-#add(UNIT_Placeholder_000)
-#add(UNIT_Placeholder_001)
-#add(UNIT_Placeholder_002)
-#add(UNIT_Placeholder_003)
-add(UNIT_RealDiv_000)
-add(UNIT_RealDiv_001)
-add(UNIT_Relu_000)
-add(UNIT_Relu6_000)
-add(UNIT_Reshape_000)
-add(UNIT_Rsqrt_000)
-add(UNIT_Softmax_001)
-add(UNIT_Sqrt_000)
-#add(UNIT_SquaredDifference_000)
-#add(UNIT_SquaredDifference_001)
-add(UNIT_Squeeze_000)
-add(UNIT_Squeeze_001)
-#add(UNIT_Squeeze_002)
-#add(UNIT_Squeeze_003)
-#add(UNIT_StopGradient_000)
-#add(UNIT_StopGradient_001)
-add(UNIT_Sub_000)
-add(UNIT_Sub_001)
-add(UNIT_Tanh_000)
diff --git a/compiler/tf2tfliteV2-value-pbtxt-test/testall.sh b/compiler/tf2tfliteV2-value-pbtxt-test/testall.sh
deleted file mode 100755
index 9dde41bfe..000000000
--- a/compiler/tf2tfliteV2-value-pbtxt-test/testall.sh
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-
-# Need at least 2 arguments
-if [[ $# -lt 2 ]]; then
- echo "USAGE: $0 ..."
- echo
- echo "ARGUMENTS:"
- echo " [test.config path]"
- echo " [WORKDIR]"
- echo " [Prefix1]"
- echo " [Prefix2]"
- echo " ..."
- exit 255
-fi
-
-CONFIG_PATH="$1"; shift
-WORKDIR="$1"; shift
-
-source "${CONFIG_PATH}"
-
-echo "-- Found nnkit-run: ${NNKIT_RUN_PATH}"
-echo "-- Found TF backend: ${TF_BACKEND_PATH}"
-echo "-- Found TFLITE backend: ${TFLITE_BACKEND_PATH}"
-echo "-- Found TF2TFLITEV2: ${TF2TFLITEV2_PATH}"
-echo "-- Found randomize action: ${RANDOMIZE_ACTION_PATH}"
-echo "-- Found HDF5 export action: ${HDF5_EXPORT_ACTION_PATH}"
-echo "-- Found HDF5 import action: ${HDF5_IMPORT_ACTION_PATH}"
-echo "-- Found i5diff: ${I5DIFF_PATH}"
-echo "-- Found workdir: ${WORKDIR}"
-
-TESTED=()
-PASSED=()
-FAILED=()
-
-pushd "${WORKDIR}"
-while [[ $# -ne 0 ]]; do
- PREFIX="$1"; shift
-
- TESTED+=("${PREFIX}")
-
- PASSED_TAG="${PREFIX}.passed"
-
- rm -f "${PASSED_TAG}"
-
- cat > "${PREFIX}.log" <(
- exec 2>&1
-
- echo "-- Found pb: ${PREFIX}.pb"
-
- # Exit immediately if any command fails
- set -e
- # Show commands
- set -x
-
- # Generate tflite
- source "${VIRTUALENV}/bin/activate"
- "${VIRTUALENV}/bin/python" "${TF2TFLITEV2_PATH}" \
- --v1 \
- --input_path "${WORKDIR}/${PREFIX}.pb" \
- --input_arrays "$(awk -F, '/^input/ { print $2 }' ${PREFIX}.info | cut -d: -f1 | tr -d ' ' | paste -d, -s)" \
- --input_shapes "$(cat ${PREFIX}.info | grep '^input' | cut -d '[' -f2 | cut -d ']' -f1 | tr -d ' ' | xargs | tr ' ' ':')" \
- --output_path "${WORKDIR}/${PREFIX}.tflite" \
- --output_arrays "$(awk -F, '/^output/ { print $2 }' ${PREFIX}.info | cut -d: -f1 | tr -d ' ' | paste -d, -s)"
-
- # Run TensorFlow
- "${NNKIT_RUN_PATH}" \
- --backend "${TF_BACKEND_PATH}" \
- --backend-arg "${WORKDIR}/${PREFIX}.pb" \
- --backend-arg "${WORKDIR}/${PREFIX}.info" \
- --pre "${RANDOMIZE_ACTION_PATH}" \
- --pre "${HDF5_EXPORT_ACTION_PATH}" \
- --pre-arg "${WORKDIR}/${PREFIX}.input.h5" \
- --post "${HDF5_EXPORT_ACTION_PATH}" \
- --post-arg "${WORKDIR}/${PREFIX}.expected.h5"
-
- # Run TensorFlow Lite
- "${NNKIT_RUN_PATH}" \
- --backend "${TFLITE_BACKEND_PATH}" \
- --backend-arg "${WORKDIR}/${PREFIX}.tflite" \
- --pre "${HDF5_IMPORT_ACTION_PATH}" \
- --pre-arg "${WORKDIR}/${PREFIX}.input.h5" \
- --post "${HDF5_EXPORT_ACTION_PATH}" \
- --post-arg "${WORKDIR}/${PREFIX}.obtained.h5"
-
- "${I5DIFF_PATH}" -d 0.001 "${PREFIX}.expected.h5" "${PREFIX}.obtained.h5"
-
- if [[ $? -eq 0 ]]; then
- touch "${PASSED_TAG}"
- fi
- )
-
- if [[ -f "${PASSED_TAG}" ]]; then
- PASSED+=("$PREFIX")
- else
- FAILED+=("$PREFIX")
- fi
-done
-popd
-
-if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
- echo "FAILED"
- for TEST in "${FAILED[@]}"
- do
- echo "- ${TEST}"
- done
- exit 255
-fi
-
-echo "PASSED"
-exit 0
diff --git a/compiler/tf2tfliteV2/CMakeLists.txt b/compiler/tf2tfliteV2/CMakeLists.txt
index 8a5c2dcd8..5591ac4e1 100644
--- a/compiler/tf2tfliteV2/CMakeLists.txt
+++ b/compiler/tf2tfliteV2/CMakeLists.txt
@@ -9,3 +9,5 @@ add_custom_command(OUTPUT ${tf2tfliteV2_BIN}
)
add_custom_target(tf2tfliteV2 ALL DEPENDS ${tf2tfliteV2_BIN})
+
+install(FILES ${tf2tfliteV2_BIN} DESTINATION bin)
diff --git a/compiler/tf2tfliteV2/README.md b/compiler/tf2tfliteV2/README.md
index 836740a5c..13359aab1 100644
--- a/compiler/tf2tfliteV2/README.md
+++ b/compiler/tf2tfliteV2/README.md
@@ -6,13 +6,19 @@ _tf2tfliteV2_ is a TensorFlow to TensorFlow Lite model Converter.
Even though we alreay have _tf2tflite_, we cannot cover all opeartors in TensorFlow. To expand coverage, we introduce _tf2tfliteV2_ which uses `TensorFlow Lite Converter`(by Google) internally.
## Prerequisite
-- Frozen graph from TensorFlow 1.13.1
+- Frozen graph from TensorFlow 1.13.1 in binary(`*.pb`) or text(`*.pbtxt`) format
- Desired version of TensorFlow(You can use python virtualenv, docker, etc.)
## Example
```
python tf2tfliteV2.py \
> --v1 \
+> -i frozen_graph.pb -o converted.tflite
+> -I model_inputs -O model_outputs
+```
+```
+python tf2tfliteV2.py \
+> --v1 \
> --input_path=frozen_graph.pb \
> --output_path=converted.tflite \
> --input_arrays=model_inputs \
@@ -22,26 +28,34 @@ python tf2tfliteV2.py \
```
python tf2tfliteV2.py \
> --v2 \
-> --input_path=frozen_graph.pb \
+> --input_path=frozen_graph.pbtxt \
> --output_path=converted.tflite \
> --input_arrays=model_inputs \
> --output_arrays=model_outputs
```
+```
+python tf2tfliteV2.py \
+> --v2 \
+> --input_path=multiple_output_graph.pb \
+> --output_path=converted.tflite \
+> --input_arrays=model_inputs \
+> --output_arrays=output,output:1,output:2
+```
## optional argument
```
-h, --help show this help message and exit
--v1 Use TensorFlow Lite Converter 1.x
--v2 Use TensorFlow Lite Converter 2.x
- --input_path INPUT_PATH
+ -i INPUT_PATH, --input_path INPUT_PATH
Full filepath of the input file.
- --output_path OUTPUT_PATH
+ -o OUTPUT_PATH, --output_path OUTPUT_PATH
Full filepath of the output file.
- --input_arrays INPUT_ARRAYS
+ -I INPUT_ARRAYS, --input_arrays INPUT_ARRAYS
Names of the input arrays, comma-separated.
- --input_shapes INPUT_SHAPES
+ -s INPUT_SHAPES, --input_shapes INPUT_SHAPES
Shapes corresponding to --input_arrays, colon-
separated.
- --output_arrays OUTPUT_ARRAYS
+ -O OUTPUT_ARRAYS, --output_arrays OUTPUT_ARRAYS
Names of the output arrays, comma-separated.
```
diff --git a/compiler/tf2tfliteV2/tf2tfliteV2.py b/compiler/tf2tfliteV2/tf2tfliteV2.py
index 8b6ba0dc4..82d6ee232 100755
--- a/compiler/tf2tfliteV2/tf2tfliteV2.py
+++ b/compiler/tf2tfliteV2/tf2tfliteV2.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
# Copyright (C) 2018 The TensorFlow Authors
#
@@ -48,8 +50,13 @@ def _get_parser():
# Input and output path.
parser.add_argument(
- "--input_path", type=str, help="Full filepath of the input file.", required=True)
+ "-i",
+ "--input_path",
+ type=str,
+ help="Full filepath of the input file.",
+ required=True)
parser.add_argument(
+ "-o",
"--output_path",
type=str,
help="Full filepath of the output file.",
@@ -57,15 +64,20 @@ def _get_parser():
# Input and output arrays.
parser.add_argument(
+ "-I",
"--input_arrays",
type=str,
help="Names of the input arrays, comma-separated.",
required=True)
parser.add_argument(
+ "-s",
"--input_shapes",
type=str,
- help="Shapes corresponding to --input_arrays, colon-separated.")
+ help=
+ "Shapes corresponding to --input_arrays, colon-separated.(ex:\"1,4,4,3:1,20,20,3\")"
+ )
parser.add_argument(
+ "-O",
"--output_arrays",
type=str,
help="Names of the output arrays, comma-separated.",
@@ -141,9 +153,14 @@ def _v2_convert(flags):
wrap_func = wrap_frozen_graph(
graph_def,
- inputs=[_str + ":0" for _str in _parse_array(flags.input_arrays)],
- # TODO What if multiple outputs come in?
- outputs=[_str + ":0" for _str in _parse_array(flags.output_arrays)])
+ inputs=[
+ _str + ":0" if len(_str.split(":")) == 1 else _str
+ for _str in _parse_array(flags.input_arrays)
+ ],
+ outputs=[
+ _str + ":0" if len(_str.split(":")) == 1 else _str
+ for _str in _parse_array(flags.output_arrays)
+ ])
converter = tf.lite.TFLiteConverter.from_concrete_functions([wrap_func])
converter.allow_custom_ops = True
diff --git a/compiler/tfinfo-v2/README.md b/compiler/tfinfo-v2/README.md
new file mode 100644
index 000000000..72fbdb06a
--- /dev/null
+++ b/compiler/tfinfo-v2/README.md
@@ -0,0 +1 @@
+# tfinfo-v2
diff --git a/compiler/tfkit/src/PackCommand.cpp b/compiler/tfkit/src/PackCommand.cpp
index 36bf5a71c..a1c4a6fc8 100644
--- a/compiler/tfkit/src/PackCommand.cpp
+++ b/compiler/tfkit/src/PackCommand.cpp
@@ -51,21 +51,18 @@ template <> void pack<float>(tensorflow::TensorProto *input_tensor)
}
else if (input_tensor->float_val().size() == input_flat_size)
{
- // clang-format off
- // TODO fix indentation
- input_tensor->clear_tensor_content();
+ input_tensor->clear_tensor_content();
- std::vector<float> tensor_content;
- for (int i = 0; i < input_flat_size; ++i)
- {
- tensor_content.push_back(input_tensor->float_val(i));
- }
+ std::vector<float> tensor_content;
+ for (int i = 0; i < input_flat_size; ++i)
+ {
+ tensor_content.push_back(input_tensor->float_val(i));
+ }
- input_tensor->set_tensor_content(std::string(
- reinterpret_cast<const char *>(tensor_content.data()), sizeof(float) * input_flat_size));
+ input_tensor->set_tensor_content(std::string(
+ reinterpret_cast<const char *>(tensor_content.data()), sizeof(float) * input_flat_size));
- input_tensor->clear_float_val();
- // clang-format on
+ input_tensor->clear_float_val();
}
else
{
diff --git a/compiler/tfkit/src/UnpackCommand.cpp b/compiler/tfkit/src/UnpackCommand.cpp
index 77ec1edd8..a6711f131 100644
--- a/compiler/tfkit/src/UnpackCommand.cpp
+++ b/compiler/tfkit/src/UnpackCommand.cpp
@@ -49,18 +49,15 @@ template <> void unpack<float>(tensorflow::TensorProto *input_tensor)
}
else if (input_tensor->tensor_content().size() == input_flat_size * sizeof(float))
{
- // clang-format off
- // TODO fix indentation
- input_tensor->clear_float_val();
+ input_tensor->clear_float_val();
- const float *tensor_content =
- reinterpret_cast<const float *>(input_tensor->tensor_content().data());
- for (int i = 0; i < input_flat_size; i++)
- {
- input_tensor->add_float_val(tensor_content[i]);
- }
- input_tensor->clear_tensor_content();
- // clang-format on
+ const float *tensor_content =
+ reinterpret_cast<const float *>(input_tensor->tensor_content().data());
+ for (int i = 0; i < input_flat_size; i++)
+ {
+ input_tensor->add_float_val(tensor_content[i]);
+ }
+ input_tensor->clear_tensor_content();
}
else
{
@@ -139,6 +136,41 @@ template <> void unpack<int8_t>(tensorflow::TensorProto *input_tensor)
}
}
+template <> void unpack<bool>(tensorflow::TensorProto *input_tensor)
+{
+ const auto &input_shape = input_tensor->tensor_shape();
+ assert(input_shape.dim_size() <= 6);
+ int input_flat_size = tfkit::tf::GetElementCount(input_shape);
+
+  // Adjust for the case where the shape is not set but actual values exist
+ if (input_tensor->tensor_content().size() > 0 && input_flat_size == -1)
+ {
+ input_flat_size = input_tensor->tensor_content().size() / sizeof(bool);
+ }
+
+ if (input_tensor->tensor_content().size() == 0)
+ {
+ // Do nothing as there is no tensor content to unpack
+ }
+ else if (input_tensor->tensor_content().size() == input_flat_size * sizeof(bool))
+ {
+ input_tensor->clear_bool_val();
+
+ const bool *tensor_content =
+ reinterpret_cast<const bool *>(input_tensor->tensor_content().data());
+ for (int i = 0; i < input_flat_size; i++)
+ {
+ input_tensor->add_bool_val(tensor_content[i]);
+ }
+ input_tensor->clear_tensor_content();
+ }
+ else
+ {
+ throw std::runtime_error{"Number of elements mismatch in unpack<bool>."};
+ // TODO: support for these
+ }
+}
+
void unpack(tensorflow::GraphDef &graph_def)
{
auto nodes = graph_def.mutable_node();
@@ -162,6 +194,9 @@ void unpack(tensorflow::GraphDef &graph_def)
case tensorflow::DT_INT8:
unpack<int8_t>(tensor);
break;
+ case tensorflow::DT_BOOL:
+ unpack<bool>(tensor);
+ break;
default:
throw std::runtime_error{"Unsupported dtype"};
}
diff --git a/compiler/tfl-inspect/CMakeLists.txt b/compiler/tfl-inspect/CMakeLists.txt
index c2c7dfe9d..ba019865f 100644
--- a/compiler/tfl-inspect/CMakeLists.txt
+++ b/compiler/tfl-inspect/CMakeLists.txt
@@ -8,6 +8,7 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(tfl-inspect ${DRIVER} ${SOURCES})
target_include_directories(tfl-inspect PRIVATE src)
+target_link_libraries(tfl-inspect arser)
+target_link_libraries(tfl-inspect foder)
target_link_libraries(tfl-inspect mio_tflite)
target_link_libraries(tfl-inspect safemain)
-target_link_libraries(tfl-inspect stdex)
diff --git a/compiler/tfl-inspect/driver/Driver.cpp b/compiler/tfl-inspect/driver/Driver.cpp
index 5cad63c4b..a48001169 100644
--- a/compiler/tfl-inspect/driver/Driver.cpp
+++ b/compiler/tfl-inspect/driver/Driver.cpp
@@ -14,74 +14,63 @@
* limitations under the License.
*/
-#include "Model.h"
#include "Dump.h"
-#include <stdex/Memory.h>
+#include <arser/arser.h>
+#include <foder/FileLoader.h>
#include <functional>
#include <iostream>
#include <map>
+#include <memory>
#include <vector>
#include <string>
-using OptionHook = std::function<std::unique_ptr<tflinspect::DumpInterface>(void)>;
-
int entry(int argc, char **argv)
{
- if (argc < 3)
+ arser::Arser arser{"tfl-inspect allows users to retrieve various information from a TensorFlow "
+ "Lite model files"};
+ arser.add_argument("--operators").nargs(0).help("Dump operators in tflite file");
+ arser.add_argument("--conv2d_weight")
+ .nargs(0)
+ .help("Dump Conv2D series weight operators in tflite file");
+ arser.add_argument("--op_version").nargs(0).help("Dump versions of the operators in tflite file");
+ arser.add_argument("tflite").type(arser::DataType::STR).help("TFLite file to inspect");
+
+ try
+ {
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [options] [tflite]" << std::endl;
- std::cerr << " --operators : dump operators in tflite file" << std::endl;
- std::cerr << " --conv2d_weight : dump Conv2D series weight operators in tflite file"
- << std::endl;
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
return 255;
}
- // Simple argument parser (based on map)
- std::map<std::string, OptionHook> argparse;
-
- argparse["--operators"] = [&](void) {
- // dump all operators
- return std::move(stdex::make_unique<tflinspect::DumpOperators>());
- };
-
- argparse["--conv2d_weight"] = [&](void) {
- // dump Conv2D, DepthwiseConv2D weight operators
- return std::move(stdex::make_unique<tflinspect::DumpConv2DWeight>());
- };
+ if (!arser["--operators"] && !arser["--conv2d_weight"] && !arser["--op_version"])
+ {
+ std::cout << "At least one option must be specified" << std::endl;
+ std::cout << arser;
+ return 255;
+ }
std::vector<std::unique_ptr<tflinspect::DumpInterface>> dumps;
- for (int n = 1; n < argc - 1; ++n)
- {
- const std::string tag{argv[n]};
-
- auto it = argparse.find(tag);
- if (it == argparse.end())
- {
- std::cerr << "Option '" << tag << "' is not supported" << std::endl;
- return 255;
- }
- auto dump = it->second();
- assert(dump != nullptr);
- dumps.push_back(std::move(dump));
- }
+ if (arser["--operators"])
+ dumps.push_back(std::make_unique<tflinspect::DumpOperators>());
+ if (arser["--conv2d_weight"])
+ dumps.push_back(std::make_unique<tflinspect::DumpConv2DWeight>());
+ if (arser["--op_version"])
+ dumps.push_back(std::make_unique<tflinspect::DumpOperatorVersion>());
- std::string model_file = argv[argc - 1];
+ std::string model_file = arser.get<std::string>("tflite");
// Load TF lite model from a tflite file
- auto model = tflinspect::load_tflite(model_file);
- if (model == nullptr)
- {
- std::cerr << "ERROR: Failed to load tflite '" << model_file << "'" << std::endl;
- return 255;
- }
-
- const tflite::Model *tflmodel = model->model();
- if (tflmodel == nullptr)
+ foder::FileLoader fileLoader{model_file};
+ std::vector<char> modelData = fileLoader.load();
+ const tflite::Model *tfliteModel = tflite::GetModel(modelData.data());
+ if (tfliteModel == nullptr)
{
std::cerr << "ERROR: Failed to load tflite '" << model_file << "'" << std::endl;
return 255;
@@ -89,7 +78,7 @@ int entry(int argc, char **argv)
for (auto &dump : dumps)
{
- dump->run(std::cout, tflmodel);
+ dump->run(std::cout, tfliteModel);
}
return 0;
diff --git a/compiler/tfl-inspect/requires.cmake b/compiler/tfl-inspect/requires.cmake
index 2aa101e02..25857ad2b 100644
--- a/compiler/tfl-inspect/requires.cmake
+++ b/compiler/tfl-inspect/requires.cmake
@@ -1,3 +1,4 @@
+require("arser")
+require("foder")
require("mio-tflite")
require("safemain")
-require("stdex")
diff --git a/compiler/tfl-inspect/src/Dump.cpp b/compiler/tfl-inspect/src/Dump.cpp
index 8d879a84e..78e77001c 100644
--- a/compiler/tfl-inspect/src/Dump.cpp
+++ b/compiler/tfl-inspect/src/Dump.cpp
@@ -28,19 +28,22 @@ void DumpOperators::run(std::ostream &os, const tflite::Model *model)
{
tflinspect::Reader reader(model);
- assert(reader.num_subgraph() == 1);
- reader.select_subgraph(0);
+ const uint32_t subgraph_size = reader.num_subgraph();
- auto ops = reader.operators();
-
- // dump operators
- for (uint32_t i = 0; i < ops->Length(); ++i)
+ for (uint32_t g = 0; g < subgraph_size; g++)
{
- const auto op = ops->Get(i);
+ reader.select_subgraph(g);
+ auto ops = reader.operators();
- auto op_name = reader.opcode_name(op);
+ // dump operators
+ for (uint32_t i = 0; i < ops->Length(); ++i)
+ {
+ const auto op = ops->Get(i);
- os << op_name << std::endl;
+ auto op_name = reader.opcode_name(op);
+
+ os << op_name << std::endl;
+ }
}
}
@@ -94,44 +97,85 @@ void DumpConv2DWeight::run(std::ostream &os, const tflite::Model *model)
{
tflinspect::Reader reader(model);
- assert(reader.num_subgraph() == 1);
- reader.select_subgraph(0);
-
- auto ops = reader.operators();
+ const uint32_t subgraph_size = reader.num_subgraph();
- // dump Conv2D, DepthwiseConv2D and its weight input operator
- for (uint32_t i = 0; i < ops->Length(); ++i)
+ for (uint32_t g = 0; g < subgraph_size; g++)
{
- const auto op = ops->Get(i);
- auto bc = reader.builtin_code(op);
+ reader.select_subgraph(g);
+ auto ops = reader.operators();
- if (bc == tflite::BuiltinOperator_CONV_2D || bc == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
+ // dump Conv2D, DepthwiseConv2D and its weight input operator
+ for (uint32_t i = 0; i < ops->Length(); ++i)
{
- const std::vector<int32_t> &inputs = tflinspect::as_index_vector(op->inputs());
- if (inputs.size() < 2)
+ const auto op = ops->Get(i);
+ auto bc = reader.builtin_code(op);
+
+ if (bc == tflite::BuiltinOperator_CONV_2D || bc == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
{
- throw std::runtime_error("Operator has invalid input");
+ const std::vector<int32_t> &inputs = tflinspect::as_index_vector(op->inputs());
+ if (inputs.size() < 2)
+ {
+ throw std::runtime_error("Operator has invalid input");
+ }
+ auto weight_input = inputs[1]; // Tensor ID of weight input
+
+ const auto op_weight = operator_match_output(reader, weight_input);
+ const auto buffer_size = tensor_buffer_size(reader, weight_input);
+
+ std::string weight_op_name = "?";
+
+ if (op_weight == nullptr && buffer_size > 0)
+ {
+ weight_op_name = "CONST";
+ }
+ else if (op_weight != nullptr)
+ {
+ weight_op_name = reader.opcode_name(op_weight);
+ }
+
+ auto op_name = reader.opcode_name(op);
+ os << op_name << "," << weight_op_name << std::endl;
}
- auto weight_input = inputs[1]; // Tensor ID of weight input
+ }
+ }
+}
- const auto op_weight = operator_match_output(reader, weight_input);
- const auto buffer_size = tensor_buffer_size(reader, weight_input);
+} // namespace tflinspect
- std::string weight_op_name = "?";
+namespace tflinspect
+{
- if (op_weight == nullptr && buffer_size > 0)
- {
- weight_op_name = "CONST";
- }
- else if (op_weight != nullptr)
- {
- weight_op_name = reader.opcode_name(op_weight);
- }
+void DumpOperatorVersion::run(std::ostream &os, const tflite::Model *model)
+{
+ std::map<std::string, int32_t> op_version_map;
+
+ tflinspect::Reader reader(model);
+
+ const uint32_t subgraph_size = reader.num_subgraph();
+
+ for (uint32_t g = 0; g < subgraph_size; g++)
+ {
+ reader.select_subgraph(g);
+ auto ops = reader.operators();
+
+    // collect the highest version seen for each operator name
+ for (uint32_t i = 0; i < ops->Length(); ++i)
+ {
+ const auto op = ops->Get(i);
auto op_name = reader.opcode_name(op);
- os << op_name << "," << weight_op_name << std::endl;
+ auto op_version = reader.opcodes().at(op->opcode_index())->version();
+
+ if (op_version_map.find(op_name) == op_version_map.end() ||
+ op_version_map[op_name] < op_version)
+ op_version_map[op_name] = op_version;
}
}
+
+ for (auto op : op_version_map)
+ {
+ os << op.first << "," << op.second << std::endl;
+ }
}
} // namespace tflinspect
diff --git a/compiler/tfl-inspect/src/Dump.h b/compiler/tfl-inspect/src/Dump.h
index 798c1db0e..83397a2da 100644
--- a/compiler/tfl-inspect/src/Dump.h
+++ b/compiler/tfl-inspect/src/Dump.h
@@ -51,6 +51,15 @@ public:
void run(std::ostream &os, const tflite::Model *model);
};
+class DumpOperatorVersion final : public DumpInterface
+{
+public:
+ DumpOperatorVersion() = default;
+
+public:
+ void run(std::ostream &os, const tflite::Model *model);
+};
+
} // namespace tflinspect
#endif // __DUMP_H__
diff --git a/compiler/tfl-inspect/src/Model.cpp b/compiler/tfl-inspect/src/Model.cpp
deleted file mode 100644
index 8c3bf379a..000000000
--- a/compiler/tfl-inspect/src/Model.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Model.h"
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-
-namespace
-{
-
-class MemoryMappedModel final : public tflinspect::Model
-{
-public:
- /**
- * @require fd and data SHOULD be valid
- */
- explicit MemoryMappedModel(int fd, void *data, size_t size) : _fd{fd}, _data{data}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- ~MemoryMappedModel()
- {
- munmap(_data, _size);
- close(_fd);
- }
-
-public:
- MemoryMappedModel(const MemoryMappedModel &) = delete;
- MemoryMappedModel(MemoryMappedModel &&) = delete;
-
-public:
- const ::tflite::Model *model(void) const override { return ::tflite::GetModel(_data); }
-
-private:
- int _fd = -1;
- void *_data = nullptr;
- size_t _size = 0;
-};
-
-class FileDescriptor final
-{
-public:
- FileDescriptor(int value) : _value{value}
- {
- // DO NOTHING
- }
-
-public:
- // NOTE Copy is not allowed
- FileDescriptor(const FileDescriptor &) = delete;
-
-public:
- // NOTE Move is allowed
- FileDescriptor(FileDescriptor &&fd) { _value = fd.release(); }
-
-public:
- ~FileDescriptor()
- {
- if (_value != -1)
- {
- // Close on descturction
- close(_value);
- }
- }
-
-public:
- int value(void) const { return _value; }
-
-public:
- int release(void)
- {
- auto res = _value;
- _value = -1;
- return res;
- }
-
-private:
- int _value = -1;
-};
-
-} // namespace
-
-namespace tflinspect
-{
-
-std::unique_ptr<Model> load_tflite(const std::string &path)
-{
- FileDescriptor fd = open(path.c_str(), O_RDONLY);
-
- if (fd.value() == -1)
- {
- // Return nullptr on open failure
- return nullptr;
- }
-
- struct stat st;
- if (fstat(fd.value(), &st) == -1)
- {
- // Return nullptr on fstat failure
- return nullptr;
- }
-
- auto size = st.st_size;
- auto data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd.value(), 0);
-
- if (data == MAP_FAILED)
- {
- // Return nullptr on mmap failure
- return nullptr;
- }
-
- // Check if file is a valid Flatbuffer file
- const uint8_t *u8data = reinterpret_cast<const uint8_t *>(data);
- flatbuffers::Verifier verifier{u8data, static_cast<size_t>(size)};
- if (!tflite::VerifyModelBuffer(verifier))
- {
- munmap(data, size);
- close(fd.release());
- return nullptr;
- }
-
- return std::unique_ptr<tflinspect::Model>{new MemoryMappedModel(fd.release(), data, size)};
-}
-
-} // namespace tflinspect
diff --git a/compiler/tfl-inspect/src/Model.h b/compiler/tfl-inspect/src/Model.h
deleted file mode 100644
index a69fb8be9..000000000
--- a/compiler/tfl-inspect/src/Model.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MODEL_H__
-#define __MODEL_H__
-
-#include <mio/tflite/schema_generated.h>
-
-#include <memory>
-
-namespace tflinspect
-{
-
-struct Model
-{
- virtual ~Model() = default;
-
- virtual const ::tflite::Model *model(void) const = 0;
-};
-
-/**
- * @brief Load TensorFlow Lite model (as a raw Model) from a given path
- *
- * @note May return a nullptr
- */
-std::unique_ptr<Model> load_tflite(const std::string &path);
-
-} // namespace tflinspeat
-
-#endif // __MODEL_H__
diff --git a/compiler/tfl-inspect/src/Reader.cpp b/compiler/tfl-inspect/src/Reader.cpp
index 7bd2fe2c6..5be289446 100644
--- a/compiler/tfl-inspect/src/Reader.cpp
+++ b/compiler/tfl-inspect/src/Reader.cpp
@@ -50,7 +50,10 @@ std::string opcode_name(const tflite::OperatorCode *opcode)
if (!opcode->custom_code())
return "(invalid custom)";
- return opcode->custom_code()->c_str();
+ std::string custom_op = "CUSTOM(";
+ custom_op += opcode->custom_code()->c_str();
+ custom_op += ")";
+ return custom_op;
}
tflite::BuiltinOperator code = opcode->builtin_code();
diff --git a/compiler/tfl-verify/CMakeLists.txt b/compiler/tfl-verify/CMakeLists.txt
index a368b2930..d33059fde 100644
--- a/compiler/tfl-verify/CMakeLists.txt
+++ b/compiler/tfl-verify/CMakeLists.txt
@@ -6,7 +6,7 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(tfl-verify ${SOURCES})
target_include_directories(tfl-verify PRIVATE src)
+target_link_libraries(tfl-verify foder)
target_link_libraries(tfl-verify mio_tflite)
target_link_libraries(tfl-verify safemain)
target_link_libraries(tfl-verify cwrap)
-target_link_libraries(tfl-verify stdex)
diff --git a/compiler/tfl-verify/requires.cmake b/compiler/tfl-verify/requires.cmake
index e479a8329..ed6b84db5 100644
--- a/compiler/tfl-verify/requires.cmake
+++ b/compiler/tfl-verify/requires.cmake
@@ -1,4 +1,4 @@
+require("foder")
require("mio-tflite")
require("safemain")
require("cwrap")
-require("stdex")
diff --git a/compiler/tfl-verify/src/Driver.cpp b/compiler/tfl-verify/src/Driver.cpp
index 367c731a6..81f6d5489 100644
--- a/compiler/tfl-verify/src/Driver.cpp
+++ b/compiler/tfl-verify/src/Driver.cpp
@@ -16,9 +16,8 @@
#include "VerifyFlatBuffers.h"
-#include <stdex/Memory.h>
-
#include <iostream>
+#include <memory>
#include <string>
int entry(int argc, char **argv)
@@ -30,7 +29,7 @@ int entry(int argc, char **argv)
std::cerr << "USAGE: " << argv[0] << " [tflite]" << std::endl;
return 255;
}
- auto verifier = stdex::make_unique<VerifyFlatbuffers>();
+ auto verifier = std::make_unique<VerifyFlatbuffers>();
std::string model_file = argv[argc - 1];
diff --git a/compiler/tfl-verify/src/Model.cpp b/compiler/tfl-verify/src/Model.cpp
deleted file mode 100644
index efac1210d..000000000
--- a/compiler/tfl-verify/src/Model.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Model.h"
-
-#include <cwrap/Fildes.h>
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-
-namespace
-{
-
-class MemoryMappedModel final : public ModelData
-{
-public:
- /**
- * @require fd and data SHOULD be valid
- */
- explicit MemoryMappedModel(int fd, void *data, size_t size) : _fd{fd}, _data{data}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- ~MemoryMappedModel()
- {
- munmap(_data, _size);
- close(_fd);
- }
-
-public:
- MemoryMappedModel(const MemoryMappedModel &) = delete;
- MemoryMappedModel(MemoryMappedModel &&) = delete;
-
-public:
- const void *data(void) const override { return _data; };
- const size_t size(void) const override { return _size; };
-
-private:
- int _fd = -1;
- void *_data = nullptr;
- size_t _size = 0;
-};
-
-} // namespace
-
-std::unique_ptr<ModelData> load_modeldata(const std::string &path)
-{
- cwrap::Fildes fd(open(path.c_str(), O_RDONLY));
-
- if (fd.get() == -1)
- {
- // Return nullptr on open failure
- return nullptr;
- }
-
- struct stat st;
- if (fstat(fd.get(), &st) == -1)
- {
- // Return nullptr on fstat failure
- return nullptr;
- }
-
- auto size = st.st_size;
- auto data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd.get(), 0);
-
- if (data == MAP_FAILED)
- {
- // Return nullptr on mmap failure
- return nullptr;
- }
-
- return std::unique_ptr<ModelData>{new MemoryMappedModel(fd.release(), data, size)};
-}
diff --git a/compiler/tfl-verify/src/Model.h b/compiler/tfl-verify/src/Model.h
deleted file mode 100644
index 44f40e24c..000000000
--- a/compiler/tfl-verify/src/Model.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MODEL_H__
-#define __MODEL_H__
-
-#include <memory>
-#include <string>
-
-struct ModelData
-{
- virtual ~ModelData() = default;
-
- virtual const void *data(void) const = 0;
- virtual const size_t size(void) const = 0;
-};
-
-/**
- * @brief Load TF Lite model (as a raw data) from a given path
- *
- * @note May return a nullptr
- */
-std::unique_ptr<ModelData> load_modeldata(const std::string &path);
-
-#endif // __MODEL_H__
diff --git a/compiler/tfl-verify/src/VerifyFlatBuffers.cpp b/compiler/tfl-verify/src/VerifyFlatBuffers.cpp
index 253fcb210..7fb48a71e 100644
--- a/compiler/tfl-verify/src/VerifyFlatBuffers.cpp
+++ b/compiler/tfl-verify/src/VerifyFlatBuffers.cpp
@@ -16,16 +16,16 @@
#include "VerifyFlatBuffers.h"
-#include "Model.h"
-
+#include <foder/FileLoader.h>
#include <mio/tflite/schema_generated.h>
int VerifyFlatbuffers::run(const std::string &model_file)
{
- auto modeldata = load_modeldata(model_file);
+ foder::FileLoader fileLoader{model_file};
+ std::vector<char> modeldata = fileLoader.load();
- const uint8_t *data = reinterpret_cast<const uint8_t *>(modeldata->data());
- flatbuffers::Verifier verifier{data, modeldata->size()};
+ const uint8_t *data = reinterpret_cast<const uint8_t *>(modeldata.data());
+ flatbuffers::Verifier verifier{data, modeldata.size()};
if (!tflite::VerifyModelBuffer(verifier))
{
diff --git a/compiler/tflchef/CMakeLists.txt b/compiler/tflchef/CMakeLists.txt
index 71c5e2ab1..ebc873342 100644
--- a/compiler/tflchef/CMakeLists.txt
+++ b/compiler/tflchef/CMakeLists.txt
@@ -1,15 +1,19 @@
nnas_find_package(Protobuf QUIET)
if(NOT Protobuf_FOUND)
+ message(STATUS "Build tflchef: FAILED (missing Protobuf)")
return()
endif(NOT Protobuf_FOUND)
if(NOT TARGET mio_tflite)
+ message(STATUS "Build tflchef: FAILED (missing mio_tflite)")
return()
endif(NOT TARGET mio_tflite)
# Recipe Parser
add_subdirectory(proto)
+# Log
+add_subdirectory(log)
# Core Library
add_subdirectory(core)
# TFlite Library
diff --git a/compiler/tflchef/core/CMakeLists.txt b/compiler/tflchef/core/CMakeLists.txt
index 6a6282027..43f6b8b03 100644
--- a/compiler/tflchef/core/CMakeLists.txt
+++ b/compiler/tflchef/core/CMakeLists.txt
@@ -4,4 +4,6 @@ add_library(tflchef_core STATIC ${SOURCES})
target_include_directories(tflchef_core PUBLIC include)
target_include_directories(tflchef_core PRIVATE src)
target_link_libraries(tflchef_core tflchef_proto)
+target_link_libraries(tflchef_core tflchef_log)
target_link_libraries(tflchef_core mio_tflite)
+target_link_libraries(tflchef_core souschef)
diff --git a/compiler/tflchef/core/src/Arguments.h b/compiler/tflchef/core/src/Arguments.h
deleted file mode 100644
index 341aea6c9..000000000
--- a/compiler/tflchef/core/src/Arguments.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __ARGUMENTS_H__
-#define __ARGUMENTS_H__
-
-#include <cstdint>
-#include <string>
-
-/**
- * @brief Read-only string sequence view
- */
-struct Arguments
-{
- virtual ~Arguments() = default;
-
- virtual uint32_t count(void) const = 0;
- virtual const std::string &value(uint32_t n) const = 0;
-};
-
-#endif // __ARGUMENTS_H__
diff --git a/compiler/tflchef/core/src/Convert.cpp b/compiler/tflchef/core/src/Convert.cpp
index 86a31d9b7..dc8e31db0 100644
--- a/compiler/tflchef/core/src/Convert.cpp
+++ b/compiler/tflchef/core/src/Convert.cpp
@@ -41,6 +41,8 @@ tflite::ActivationFunctionType as_tflite_activation(const tflchef::Activation &v
return tflite::ActivationFunctionType_NONE;
case tflchef::RELU:
return tflite::ActivationFunctionType_RELU;
+ case tflchef::RELU_N1_TO_1:
+ return tflite::ActivationFunctionType_RELU_N1_TO_1;
case tflchef::RELU6:
return tflite::ActivationFunctionType_RELU6;
default:
@@ -70,3 +72,18 @@ tflite::TensorType as_tflite_tensortype(const tflchef::TensorType &value)
throw std::runtime_error{"Unknown tensor type"};
}
+
+tflite::MirrorPadMode as_tflite_mirrorpadmode(const tflchef::MirrorPadMode &value)
+{
+ switch (value)
+ {
+ case tflchef::REFLECT:
+ return tflite::MirrorPadMode_REFLECT;
+ case tflchef::SYMMETRIC:
+ return tflite::MirrorPadMode_SYMMETRIC;
+ default:
+ break;
+ }
+
+ throw std::runtime_error{"Unknown mirrorpad mode"};
+}
diff --git a/compiler/tflchef/core/src/Convert.h b/compiler/tflchef/core/src/Convert.h
index ed15a5572..b56e6ef69 100644
--- a/compiler/tflchef/core/src/Convert.h
+++ b/compiler/tflchef/core/src/Convert.h
@@ -27,5 +27,6 @@
tflite::Padding as_tflite_padding(const tflchef::Padding &value);
tflite::ActivationFunctionType as_tflite_activation(const tflchef::Activation &value);
tflite::TensorType as_tflite_tensortype(const tflchef::TensorType &value);
+tflite::MirrorPadMode as_tflite_mirrorpadmode(const tflchef::MirrorPadMode &value);
#endif // __CONVERT_H__
diff --git a/compiler/tflchef/core/src/CustomOp/AddV2.cpp b/compiler/tflchef/core/src/CustomOp/AddV2.cpp
new file mode 100644
index 000000000..dffd336cd
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/AddV2.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AddV2.h"
+
+#include "flatbuffers/flexbuffers.h"
+
+flatbuffers::Offset<void> AddV2Chef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ return flatbuffers::Offset<void>();
+}
+
+flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+AddV2Chef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.type() == "AddV2");
+
+ /**
+ * REGISTER_OP("AddV2")
+ .Input("x: T")
+ .Input("y: T")
+ .Output("z: T")
+ .Attr(
+ "T: {bfloat16, half, float, double, uint8, int8, int16, int32, int64, "
+ "complex64, complex128}")
+ .SetShapeFn(shape_inference::BroadcastBinaryOpShapeFn)
+ .SetIsAggregate()
+ .SetIsCommutative();
+ */
+
+ auto flex_buffers = std::make_unique<flexbuffers::Builder>();
+ size_t map_start = flex_buffers->StartMap();
+
+ // TODO Support more data types
+ flex_buffers->Int("T", tflite::TensorType_FLOAT32);
+
+ flex_buffers->EndMap(map_start);
+ flex_buffers->Finish();
+
+ auto circle_custom_options = fbb.CreateVector(flex_buffers->GetBuffer());
+ return circle_custom_options;
+}
+
+std::unique_ptr<OpChef> AddV2ChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new AddV2Chef{operation}};
+}
diff --git a/compiler/tflchef/core/src/CustomOp/AddV2.h b/compiler/tflchef/core/src/CustomOp/AddV2.h
new file mode 100644
index 000000000..dbbaf5a62
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/AddV2.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ADDV2_H__
+#define __OP_ADDV2_H__
+
+#include "OpChef.h"
+
+class AddV2Chef final : public OpChef
+{
+public:
+ explicit AddV2Chef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CUSTOM; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+ custom_value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct AddV2ChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ADDV2_H__
diff --git a/compiler/tflchef/core/src/CustomOp/All.cpp b/compiler/tflchef/core/src/CustomOp/All.cpp
new file mode 100644
index 000000000..b3ae821a4
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/All.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "All.h"
+
+#include "flatbuffers/flexbuffers.h"
+
+flatbuffers::Offset<void> AllChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ return flatbuffers::Offset<void>();
+}
+
+flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+AllChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.type() == "All");
+
+ /**
+ * REGISTER_OP("All")
+ .Input("input: bool")
+ .Input("reduction_indices: Tidx")
+ .Output("output: bool")
+ .Attr("keep_dims: bool = false")
+ .Attr("Tidx: {int32, int64} = DT_INT32")
+ .SetShapeFn(shape_inference::ReductionShape);
+ */
+
+ auto flex_buffers = std::make_unique<flexbuffers::Builder>();
+ size_t map_start = flex_buffers->StartMap();
+
+ // TODO Support more data types
+ flex_buffers->Int("Tidx", tflite::TensorType_INT32);
+ flex_buffers->Bool("keep_dims", operation.all_options().keep_dims());
+
+ flex_buffers->EndMap(map_start);
+ flex_buffers->Finish();
+
+ auto circle_custom_options = fbb.CreateVector(flex_buffers->GetBuffer());
+ return circle_custom_options;
+}
+
+std::unique_ptr<OpChef> AllChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new AllChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/CustomOp/All.h b/compiler/tflchef/core/src/CustomOp/All.h
new file mode 100644
index 000000000..f7949f3d2
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/All.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ALL_H__
+#define __OP_ALL_H__
+
+#include "OpChef.h"
+
+class AllChef final : public OpChef
+{
+public:
+ explicit AllChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CUSTOM; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+ custom_value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct AllChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ALL_H__
diff --git a/compiler/tflchef/core/src/CustomOp/BatchMatMulV2.cpp b/compiler/tflchef/core/src/CustomOp/BatchMatMulV2.cpp
new file mode 100644
index 000000000..595f3b9bb
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/BatchMatMulV2.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchMatMulV2.h"
+
+#include "flatbuffers/flexbuffers.h"
+
+flatbuffers::Offset<void> BatchMatMulV2Chef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ return flatbuffers::Offset<void>();
+}
+
+flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+BatchMatMulV2Chef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.type() == "BatchMatMulV2");
+
+ /**
+ * REGISTER_OP("BatchMatMulV2")
+ .Input("x: T")
+ .Input("y: T")
+ .Output("output: T")
+ .Attr(
+ "T: {bfloat16, half, float, double, int32, int64, complex64, "
+ "complex128}")
+ .Attr("adj_x: bool = false")
+ .Attr("adj_y: bool = false")
+ .SetShapeFn(shape_inference::BatchMatMulV2Shape);
+ */
+
+ auto flex_buffers = std::make_unique<flexbuffers::Builder>();
+ size_t map_start = flex_buffers->StartMap();
+
+ flex_buffers->Bool("adj_x", operation.batch_matmul_options().adj_x());
+ flex_buffers->Bool("adj_y", operation.batch_matmul_options().adj_y());
+ // TODO Support more data types
+ flex_buffers->Int("T", tflite::TensorType_FLOAT32);
+
+ flex_buffers->EndMap(map_start);
+ flex_buffers->Finish();
+
+ auto circle_custom_options = fbb.CreateVector(flex_buffers->GetBuffer());
+ return circle_custom_options;
+}
+
+std::unique_ptr<OpChef> BatchMatMulV2ChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new BatchMatMulV2Chef{operation}};
+}
diff --git a/compiler/tflchef/core/src/CustomOp/BatchMatMulV2.h b/compiler/tflchef/core/src/CustomOp/BatchMatMulV2.h
new file mode 100644
index 000000000..d20f4d2a5
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/BatchMatMulV2.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_BATCH_MATMUL_V2_H__
+#define __OP_BATCH_MATMUL_V2_H__
+
+#include "OpChef.h"
+
+class BatchMatMulV2Chef final : public OpChef
+{
+public:
+ explicit BatchMatMulV2Chef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CUSTOM; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+ custom_value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct BatchMatMulV2ChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_BATCH_MATMUL_V2_H__
diff --git a/compiler/tflchef/core/src/CustomOp/MatMul.cpp b/compiler/tflchef/core/src/CustomOp/MatMul.cpp
new file mode 100644
index 000000000..ba34aa8db
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/MatMul.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatMul.h"
+
+#include "flatbuffers/flexbuffers.h"
+
+flatbuffers::Offset<void> MatMulChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ return flatbuffers::Offset<void>();
+}
+
+flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+MatMulChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.type() == "MatMul");
+
+ /**
+ * REGISTER_OP("MatMul")
+ .Input("a: T")
+ .Input("b: T")
+ .Output("product: T")
+ .Attr("transpose_a: bool = false")
+ .Attr("transpose_b: bool = false")
+ .Attr("T: {half, float, double, int32, complex64, complex128}")
+ .SetShapeFn(shape_inference::MatMulShape)
+ */
+
+ auto flex_buffers = std::make_unique<flexbuffers::Builder>();
+ size_t map_start = flex_buffers->StartMap();
+
+ flex_buffers->Bool("transpose_a", operation.matmul_options().transpose_a());
+ flex_buffers->Bool("transpose_b", operation.matmul_options().transpose_b());
+ // TODO how do we support other types?
+ flex_buffers->Int("T", tflite::TensorType_FLOAT32);
+
+ flex_buffers->EndMap(map_start);
+ flex_buffers->Finish();
+
+ auto circle_custom_options = fbb.CreateVector(flex_buffers->GetBuffer());
+ return circle_custom_options;
+}
+
+std::unique_ptr<OpChef> MatMulChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new MatMulChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/CustomOp/MatMul.h b/compiler/tflchef/core/src/CustomOp/MatMul.h
new file mode 100644
index 000000000..b0307f977
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/MatMul.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MATMUL_H__
+#define __OP_MATMUL_H__
+
+#include "OpChef.h"
+
+class MatMulChef final : public OpChef
+{
+public:
+ explicit MatMulChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CUSTOM; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+ custom_value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct MatMulChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_MATMUL_H__
diff --git a/compiler/tflchef/core/src/CustomOp/MatrixBandPart.cpp b/compiler/tflchef/core/src/CustomOp/MatrixBandPart.cpp
new file mode 100644
index 000000000..d12597edb
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/MatrixBandPart.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatrixBandPart.h"
+
+#include "flatbuffers/flexbuffers.h"
+
+flatbuffers::Offset<void> MatrixBandPartChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ return flatbuffers::Offset<void>();
+}
+
+flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+MatrixBandPartChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.type() == "MatrixBandPart");
+
+ /**
+ * REGISTER_OP("MatrixBandPart")
+ .Input("input: T")
+ .Input("num_lower: Tindex")
+ .Input("num_upper: Tindex")
+ .Output("band: T")
+ .Attr("T: type")
+ .Attr("Tindex: {int32, int64} = DT_INT64")
+ .SetShapeFn(shape_inference::UnchangedShape);
+ */
+
+ auto flex_buffers = std::make_unique<flexbuffers::Builder>();
+ size_t map_start = flex_buffers->StartMap();
+
+ // TODO Support more data types
+ flex_buffers->Int("T", tflite::TensorType_FLOAT32);
+ flex_buffers->Int("Tindex", tflite::TensorType_INT64);
+
+ flex_buffers->EndMap(map_start);
+ flex_buffers->Finish();
+
+ auto circle_custom_options = fbb.CreateVector(flex_buffers->GetBuffer());
+ return circle_custom_options;
+}
+
+std::unique_ptr<OpChef> MatrixBandPartChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new MatrixBandPartChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/CustomOp/MatrixBandPart.h b/compiler/tflchef/core/src/CustomOp/MatrixBandPart.h
new file mode 100644
index 000000000..54a8a3afb
--- /dev/null
+++ b/compiler/tflchef/core/src/CustomOp/MatrixBandPart.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MATRIXBANDPART_H__
+#define __OP_MATRIXBANDPART_H__
+
+#include "OpChef.h"
+
+class MatrixBandPartChef final : public OpChef
+{
+public:
+ explicit MatrixBandPartChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CUSTOM; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+ custom_value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct MatrixBandPartChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_MATRIXBANDPART_H__
diff --git a/compiler/tflchef/core/src/Data/Constant.h b/compiler/tflchef/core/src/Data/Constant.h
deleted file mode 100644
index ebe1f3d93..000000000
--- a/compiler/tflchef/core/src/Data/Constant.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __CONSTANT_FILLER_H__
-#define __CONSTANT_FILLER_H__
-
-#include "DataChef.h"
-#include "LexicalCast.h"
-
-template <typename T> class ConstantDataChef final : public DataChef
-{
-public:
- ConstantDataChef(const T &value) : _value{value}
- {
- // DO NOTHING
- }
-
-public:
- std::vector<uint8_t> generate(int32_t count) const override
- {
- std::vector<uint8_t> res;
-
- for (uint32_t n = 0; n < count; ++n)
- {
- const uint8_t *arr = reinterpret_cast<const uint8_t *>(&_value);
-
- for (uint32_t b = 0; b < sizeof(T); ++b)
- {
- res.emplace_back(arr[b]);
- }
- }
-
- return res;
- }
-
-private:
- T _value;
-};
-
-template <typename T> struct ConstantDataChefFactory : public DataChefFactory
-{
- std::unique_ptr<DataChef> create(const Arguments &args) const
- {
- auto const value = to_number<T>(args.value(0));
- return std::unique_ptr<DataChef>{new ConstantDataChef<T>{value}};
- }
-};
-
-#endif // __CONSTANT_FILLER_H__
diff --git a/compiler/tflchef/core/src/Data/Explicit.h b/compiler/tflchef/core/src/Data/Explicit.h
deleted file mode 100644
index 088e791b9..000000000
--- a/compiler/tflchef/core/src/Data/Explicit.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __EXPLICIT_FILLER_H__
-#define __EXPLICIT_FILLER_H__
-
-#include "DataChef.h"
-#include "LexicalCast.h"
-
-#include <vector>
-
-template <typename T> class ExplicitDataChef final : public DataChef
-{
-public:
- ExplicitDataChef()
- {
- // DO NOTHING
- }
-
-public:
- std::vector<uint8_t> generate(int32_t count) const override
- {
- std::vector<uint8_t> res;
-
- for (uint32_t n = 0; n < count; ++n)
- {
- T const value = (n < _values.size()) ? _values.at(n) : T{};
- const uint8_t *arr = reinterpret_cast<const uint8_t *>(&value);
-
- for (uint32_t b = 0; b < sizeof(T); ++b)
- {
- res.emplace_back(arr[b]);
- }
- }
-
- return res;
- }
-
-public:
- void insert(const T &value) { _values.emplace_back(value); }
-
-private:
- std::vector<T> _values;
-};
-
-template <typename T> struct ExplicitDataChefFactory : public DataChefFactory
-{
- std::unique_ptr<DataChef> create(const Arguments &args) const
- {
- std::unique_ptr<ExplicitDataChef<T>> res{new ExplicitDataChef<T>};
-
- for (uint32_t n = 0; n < args.count(); ++n)
- {
- auto const value = to_number<T>(args.value(n));
- res->insert(value);
- }
-
- return std::move(res);
- }
-};
-
-#endif // __EXPLICIT_FILLER_H__
diff --git a/compiler/tflchef/core/src/Data/Gaussian.cpp b/compiler/tflchef/core/src/Data/Gaussian.cpp
deleted file mode 100644
index c515d1104..000000000
--- a/compiler/tflchef/core/src/Data/Gaussian.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Gaussian.h"
-#include "LexicalCast.h"
-
-#include <random>
-#include <chrono>
-
-#include <cassert>
-#include <stdexcept>
-
-std::vector<uint8_t> GaussianFloat32DataChef::generate(int32_t count) const
-{
- // TODO Support seed value override
- int seed = std::chrono::system_clock::now().time_since_epoch().count();
-
- std::minstd_rand rand{seed};
- std::normal_distribution<float> dist{_mean, _stddev};
-
- std::vector<uint8_t> res;
-
- for (uint32_t n = 0; n < count; ++n)
- {
- auto const value = dist(rand);
- auto const arr = reinterpret_cast<const uint8_t *>(&value);
-
- for (uint32_t b = 0; b < sizeof(float); ++b)
- {
- res.emplace_back(arr[b]);
- }
- }
-
- return res;
-}
-
-std::vector<uint8_t> GaussianInt32DataChef::generate(int32_t count) const
-{
- // TODO Support seed value override
- int seed = std::chrono::system_clock::now().time_since_epoch().count();
-
- std::minstd_rand rand{seed};
- std::normal_distribution<float> dist{_mean, _stddev};
-
- std::vector<uint8_t> res;
-
- for (uint32_t n = 0; n < count; ++n)
- {
- auto const value = static_cast<int32_t>(dist(rand));
- auto const arr = reinterpret_cast<const uint8_t *>(&value);
-
- for (uint32_t b = 0; b < sizeof(int32_t); ++b)
- {
- res.emplace_back(arr[b]);
- }
- }
-
- return res;
-}
-
-std::vector<uint8_t> GaussianUint8DataChef::generate(int32_t count) const
-{
- // TODO Support seed value override
- int seed = std::chrono::system_clock::now().time_since_epoch().count();
-
- std::minstd_rand rand{seed};
- std::normal_distribution<float> dist{_mean, _stddev};
-
- std::vector<uint8_t> res;
-
- for (uint32_t n = 0; n < count; ++n)
- {
- auto const value = static_cast<uint8_t>(dist(rand)); // uint8_t for data type
- auto const arr = reinterpret_cast<const uint8_t *>(&value); // uint8_t for byte streaming
-
- for (uint32_t b = 0; b < sizeof(uint8_t); ++b)
- {
- res.emplace_back(arr[b]);
- }
- }
-
- return res;
-}
-
-std::unique_ptr<DataChef> GaussianFloat32DataChefFactory::create(const Arguments &args) const
-{
- if (args.count() != 2)
- {
- throw std::runtime_error{"invalid argument count: two arguments (mean/stddev) are expected"};
- }
-
- auto const mean = to_number<float>(args.value(0));
- auto const stddev = to_number<float>(args.value(1));
-
- return std::unique_ptr<DataChef>{new GaussianFloat32DataChef{mean, stddev}};
-}
-
-std::unique_ptr<DataChef> GaussianInt32DataChefFactory::create(const Arguments &args) const
-{
- if (args.count() != 2)
- {
- throw std::runtime_error{"invalid argument count: two arguments (mean/stddev) are expected"};
- }
-
- auto const mean = to_number<float>(args.value(0));
- auto const stddev = to_number<float>(args.value(1));
-
- return std::unique_ptr<DataChef>{new GaussianInt32DataChef{mean, stddev}};
-}
-
-std::unique_ptr<DataChef> GaussianUint8DataChefFactory::create(const Arguments &args) const
-{
- if (args.count() != 2)
- {
- throw std::runtime_error{"invalid argument count: two arguments (mean/stddev) are expected"};
- }
-
- auto const mean = to_number<float>(args.value(0));
- auto const stddev = to_number<float>(args.value(1));
-
- return std::unique_ptr<DataChef>{new GaussianUint8DataChef{mean, stddev}};
-}
diff --git a/compiler/tflchef/core/src/Data/Gaussian.h b/compiler/tflchef/core/src/Data/Gaussian.h
deleted file mode 100644
index 81a28d2d1..000000000
--- a/compiler/tflchef/core/src/Data/Gaussian.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __GAUSSIAN_FILLER_H__
-#define __GAUSSIAN_FILLER_H__
-
-#include "DataChef.h"
-
-/**
- * @brief Generate a sequence of random values according to the gaussian(=normal) distribution
- */
-class GaussianFloat32DataChef final : public DataChef
-{
-public:
- GaussianFloat32DataChef(float mean, float stddev) : _mean{mean}, _stddev{stddev}
- {
- // DO NOTHING
- }
-
-public:
- std::vector<uint8_t> generate(int32_t count) const override;
-
-private:
- float _mean;
- float _stddev;
-};
-
-class GaussianInt32DataChef final : public DataChef
-{
-public:
- GaussianInt32DataChef(float mean, float stddev) : _mean{mean}, _stddev{stddev}
- {
- // DO NOTHING
- }
-
-public:
- std::vector<uint8_t> generate(int32_t count) const override;
-
-private:
- float _mean;
- float _stddev;
-};
-
-class GaussianUint8DataChef final : public DataChef
-{
-public:
- GaussianUint8DataChef(float mean, float stddev) : _mean{mean}, _stddev{stddev}
- {
- // DO NOTHING
- }
-
-public:
- std::vector<uint8_t> generate(int32_t count) const override;
-
-private:
- float _mean;
- float _stddev;
-};
-
-struct GaussianFloat32DataChefFactory : public DataChefFactory
-{
- std::unique_ptr<DataChef> create(const Arguments &args) const;
-};
-
-struct GaussianInt32DataChefFactory : public DataChefFactory
-{
- std::unique_ptr<DataChef> create(const Arguments &args) const;
-};
-
-struct GaussianUint8DataChefFactory : public DataChefFactory
-{
- std::unique_ptr<DataChef> create(const Arguments &args) const;
-};
-
-#endif // __GAUSSIAN_FILLER_H__
diff --git a/compiler/tflchef/core/src/DataChef.def b/compiler/tflchef/core/src/DataChef.def
deleted file mode 100644
index 89d34a202..000000000
--- a/compiler/tflchef/core/src/DataChef.def
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef DATA_CHEF
-#error "Define DATA_CHEF first"
-#endif // DATA_CHEF
-
-// DATA_CHEF(TYPE, NAME, FACTORY_CLASS)
-// "TYPE" SHOULD BE an enum tag of tflchef::TensorType
-DATA_CHEF(FLOAT32, constant, ConstantDataChefFactory<float>)
-DATA_CHEF(BOOL, constant, ConstantDataChefFactory<bool>)
-DATA_CHEF(INT32, explicit, ExplicitDataChefFactory<int>)
-DATA_CHEF(UINT8, explicit, ExplicitDataChefFactory<uint8_t>)
-DATA_CHEF(BOOL, explicit, ExplicitDataChefFactory<bool>)
-DATA_CHEF(FLOAT32, explicit, ExplicitDataChefFactory<float>)
-DATA_CHEF(FLOAT32, gaussian, GaussianFloat32DataChefFactory)
-DATA_CHEF(INT32, gaussian, GaussianInt32DataChefFactory)
-DATA_CHEF(UINT8, gaussian, GaussianUint8DataChefFactory)
diff --git a/compiler/tflchef/core/src/DataChef.h b/compiler/tflchef/core/src/DataChef.h
deleted file mode 100644
index d0571028a..000000000
--- a/compiler/tflchef/core/src/DataChef.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __DATA_CHEF_H__
-#define __DATA_CHEF_H__
-
-#include "Arguments.h"
-
-#include <cstdint>
-#include <memory>
-#include <vector>
-
-using Data = std::vector<uint8_t>;
-
-/**
- * @brief Data Generator
- */
-struct DataChef
-{
- virtual ~DataChef() = default;
-
- // TODO Allow users to query the type of elements that this DataChef generates
-
- /**
- * @brief Generate a sequence of 'count' elements as a byte sequence
- *
- * Let D be the return value of generate(N).
- * Then, D.size() == N * sizeof(T) where T is the element type.
- */
- virtual Data generate(int32_t count) const = 0;
-};
-
-/**
- * @brief Data Generator Factory
- */
-struct DataChefFactory
-{
- virtual ~DataChefFactory() = default;
-
- virtual std::unique_ptr<DataChef> create(const Arguments &args) const = 0;
-};
-
-#endif // __DATA_CHEF_H__
diff --git a/compiler/tflchef/core/src/DataChefs.h b/compiler/tflchef/core/src/DataChefs.h
deleted file mode 100644
index 2310ae89d..000000000
--- a/compiler/tflchef/core/src/DataChefs.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __DATA_CHEFS_H__
-#define __DATA_CHEFS_H__
-
-#include "Data/Constant.h"
-#include "Data/Explicit.h"
-#include "Data/Gaussian.h"
-
-#endif // __DATA_CHEFS_H__
diff --git a/compiler/tflchef/core/src/Dataset.h b/compiler/tflchef/core/src/Dataset.h
deleted file mode 100644
index 9d5c7a43f..000000000
--- a/compiler/tflchef/core/src/Dataset.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __DATASET_H__
-#define __DATASET_H__
-
-#include <vector>
-
-template <typename T> class Dataset
-{
-public:
- Dataset(const std::vector<T> &vec) : _vec{vec}
- {
- // DO NOTHING
- }
-
-public:
- Dataset(std::vector<T> &&vec) : _vec{std::move(vec)}
- {
- // DO NOTHING
- }
-
-public:
- template <typename Func> auto map(Func f) const -> Dataset<decltype(f(std::declval<T>()))>
- {
- using U = decltype(f(std::declval<T>()));
- std::vector<U> res;
-
- for (const auto &elem : _vec)
- {
- res.emplace_back(f(elem));
- }
-
- return Dataset<U>(std::move(res));
- }
-
-public:
- const std::vector<T> &vectorize(void) const { return _vec; }
-
-private:
- std::vector<T> _vec;
-};
-
-#endif // __DATASET_H__
diff --git a/compiler/tflchef/core/src/LexicalCast.cpp b/compiler/tflchef/core/src/LexicalCast.cpp
deleted file mode 100644
index 38a5f9290..000000000
--- a/compiler/tflchef/core/src/LexicalCast.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LexicalCast.h"
-
-#include <cassert>
-#include <limits>
-
-template <> float to_number(const std::string &s) { return std::stof(s); }
-template <> int to_number(const std::string &s) { return std::stoi(s); }
-template <> uint8_t to_number(const std::string &s)
-{
- int temp = std::stoi(s);
- assert(temp >= 0);
- assert(temp <= std::numeric_limits<uint8_t>::max());
- return static_cast<uint8_t>(temp);
-}
-template <> bool to_number(const std::string &s)
-{
- if (std::stoi(s) || s == "T" || s == "t" || s == "TRUE" || s == "true")
- return true;
- return false;
-}
diff --git a/compiler/tflchef/core/src/LexicalCast.h b/compiler/tflchef/core/src/LexicalCast.h
deleted file mode 100644
index 4aeccb482..000000000
--- a/compiler/tflchef/core/src/LexicalCast.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @brief This file provides string <-> number cast helpers
- */
-#ifndef __LEXICAL_CAST_H__
-#define __LEXICAL_CAST_H__
-
-#include <string>
-
-/**
- * @brief Return a numeric value that corresponds to a given string
- *
- * @note This function will throw an exception on casting failure
- */
-template <typename Number> Number to_number(const std::string &s);
-
-#endif // __LEXICAL_CAST_H__
diff --git a/compiler/tflchef/core/src/ModelChef.cpp b/compiler/tflchef/core/src/ModelChef.cpp
index 2c69efd4b..932a649c5 100644
--- a/compiler/tflchef/core/src/ModelChef.cpp
+++ b/compiler/tflchef/core/src/ModelChef.cpp
@@ -15,17 +15,19 @@
*/
#include "tflchef/ModelChef.h"
-#include "Arguments.h"
+#include <souschef/RangedArguments.h>
+#include <souschef/Registry.h>
#include "Convert.h"
-#include "DataChef.h"
-#include "DataChefs.h"
+#include <souschef/DataChefs.h>
#include "OpChef.h"
#include "OpChefs.h"
-#include "Dataset.h"
+#include <souschef/Dataset.h>
+
+#include "Log.h"
#include <iterator>
#include <map>
@@ -42,34 +44,7 @@
namespace
{
-template <typename InputIt> class RangedArguments : public Arguments
-{
-public:
- RangedArguments(InputIt beg, InputIt end) : _beg{beg}, _end{end}
- {
- // DO NOTHING
- }
-
-public:
- uint32_t count(void) const override { return _end - _beg; }
-
-public:
- const std::string &value(uint32_t n) const override { return *(_beg + n); }
-
-private:
- InputIt _beg;
- InputIt _end;
-};
-
-template <typename InputIt> RangedArguments<InputIt> ranged_arguments(InputIt beg, InputIt end)
-{
- return RangedArguments<InputIt>{beg, end};
-}
-
-} // namespace
-
-namespace
-{
+using namespace souschef;
template <typename T> std::vector<T> as_vector(const ::google::protobuf::RepeatedPtrField<T> &field)
{
@@ -147,20 +122,6 @@ private:
namespace
{
-template <typename T> class Registry
-{
-public:
- void add(const std::string &name, std::unique_ptr<T> &&entry)
- {
- _content[name] = std::move(entry);
- }
-
- const T &lookup(const std::string &name) const { return *(_content.at(name)); }
-
-private:
- std::map<std::string, std::unique_ptr<T>> _content;
-};
-
struct DataChefRegistry final : public Registry<DataChefFactory>
{
};
@@ -168,6 +129,7 @@ struct DataChefRegistry final : public Registry<DataChefFactory>
DataChefRegistry &data_chef_registry(const tflchef::TensorType &type)
{
static DataChefRegistry s32;
+ static DataChefRegistry s64;
static DataChefRegistry fp32;
static DataChefRegistry u8;
static DataChefRegistry boolean;
@@ -176,6 +138,8 @@ DataChefRegistry &data_chef_registry(const tflchef::TensorType &type)
{
case tflchef::INT32:
return s32;
+ case tflchef::INT64:
+ return s64;
case tflchef::FLOAT32:
return fp32;
case tflchef::UINT8:
@@ -199,14 +163,54 @@ OpChefRegistry &op_chef_registry(void)
return registry;
}
-// @brief This will prepare a set of unique operator codes in the mode recipe
-std::set<tflite::BuiltinOperator> gather_opcode_set(const ::tflchef::ModelRecipe &model_recipe)
+/// @brief This will prepare a map of unique builtin codes in the model recipe
+std::map<tflite::BuiltinOperator, int32_t>
+gather_builtincode_map(const ::tflchef::ModelRecipe &model_recipe)
+{
+ // Key and value of the map are BuiltinOperator and operator version
+ std::map<tflite::BuiltinOperator, int32_t> builtin_map;
+
+ for (const auto &operation : model_recipe.operation())
+ {
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+ if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
+ continue;
+
+ // Various operation version is unified as the highest version among them
+ if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
+ builtin_map[op_chef->code()] < operation.version())
+ builtin_map[op_chef->code()] = operation.version();
+ }
+
+ // Add ops used in Graphs(subgraphs)
+ for (int g = 0; g < model_recipe.graph_size(); ++g)
+ {
+ const auto &graph = model_recipe.graph(g);
+ for (const auto &operation : graph.operation())
+ {
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+ if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
+ continue;
+
+ // Various operation version is unified as the highest version among them
+ if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
+ builtin_map[op_chef->code()] < operation.version())
+ builtin_map[op_chef->code()] = operation.version();
+ }
+ }
+
+ return builtin_map;
+}
+
+/// @brief This will prepare a set of unique custom codes in the mode recipe
+std::set<std::string> gather_customcode_set(const ::tflchef::ModelRecipe &model_recipe)
{
- std::set<tflite::BuiltinOperator> opcode_set;
+ std::set<std::string> customcode_set;
for (const auto &operation : model_recipe.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
- opcode_set.insert(op_chef->code());
+ if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
+ customcode_set.insert(operation.type());
}
// Add ops used in Graphs(subgraphs)
@@ -216,525 +220,386 @@ std::set<tflite::BuiltinOperator> gather_opcode_set(const ::tflchef::ModelRecipe
for (const auto &operation : graph.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
- opcode_set.insert(op_chef->code());
+ if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
+ customcode_set.insert(operation.type());
}
}
- return opcode_set;
+ return customcode_set;
}
} // namespace
-namespace tflchef
+namespace
{
-/**
- * @brief Generate a (in-memory) TensorFlow Lite model from a given model recipe
- */
-GeneratedModel cook(const ::tflchef::ModelRecipe &model_recipe)
+struct CookParams
{
-// Initialize Op Chef Registry
-#define OP_CHEF(NAME, FACTORY_CLASS) \
- op_chef_registry().add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
-#include "OpChef.def"
-#undef OP_CHEF
+ std::vector<flatbuffers::Offset<::tflite::Buffer>> &buffer_vec;
+ std::vector<flatbuffers::Offset<::tflite::OperatorCode>> &code_vec;
+ std::vector<flatbuffers::Offset<::tflite::SubGraph>> &subgraph_vec;
+ std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder;
+ std::map<tflite::BuiltinOperator, int32_t> &builtin_code_map;
+ std::string noname;
+};
-// Initialize Data Chef Registry
-#define DATA_CHEF(TYPE, NAME, FACTORY_CLASS) \
- data_chef_registry(::tflchef::TYPE) \
- .add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
-#include "DataChef.def"
-#undef DATA_CHEF
+template <typename T> void cook_graph(const T &graph, CookParams &cp)
+{
+ LOGGER(l);
- //
- // Create FlatBufferBuilder
- //
- auto flatbuffer_builder =
- std::unique_ptr<flatbuffers::FlatBufferBuilder>(new flatbuffers::FlatBufferBuilder(1024));
+ std::vector<flatbuffers::Offset<::tflite::Buffer>> &buffer_vec = cp.buffer_vec;
+ std::vector<flatbuffers::Offset<::tflite::OperatorCode>> &code_vec = cp.code_vec;
+ std::vector<flatbuffers::Offset<::tflite::SubGraph>> &subgraph_vec = cp.subgraph_vec;
+ std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder = cp.flatbuffer_builder;
+ std::map<tflite::BuiltinOperator, int32_t> &builtin_code_map = cp.builtin_code_map;
// Operand-related
- std::vector<flatbuffers::Offset<::tflite::Buffer>> buffer_vec;
+ std::vector<flatbuffers::Offset<::tflite::Tensor>> tensor_vec;
// Operation-related
- std::vector<flatbuffers::Offset<::tflite::OperatorCode>> code_vec;
+ std::vector<flatbuffers::Offset<::tflite::Operator>> operator_vec;
+
+ // default name for graph
+ std::string graph_name = cp.noname;
+ if (graph.has_name())
+ graph_name = graph.name();
+
+ // Tensor Name -> Tensor ID mapping (per Graph)
+ std::map<std::string, int32_t> symbol_table;
+
+ auto lookup = [&symbol_table, &graph_name](const std::string &name) {
+ if (symbol_table.find(name) != symbol_table.end())
+ return symbol_table.at(name);
+ else if (name == "")
+ return -1; // -1 in TFLite means that optional input tensor is empty.
+ else
+ {
+ std::string msg = "tflchef : input not found in " + graph_name + " graph";
+ throw std::runtime_error(msg.c_str());
+ }
+ };
- // Graphs-related
- std::vector<flatbuffers::Offset<::tflite::SubGraph>> subgraph_vec;
+ int32_t buffer_start = buffer_vec.size();
+ int32_t buffer_index = 0;
- // Create OperatorCode
- std::set<tflite::BuiltinOperator> opcode_set = gather_opcode_set(model_recipe);
- for (auto opcode : opcode_set)
+ // Create buffer(s) 1~n(I) for input(s)
+ const auto size_input = graph.input_size();
+ for (int ci = 0; ci < size_input; ++ci)
{
- tflite::OperatorCodeBuilder code_builder{*flatbuffer_builder};
- code_builder.add_builtin_code(opcode);
- auto code = code_builder.Finish();
- // Update OperatorCode vector
- code_vec.emplace_back(code);
+ tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_vec.emplace_back(buffer_builder.Finish());
}
-
- // Create an Empty Buffer
- //
- // Buffer 0 SHOULD be an empty buffer in TensorFlow Lite model file
- // (Please refer to the comment for Tensor.buffer field in schema)
+ // Create buffer(s) n(I)+1~n(I)+n(O) for output(s)
+ const auto size_output = graph.output_size();
+ for (int co = 0; co < size_output; ++co)
{
tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
buffer_vec.emplace_back(buffer_builder.Finish());
}
- //
- // Create Main graph
- //
- {
- // Operand-related
- std::vector<flatbuffers::Offset<::tflite::Tensor>> tensor_vec;
+ auto input_names = as_dataset(graph.input()).vectorize();
+ auto output_names = as_dataset(graph.output()).vectorize();
- // Operation-related
- std::vector<flatbuffers::Offset<::tflite::Operator>> operator_vec;
-
- // Tensor Name -> Tensor ID mapping (per Graph)
- std::map<std::string, int32_t> symbol_table;
-
- auto lookup = [&symbol_table](const std::string &name) { return symbol_table.at(name); };
+ for (const auto &operand : graph.operand())
+ {
+ assert(operand.has_name());
- int32_t buffer_start = buffer_vec.size();
- int32_t buffer_index = 0;
+ assert(operand.has_type());
- // Create buffer(s) 1~n(I) for input(s)
- const auto size_input = model_recipe.input_size();
- for (int ci = 0; ci < size_input; ++ci)
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape;
+ std::vector<int32_t> dims;
+ if (operand.has_shape())
{
- tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
- buffer_vec.emplace_back(buffer_builder.Finish());
- }
- // Create buffer(s) n(I)+1~n(I)+n(O) for output(s)
- const auto size_output = model_recipe.output_size();
- for (int co = 0; co < size_output; ++co)
- {
- tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
- buffer_vec.emplace_back(buffer_builder.Finish());
+ dims = as_dims(operand.shape());
+ shape = flatbuffer_builder->CreateVector(dims);
}
- // default name for main graph
- std::string graph_name = "main";
- if (model_recipe.has_name())
- graph_name = model_recipe.name();
+ auto name = flatbuffer_builder->CreateString(operand.name());
- auto input_names = as_dataset(model_recipe.input()).vectorize();
- auto output_names = as_dataset(model_recipe.output()).vectorize();
+ buffer_index = 0;
- for (const auto &operand : model_recipe.operand())
+ // Create Buffer if filler is specified
+ if (operand.has_filler())
{
- assert(operand.has_name());
-
- assert(operand.has_type());
- assert(operand.has_shape());
-
- std::vector<int32_t> dims = as_dims(operand.shape());
-
- auto shape = flatbuffer_builder->CreateVector(dims);
- auto name = flatbuffer_builder->CreateString(operand.name());
-
- buffer_index = 0;
-
- // Create Buffer if filler is specified
- if (operand.has_filler())
- {
- const auto &filler = operand.filler();
+ const auto &filler = operand.filler();
- assert(filler.has_tag());
+ assert(filler.has_tag());
- auto args = ranged_arguments(filler.arg().begin(), filler.arg().end());
- auto chef = data_chef_registry(operand.type()).lookup(filler.tag()).create(args);
+ auto args = ranged_arguments(filler.arg().begin(), filler.arg().end());
+ auto chef = data_chef_registry(operand.type()).lookup(filler.tag()).create(args);
- assert(chef != nullptr);
+ assert(chef != nullptr);
- // Create Data
- auto data_vec = chef->generate(element_count(dims));
- auto data = flatbuffer_builder->CreateVector(data_vec);
+ // Create Data
+ int32_t count = (element_count(dims) > 0) ? element_count(dims) : filler.arg_size();
+ auto data_vec = chef->generate(count);
+ auto data = flatbuffer_builder->CreateVector(data_vec);
- // Create Buffer
- tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
- buffer_builder.add_data(data);
- auto buffer = buffer_builder.Finish();
+ // Create Buffer
+ tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_builder.add_data(data);
+ auto buffer = buffer_builder.Finish();
- // Update Buffer Index & Vector
- buffer_index = buffer_vec.size();
- buffer_vec.emplace_back(buffer);
+ // Update Buffer Index & Vector
+ buffer_index = buffer_vec.size();
+ buffer_vec.emplace_back(buffer);
+ }
+ else
+ {
+ // if this is input or output, assign to that buffer_index
+ int idx = 0;
+ for (auto it = input_names.begin(); it != input_names.end(); ++it, ++idx)
+ {
+ if (*it == operand.name())
+ {
+ buffer_index = buffer_start + idx;
+ break;
+ }
}
- else
+ if (buffer_index == 0)
{
- // if this is input or output, assign to that buffer_index
- int idx = 0;
- for (auto it = input_names.begin(); it != input_names.end(); ++it, ++idx)
+ idx = 0;
+ for (auto it = output_names.begin(); it != output_names.end(); ++it, ++idx)
{
if (*it == operand.name())
{
- buffer_index = buffer_start + idx;
+ buffer_index = buffer_start + size_input + idx;
break;
}
}
- if (buffer_index == 0)
- {
- idx = 0;
- for (auto it = output_names.begin(); it != output_names.end(); ++it, ++idx)
- {
- if (*it == operand.name())
- {
- buffer_index = buffer_start + size_input + idx;
- break;
- }
- }
- }
}
-
- flatbuffers::Offset<tflite::QuantizationParameters> quant_index;
-
- // Create QuantizationParameters if quant is specified
- if (operand.has_quant())
+ if (buffer_index == 0)
{
- const auto &quant = operand.quant();
-
- // Create each parameters
- // NOTE if some parameters are not given, those will be set to default value
- std::vector<float> quant_max_vec(quant.max_size());
- std::vector<float> quant_min_vec(quant.min_size());
- std::vector<float> quant_scale_vec(quant.scale_size());
- std::vector<int64_t> quant_zero_point_vec(quant.zero_point_size());
-
- for (uint32_t i = 0; i < quant.max_size(); ++i)
- quant_max_vec.at(i) = quant.max(i);
- for (uint32_t i = 0; i < quant.min_size(); ++i)
- quant_min_vec.at(i) = quant.min(i);
- for (uint32_t i = 0; i < quant.scale_size(); ++i)
- quant_scale_vec.at(i) = quant.scale(i);
- for (uint32_t i = 0; i < quant.zero_point_size(); ++i)
- quant_zero_point_vec.at(i) = quant.zero_point(i);
-
- auto quant_max = flatbuffer_builder->CreateVector(quant_max_vec);
- auto quant_min = flatbuffer_builder->CreateVector(quant_min_vec);
- auto quant_scale = flatbuffer_builder->CreateVector(quant_scale_vec);
- auto quant_zero_point = flatbuffer_builder->CreateVector(quant_zero_point_vec);
-
- // Create QuantizationParameters
- tflite::QuantizationParametersBuilder quant_builder{*flatbuffer_builder};
- quant_builder.add_max(quant_max);
- quant_builder.add_min(quant_min);
- quant_builder.add_scale(quant_scale);
- quant_builder.add_zero_point(quant_zero_point);
-
- // Update QuantizationParameters Index
- quant_index = quant_builder.Finish();
- }
-
- // Create Tensor
- tflite::TensorBuilder tensor_builder{*flatbuffer_builder};
-
- tensor_builder.add_shape(shape);
- tensor_builder.add_type(as_tflite_tensortype(operand.type()));
- tensor_builder.add_buffer(buffer_index);
- tensor_builder.add_name(name);
- if (operand.has_quant())
- tensor_builder.add_quantization(quant_index);
-
- // Append!
- tensor_vec.emplace_back(tensor_builder.Finish());
-
- // Update Tensor Name -> Tensor Index Map
- int32_t tensor_index = symbol_table.size();
- const auto &tensor_name = operand.name();
+ // we couldn't find the buffer; create an empty buffer for this tensor
+ buffer_index = buffer_vec.size();
- symbol_table[tensor_name] = tensor_index;
+ tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_vec.emplace_back(buffer_builder.Finish());
+ }
}
+ assert(buffer_index != 0);
- // Create Operator
- for (const auto &operation : model_recipe.operation())
- {
- assert(operation.has_type());
-
- auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
-
- // Create 'inputs'
- std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
- auto inputs = flatbuffer_builder->CreateVector(input_vec);
-
- // Create 'outputs'
- std::vector<int32_t> output_vec = as_dataset(operation.output()).map(lookup).vectorize();
- auto outputs = flatbuffer_builder->CreateVector(output_vec);
-
- // Create Option
- auto options = op_chef->value(*flatbuffer_builder);
-
- // Create Operator
- tflite::OperatorBuilder op_builder{*flatbuffer_builder};
-
- // Get operator code index from opcode_set with assumption, order of
- // opcode_set is same as that of code_vec
- auto op_it = opcode_set.find(op_chef->code());
- assert(op_it != opcode_set.end());
- uint32_t opcode_index = std::distance(opcode_set.begin(), op_it);
-
- op_builder.add_opcode_index(opcode_index);
- op_builder.add_inputs(inputs);
- op_builder.add_outputs(outputs);
- op_builder.add_builtin_options_type(op_chef->type());
- op_builder.add_builtin_options(options);
+ flatbuffers::Offset<tflite::QuantizationParameters> quant_index;
- // Append Operator
- operator_vec.emplace_back(op_builder.Finish());
+ // Create QuantizationParameters if quant is specified
+ if (operand.has_quant())
+ {
+ const auto &quant = operand.quant();
+
+ // Create each parameters
+ // NOTE if some parameters are not given, those will be set to default value
+ std::vector<float> quant_max_vec(quant.max_size());
+ std::vector<float> quant_min_vec(quant.min_size());
+ std::vector<float> quant_scale_vec(quant.scale_size());
+ std::vector<int64_t> quant_zero_point_vec(quant.zero_point_size());
+
+ for (uint32_t i = 0; i < quant.max_size(); ++i)
+ quant_max_vec.at(i) = quant.max(i);
+ for (uint32_t i = 0; i < quant.min_size(); ++i)
+ quant_min_vec.at(i) = quant.min(i);
+ for (uint32_t i = 0; i < quant.scale_size(); ++i)
+ quant_scale_vec.at(i) = quant.scale(i);
+ for (uint32_t i = 0; i < quant.zero_point_size(); ++i)
+ quant_zero_point_vec.at(i) = quant.zero_point(i);
+
+ auto quant_max = flatbuffer_builder->CreateVector(quant_max_vec);
+ auto quant_min = flatbuffer_builder->CreateVector(quant_min_vec);
+ auto quant_scale = flatbuffer_builder->CreateVector(quant_scale_vec);
+ auto quant_zero_point = flatbuffer_builder->CreateVector(quant_zero_point_vec);
+
+ // Create QuantizationParameters
+ tflite::QuantizationParametersBuilder quant_builder{*flatbuffer_builder};
+ quant_builder.add_max(quant_max);
+ quant_builder.add_min(quant_min);
+ quant_builder.add_scale(quant_scale);
+ quant_builder.add_zero_point(quant_zero_point);
+
+ // Update QuantizationParameters Index
+ quant_index = quant_builder.Finish();
}
- // Create network input/output vector
- std::vector<int32_t> input_vec = as_dataset(model_recipe.input()).map(lookup).vectorize();
- std::vector<int32_t> output_vec = as_dataset(model_recipe.output()).map(lookup).vectorize();
+ // Create Tensor
+ tflite::TensorBuilder tensor_builder{*flatbuffer_builder};
- // Create "SubGraph" arguments
- auto tensors = flatbuffer_builder->CreateVector(tensor_vec);
- auto inputs = flatbuffer_builder->CreateVector(input_vec);
- auto outputs = flatbuffer_builder->CreateVector(output_vec);
- auto operators = flatbuffer_builder->CreateVector(operator_vec);
- auto name = flatbuffer_builder->CreateString(graph_name);
+ tensor_builder.add_shape(shape);
+ tensor_builder.add_type(as_tflite_tensortype(operand.type()));
+ tensor_builder.add_buffer(buffer_index);
+ tensor_builder.add_name(name);
+ if (operand.has_quant())
+ tensor_builder.add_quantization(quant_index);
+
+ // Append!
+ tensor_vec.emplace_back(tensor_builder.Finish());
- tflite::SubGraphBuilder subgraph_builder{*flatbuffer_builder};
+ // Update Tensor Name -> Tensor Index Map
+ int32_t tensor_index = symbol_table.size();
+ const auto &tensor_name = operand.name();
- subgraph_builder.add_tensors(tensors);
- subgraph_builder.add_inputs(inputs);
- subgraph_builder.add_outputs(outputs);
- subgraph_builder.add_operators(operators);
- subgraph_builder.add_name(name);
+ INFO(l) << "Symbol [" << tensor_name << "] = Tensor " << tensor_index << std::endl;
- subgraph_vec.emplace_back(subgraph_builder.Finish());
+ symbol_table[tensor_name] = tensor_index;
}
- //
- // Create subgraphs if exist
- // TODO refactor main graph and subgraphs generation to reduce duplicate codes
- //
- for (int g = 0; g < model_recipe.graph_size(); ++g)
+ // Create Operator
+ for (const auto &operation : graph.operation())
{
- // Operand-related
- std::vector<flatbuffers::Offset<::tflite::Tensor>> tensor_vec;
-
- // Operation-related
- std::vector<flatbuffers::Offset<::tflite::Operator>> operator_vec;
-
- // Tensor Name -> Tensor ID mapping (per Graph)
- std::map<std::string, int32_t> symbol_table;
-
- auto lookup = [&symbol_table](const std::string &name) { return symbol_table.at(name); };
-
- const auto &graph = model_recipe.graph(g);
-
- int32_t buffer_start = buffer_vec.size();
- int32_t buffer_index = 0;
-
- // Create buffer(s) for input(s)
- const auto size_input = graph.input_size();
- for (int ci = 0; ci < size_input; ++ci)
- {
- tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
- buffer_vec.emplace_back(buffer_builder.Finish());
- }
- // Create buffer(s) for output(s)
- const auto size_output = graph.output_size();
- for (int co = 0; co < size_output; ++co)
- {
- tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
- buffer_vec.emplace_back(buffer_builder.Finish());
- }
-
- // default name for sub graph
- // TODO naming rule here may have conflit if recipe file provides it.
- // fix this when this happens.
- std::ostringstream stringStream;
- stringStream << "sub_" << (g + 1);
- std::string graph_name = stringStream.str();
- if (graph.has_name())
- graph_name = graph.name();
+ assert(operation.has_type());
- auto input_names = as_dataset(graph.input()).vectorize();
- auto output_names = as_dataset(graph.output()).vectorize();
-
- for (const auto &operand : graph.operand())
- {
- assert(operand.has_name());
-
- assert(operand.has_type());
- assert(operand.has_shape());
-
- std::vector<int32_t> dims = as_dims(operand.shape());
-
- auto shape = flatbuffer_builder->CreateVector(dims);
- auto name = flatbuffer_builder->CreateString(operand.name());
+ auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
- // Create Buffer if filler is specified
- if (operand.has_filler())
- {
- const auto &filler = operand.filler();
+ // Create 'inputs'
+ std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
+ auto inputs = flatbuffer_builder->CreateVector(input_vec);
- assert(filler.has_tag());
+ // Create 'outputs'
+ std::vector<int32_t> output_vec = as_dataset(operation.output()).map(lookup).vectorize();
+ auto outputs = flatbuffer_builder->CreateVector(output_vec);
- auto args = ranged_arguments(filler.arg().begin(), filler.arg().end());
- auto chef = data_chef_registry(operand.type()).lookup(filler.tag()).create(args);
+ // Create Option
+ auto options = op_chef->value(*flatbuffer_builder);
- assert(chef != nullptr);
+ // Create Custom option
+ auto circle_custom_options = op_chef->custom_value(*flatbuffer_builder);
- // Create Data
- auto data_vec = chef->generate(element_count(dims));
- auto data = flatbuffer_builder->CreateVector(data_vec);
+ // Create Operator
+ tflite::OperatorBuilder op_builder{*flatbuffer_builder};
+
+ // Get operator code index from builtin_code_set with assumption, order of
+ // builtin_code_set is same as that of code_vec
+ auto op_it = builtin_code_map.find(op_chef->code());
+ assert(op_it != builtin_code_map.end());
+ uint32_t opcode_index = std::distance(builtin_code_map.begin(), op_it);
+
+ op_builder.add_opcode_index(opcode_index);
+ op_builder.add_inputs(inputs);
+ op_builder.add_outputs(outputs);
+ op_builder.add_builtin_options_type(op_chef->type());
+ op_builder.add_builtin_options(options);
+ op_builder.add_custom_options(circle_custom_options);
+ op_builder.add_custom_options_format(tflite::CustomOptionsFormat_FLEXBUFFERS);
+ // Append Operator
+ operator_vec.emplace_back(op_builder.Finish());
+ }
- // Create Buffer
- tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
- buffer_builder.add_data(data);
- auto buffer = buffer_builder.Finish();
+ // Create network input/output vector
+ std::vector<int32_t> input_vec = as_dataset(graph.input()).map(lookup).vectorize();
+ std::vector<int32_t> output_vec = as_dataset(graph.output()).map(lookup).vectorize();
- // Update Buffer Index & Vector
- buffer_index = buffer_vec.size();
- buffer_vec.emplace_back(buffer);
- }
- else
- {
- // if this is input or output, assign to that buffer_index
- int idx = 0;
- buffer_index = 0;
- for (auto it = input_names.begin(); it != input_names.end(); ++it, ++idx)
- {
- if (*it == operand.name())
- {
- buffer_index = buffer_start + idx;
- break;
- }
- }
- if (buffer_index == 0)
- {
- idx = 0;
- for (auto it = output_names.begin(); it != output_names.end(); ++it, ++idx)
- {
- if (*it == operand.name())
- {
- buffer_index = buffer_start + size_input + idx;
- break;
- }
- }
- }
- }
- // NOTE buffer_index can be 0 when this operand does not have a filler or not I/O
+ // Create "SubGraph" arguments
+ auto tensors = flatbuffer_builder->CreateVector(tensor_vec);
+ auto inputs = flatbuffer_builder->CreateVector(input_vec);
+ auto outputs = flatbuffer_builder->CreateVector(output_vec);
+ auto operators = flatbuffer_builder->CreateVector(operator_vec);
+ auto name = flatbuffer_builder->CreateString(graph_name);
- flatbuffers::Offset<tflite::QuantizationParameters> quant_index;
+ tflite::SubGraphBuilder subgraph_builder{*flatbuffer_builder};
- // Create QuantizationParameters if quant is specified
- if (operand.has_quant())
- {
- const auto &quant = operand.quant();
-
- // Create each parameters
- // NOTE if some parameters are not given, those will be set to default value
- std::vector<float> quant_max_vec(quant.max_size());
- std::vector<float> quant_min_vec(quant.min_size());
- std::vector<float> quant_scale_vec(quant.scale_size());
- std::vector<int64_t> quant_zero_point_vec(quant.zero_point_size());
-
- for (uint32_t i = 0; i < quant.max_size(); ++i)
- quant_max_vec.at(i) = quant.max(i);
- for (uint32_t i = 0; i < quant.min_size(); ++i)
- quant_min_vec.at(i) = quant.min(i);
- for (uint32_t i = 0; i < quant.scale_size(); ++i)
- quant_scale_vec.at(i) = quant.scale(i);
- for (uint32_t i = 0; i < quant.zero_point_size(); ++i)
- quant_zero_point_vec.at(i) = quant.zero_point(i);
-
- auto quant_max = flatbuffer_builder->CreateVector(quant_max_vec);
- auto quant_min = flatbuffer_builder->CreateVector(quant_min_vec);
- auto quant_scale = flatbuffer_builder->CreateVector(quant_scale_vec);
- auto quant_zero_point = flatbuffer_builder->CreateVector(quant_zero_point_vec);
-
- // Create QuantizationParameters
- tflite::QuantizationParametersBuilder quant_builder{*flatbuffer_builder};
- quant_builder.add_max(quant_max);
- quant_builder.add_min(quant_min);
- quant_builder.add_scale(quant_scale);
- quant_builder.add_zero_point(quant_zero_point);
-
- // Update QuantizationParameters Index
- quant_index = quant_builder.Finish();
- }
+ subgraph_builder.add_tensors(tensors);
+ subgraph_builder.add_inputs(inputs);
+ subgraph_builder.add_outputs(outputs);
+ subgraph_builder.add_operators(operators);
+ subgraph_builder.add_name(name);
- // Create Tensor
- tflite::TensorBuilder tensor_builder{*flatbuffer_builder};
+ subgraph_vec.emplace_back(subgraph_builder.Finish());
+}
- tensor_builder.add_shape(shape);
- tensor_builder.add_type(as_tflite_tensortype(operand.type()));
- tensor_builder.add_buffer(buffer_index);
- tensor_builder.add_name(name);
- if (operand.has_quant())
- tensor_builder.add_quantization(quant_index);
+} // namespace
- // Append!
- tensor_vec.emplace_back(tensor_builder.Finish());
+namespace tflchef
+{
- // Update Tensor Name -> Tensor Index Map
- int32_t tensor_index = symbol_table.size();
- const auto &tensor_name = operand.name();
+/**
+ * @brief Generate a (in-memory) TensorFlow Lite model from a given model recipe
+ */
+GeneratedModel cook(const ::tflchef::ModelRecipe &model_recipe)
+{
+// Initialize Op Chef Registry
+#define OP_CHEF(NAME, FACTORY_CLASS) \
+ op_chef_registry().add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
+#include "OpChef.def"
+#undef OP_CHEF
- symbol_table[tensor_name] = tensor_index;
- }
+// Initialize Data Chef Registry
+#define DATA_CHEF(TYPE, NAME, FACTORY_CLASS) \
+ data_chef_registry(::tflchef::TYPE) \
+ .add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
+#include <souschef/DataChef.def>
+#undef DATA_CHEF
- // Create Operator
- for (const auto &operation : graph.operation())
- {
- assert(operation.has_type());
+ //
+ // Create FlatBufferBuilder
+ //
+ auto flatbuffer_builder =
+ std::unique_ptr<flatbuffers::FlatBufferBuilder>(new flatbuffers::FlatBufferBuilder(1024));
- auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
+ // Operand-related
+ std::vector<flatbuffers::Offset<::tflite::Buffer>> buffer_vec;
- // Create 'inputs'
- std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
- auto inputs = flatbuffer_builder->CreateVector(input_vec);
+ // Operation-related
+ std::vector<flatbuffers::Offset<::tflite::OperatorCode>> code_vec;
- // Create 'outputs'
- std::vector<int32_t> output_vec = as_dataset(operation.output()).map(lookup).vectorize();
- auto outputs = flatbuffer_builder->CreateVector(output_vec);
+ // Graphs-related
+ std::vector<flatbuffers::Offset<::tflite::SubGraph>> subgraph_vec;
- // Create Option
- auto options = op_chef->value(*flatbuffer_builder);
+ // Create OperatorCode with Builtin Operator
+ auto builtin_code_map = gather_builtincode_map(model_recipe);
+ for (auto const &opcode : builtin_code_map)
+ {
+ tflite::OperatorCodeBuilder code_builder{*flatbuffer_builder};
+ code_builder.add_builtin_code(opcode.first);
+ code_builder.add_version(opcode.second);
+ auto code = code_builder.Finish();
+ // Update OperatorCode vector
+ code_vec.emplace_back(code);
+ }
- // Create Operator
- tflite::OperatorBuilder op_builder{*flatbuffer_builder};
+ // Create OperatorCode with Custom Operator
+ std::set<std::string> custom_code_set = gather_customcode_set(model_recipe);
+ if (custom_code_set.size() &&
+ builtin_code_map.find(tflite::BuiltinOperator_CUSTOM) == builtin_code_map.end())
+ builtin_code_map[tflite::BuiltinOperator_CUSTOM] = 1;
- // Get operator code index from opcode_set with assumption, order of
- // opcode_set is same as that of code_vec
- auto op_it = opcode_set.find(op_chef->code());
- assert(op_it != opcode_set.end());
- uint32_t opcode_index = std::distance(opcode_set.begin(), op_it);
+ for (auto opcode : custom_code_set)
+ {
+ auto custom_code = flatbuffer_builder->CreateString(opcode);
+ tflite::OperatorCodeBuilder code_builder{*flatbuffer_builder};
+ code_builder.add_builtin_code(tflite::BuiltinOperator_CUSTOM);
+ code_builder.add_custom_code(custom_code);
+ auto code = code_builder.Finish();
+ // Update OperatorCode vector
+ code_vec.emplace_back(code);
+ }
- op_builder.add_opcode_index(opcode_index);
- op_builder.add_inputs(inputs);
- op_builder.add_outputs(outputs);
- op_builder.add_builtin_options_type(op_chef->type());
- op_builder.add_builtin_options(options);
+ // Create an Empty Buffer
+ //
+ // Buffer 0 SHOULD be an empty buffer in TensorFlow Lite model file
+ // (Please refer to the comment for Tensor.buffer field in schema)
+ {
+ tflite::BufferBuilder buffer_builder{*flatbuffer_builder};
+ buffer_vec.emplace_back(buffer_builder.Finish());
+ }
- // Append Operator
- operator_vec.emplace_back(op_builder.Finish());
- }
+ //
+ // Create Main graph
+ //
+ CookParams cp{buffer_vec, code_vec, subgraph_vec, flatbuffer_builder, builtin_code_map, "main"};
- // Create network input/output vector
- std::vector<int32_t> input_vec = as_dataset(graph.input()).map(lookup).vectorize();
- std::vector<int32_t> output_vec = as_dataset(graph.output()).map(lookup).vectorize();
+ cook_graph<::tflchef::ModelRecipe>(model_recipe, cp);
- // Create "SubGraph" arguments
- auto tensors = flatbuffer_builder->CreateVector(tensor_vec);
- auto inputs = flatbuffer_builder->CreateVector(input_vec);
- auto outputs = flatbuffer_builder->CreateVector(output_vec);
- auto operators = flatbuffer_builder->CreateVector(operator_vec);
- auto name = flatbuffer_builder->CreateString(graph_name);
+ //
+ // Create subgraphs if exist
+ //
+ for (int g = 0; g < model_recipe.graph_size(); ++g)
+ {
+ const auto &graph = model_recipe.graph(g);
- tflite::SubGraphBuilder subgraph_builder{*flatbuffer_builder};
+ std::ostringstream stringStream;
+ stringStream << "sub_" << (g + 1);
- subgraph_builder.add_tensors(tensors);
- subgraph_builder.add_inputs(inputs);
- subgraph_builder.add_outputs(outputs);
- subgraph_builder.add_operators(operators);
- subgraph_builder.add_name(name);
+ CookParams cp{buffer_vec, code_vec, subgraph_vec,
+ flatbuffer_builder, builtin_code_map, stringStream.str()};
- subgraph_vec.emplace_back(subgraph_builder.Finish());
+ cook_graph<::tflchef::Graph>(graph, cp);
}
// Create "Model" arguments
diff --git a/compiler/tflchef/core/src/Op/AddN.cpp b/compiler/tflchef/core/src/Op/AddN.cpp
new file mode 100644
index 000000000..2ac02d219
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/AddN.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AddN.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> AddNChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::AddNOptionsBuilder add_n_options_builder{fbb};
+
+ return add_n_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> AddNChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new AddNChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/AddN.h b/compiler/tflchef/core/src/Op/AddN.h
new file mode 100644
index 000000000..44dcc63ef
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/AddN.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ADD_N_H__
+#define __OP_ADD_N_H__
+
+#include "OpChef.h"
+
+class AddNChef final : public OpChef
+{
+public:
+ explicit AddNChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_ADD_N; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_AddNOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct AddNChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ADD_N_H__
diff --git a/compiler/tflchef/core/src/Op/ArgMin.cpp b/compiler/tflchef/core/src/Op/ArgMin.cpp
new file mode 100644
index 000000000..b599270b0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ArgMin.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ArgMin.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> ArgMinChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_argmin_options());
+
+ auto tflite_output_type = as_tflite_tensortype(operation.argmin_options().output_type());
+
+ tflite::ArgMinOptionsBuilder argmin_options_builder{fbb};
+ argmin_options_builder.add_output_type(tflite_output_type);
+
+ return argmin_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ArgMinChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ArgMinChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ArgMin.h b/compiler/tflchef/core/src/Op/ArgMin.h
new file mode 100644
index 000000000..222039f91
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ArgMin.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ARGMIN_H__
+#define __OP_ARGMIN_H__
+
+#include "OpChef.h"
+
+class ArgMinChef final : public OpChef
+{
+public:
+ explicit ArgMinChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_ARG_MIN; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_ArgMinOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ArgMinChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ARGMIN_H__
diff --git a/compiler/tflchef/core/src/Op/BatchMatMul.cpp b/compiler/tflchef/core/src/Op/BatchMatMul.cpp
new file mode 100644
index 000000000..7722bcc5e
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/BatchMatMul.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchMatMul.h"
+
+flatbuffers::Offset<void> BatchMatMulChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_batch_matmul_options());
+
+ tflite::BatchMatMulOptionsBuilder batch_matmul_options_options_builder{fbb};
+ batch_matmul_options_options_builder.add_adj_x(operation.batch_matmul_options().adj_x());
+ batch_matmul_options_options_builder.add_adj_y(operation.batch_matmul_options().adj_y());
+
+ return batch_matmul_options_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> BatchMatMulChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new BatchMatMulChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/BatchMatMul.h b/compiler/tflchef/core/src/Op/BatchMatMul.h
new file mode 100644
index 000000000..eaf943cb0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/BatchMatMul.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_BATCH_MATMUL_H__
+#define __OP_BATCH_MATMUL_H__
+
+#include "OpChef.h"
+
+class BatchMatMulChef final : public OpChef
+{
+public:
+ explicit BatchMatMulChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_BATCH_MATMUL; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_BatchMatMulOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct BatchMatMulChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_BATCH_MATMUL_H__
diff --git a/compiler/tflchef/core/src/Op/Cast.cpp b/compiler/tflchef/core/src/Op/Cast.cpp
new file mode 100644
index 000000000..1a29f9ac4
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Cast.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Cast.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> CastChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ if (!operation.has_cast_options())
+ return 0;
+
+ auto tflite_in_data_type = as_tflite_tensortype(operation.cast_options().in_data_type());
+ auto tflite_out_data_type = as_tflite_tensortype(operation.cast_options().out_data_type());
+
+ tflite::CastOptionsBuilder cast_options_builder{fbb};
+ cast_options_builder.add_in_data_type(tflite_in_data_type);
+ cast_options_builder.add_out_data_type(tflite_out_data_type);
+
+ return cast_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> CastChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new CastChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Cast.h b/compiler/tflchef/core/src/Op/Cast.h
new file mode 100644
index 000000000..84c8e29e4
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Cast.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CAST_H__
+#define __OP_CAST_H__
+
+#include "OpChef.h"
+
+class CastChef final : public OpChef
+{
+public:
+ explicit CastChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CAST; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_CastOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct CastChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_CAST_H__
diff --git a/compiler/tflchef/core/src/Op/Ceil.cpp b/compiler/tflchef/core/src/Op/Ceil.cpp
new file mode 100644
index 000000000..3da047727
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Ceil.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Ceil.h"
+
+flatbuffers::Offset<void> CeilChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ // No tflite option for Ceil. Use void.
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> CeilChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new CeilChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Ceil.h b/compiler/tflchef/core/src/Op/Ceil.h
new file mode 100644
index 000000000..5a42b7f00
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Ceil.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CEIL_H__
+#define __OP_CEIL_H__
+
+#include "OpChef.h"
+
+class CeilChef final : public OpChef
+{
+public:
+ explicit CeilChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CEIL; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct CeilChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_CEIL_H__
diff --git a/compiler/tflchef/core/src/Op/Conv2D.cpp b/compiler/tflchef/core/src/Op/Conv2D.cpp
index d99c53351..0b3ef8a6d 100644
--- a/compiler/tflchef/core/src/Op/Conv2D.cpp
+++ b/compiler/tflchef/core/src/Op/Conv2D.cpp
@@ -25,14 +25,18 @@ flatbuffers::Offset<void> Conv2DChef::value(flatbuffers::FlatBufferBuilder &fbb)
assert(operation.has_conv2d_options());
- auto tflite_padding = as_tflite_padding(operation.conv2d_options().padding());
- auto tflite_activation = as_tflite_activation(operation.conv2d_options().activation());
+ const auto &conv2d_options = operation.conv2d_options();
+
+ auto tflite_padding = as_tflite_padding(conv2d_options.padding());
+ auto tflite_activation = as_tflite_activation(conv2d_options.activation());
tflite::Conv2DOptionsBuilder conv2d_options_builder{fbb};
conv2d_options_builder.add_padding(tflite_padding);
- conv2d_options_builder.add_stride_h(operation.conv2d_options().stride_h());
- conv2d_options_builder.add_stride_w(operation.conv2d_options().stride_w());
+ conv2d_options_builder.add_stride_h(conv2d_options.stride_h());
+ conv2d_options_builder.add_stride_w(conv2d_options.stride_w());
conv2d_options_builder.add_fused_activation_function(tflite_activation);
+ conv2d_options_builder.add_dilation_w_factor(conv2d_options.dilation_w_factor());
+ conv2d_options_builder.add_dilation_h_factor(conv2d_options.dilation_h_factor());
return conv2d_options_builder.Finish().Union();
}
diff --git a/compiler/tflchef/core/src/Op/DepthToSpace.cpp b/compiler/tflchef/core/src/Op/DepthToSpace.cpp
new file mode 100644
index 000000000..f0531d98d
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/DepthToSpace.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DepthToSpace.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> DepthToSpaceChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_depth_to_space_options());
+
+ auto options = operation.depth_to_space_options();
+
+ auto tflite_block_size = options.block_size();
+
+ tflite::DepthToSpaceOptionsBuilder options_builder{fbb};
+
+ options_builder.add_block_size(tflite_block_size);
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> DepthToSpaceChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new DepthToSpaceChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/DepthToSpace.h b/compiler/tflchef/core/src/Op/DepthToSpace.h
new file mode 100644
index 000000000..32cb24211
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/DepthToSpace.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_DEPTHTOSPACE_H__
+#define __OP_DEPTHTOSPACE_H__
+
+#include "OpChef.h"
+
+class DepthToSpaceChef final : public OpChef
+{
+public:
+ explicit DepthToSpaceChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_DEPTH_TO_SPACE;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_DepthToSpaceOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct DepthToSpaceChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_DEPTHTOSPACE_H__
diff --git a/compiler/tflchef/core/src/Op/DepthwiseConv2D.cpp b/compiler/tflchef/core/src/Op/DepthwiseConv2D.cpp
index e04cf50ff..5da5b63e4 100644
--- a/compiler/tflchef/core/src/Op/DepthwiseConv2D.cpp
+++ b/compiler/tflchef/core/src/Op/DepthwiseConv2D.cpp
@@ -36,6 +36,8 @@ flatbuffers::Offset<void> DepthwiseConv2DChef::value(flatbuffers::FlatBufferBuil
options_builder.add_stride_h(options.stride_h());
options_builder.add_depth_multiplier(options.depth_multiplier());
options_builder.add_fused_activation_function(tflite_activation);
+ options_builder.add_dilation_w_factor(options.dilation_w_factor());
+ options_builder.add_dilation_h_factor(options.dilation_h_factor());
return options_builder.Finish().Union();
}
diff --git a/compiler/tflchef/core/src/Op/ELU.cpp b/compiler/tflchef/core/src/Op/ELU.cpp
new file mode 100644
index 000000000..d9dae16af
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ELU.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ELU.h"
+
+flatbuffers::Offset<void> ELUChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> ELUChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ELUChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ELU.h b/compiler/tflchef/core/src/Op/ELU.h
new file mode 100644
index 000000000..e164c0071
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ELU.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ELU_H__
+#define __OP_ELU_H__
+
+#include "OpChef.h"
+
+class ELUChef final : public OpChef
+{
+public:
+ explicit ELUChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_ELU; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ELUChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ELU_H__
diff --git a/compiler/tflchef/core/src/Op/ExpandDims.cpp b/compiler/tflchef/core/src/Op/ExpandDims.cpp
new file mode 100644
index 000000000..c6082811f
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ExpandDims.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExpandDims.h"
+#include "Convert.h"
+
+flatbuffers::Offset<void> ExpandDimsChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::ExpandDimsOptionsBuilder expand_dims_options_builder{fbb};
+
+ return expand_dims_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ExpandDimsChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ExpandDimsChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ExpandDims.h b/compiler/tflchef/core/src/Op/ExpandDims.h
new file mode 100644
index 000000000..1f4c34a98
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ExpandDims.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_EXPAND_DIMS_H__
+#define __OP_EXPAND_DIMS_H__
+
+#include "OpChef.h"
+
+class ExpandDimsChef final : public OpChef
+{
+public:
+ explicit ExpandDimsChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_EXPAND_DIMS; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_ExpandDimsOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ExpandDimsChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_EXPAND_DIMS_H__
diff --git a/compiler/tflchef/core/src/Op/Fill.cpp b/compiler/tflchef/core/src/Op/Fill.cpp
new file mode 100644
index 000000000..4a6829459
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Fill.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Fill.h"
+
+#include <cassert>
+#include <vector>
+
+flatbuffers::Offset<void> FillChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::FillOptionsBuilder options_builder{fbb};
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> FillChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new FillChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Fill.h b/compiler/tflchef/core/src/Op/Fill.h
new file mode 100644
index 000000000..60f9084c8
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Fill.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_FILL_H__
+#define __OP_FILL_H__
+
+#include "OpChef.h"
+
+class FillChef final : public OpChef
+{
+public:
+ explicit FillChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_FILL; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_FillOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct FillChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_FILL_H__
diff --git a/compiler/tflchef/core/src/Op/Floor.cpp b/compiler/tflchef/core/src/Op/Floor.cpp
new file mode 100644
index 000000000..8f6820152
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Floor.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Floor.h"
+
+flatbuffers::Offset<void> FloorChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ // No tflite option for Floor. Use void.
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> FloorChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new FloorChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Floor.h b/compiler/tflchef/core/src/Op/Floor.h
new file mode 100644
index 000000000..23385d737
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Floor.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_FLOOR_H__
+#define __OP_FLOOR_H__
+
+#include "OpChef.h"
+
+class FloorChef final : public OpChef
+{
+public:
+ explicit FloorChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_FLOOR; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct FloorChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_FLOOR_H__
diff --git a/compiler/tflchef/core/src/Op/FloorMod.cpp b/compiler/tflchef/core/src/Op/FloorMod.cpp
new file mode 100644
index 000000000..d17795a72
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/FloorMod.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FloorMod.h"
+
+flatbuffers::Offset<void> FloorModChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::FloorModOptionsBuilder options_builder{fbb};
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> FloorModChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new FloorModChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/FloorMod.h b/compiler/tflchef/core/src/Op/FloorMod.h
new file mode 100644
index 000000000..b501f61e0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/FloorMod.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_FLOOR_MOD_H__
+#define __OP_FLOOR_MOD_H__
+
+#include "OpChef.h"
+
+class FloorModChef final : public OpChef
+{
+public:
+ explicit FloorModChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_FLOOR_MOD; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_FloorModOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct FloorModChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_FLOOR_MOD_H__
diff --git a/compiler/tflchef/core/src/Op/Gather.cpp b/compiler/tflchef/core/src/Op/Gather.cpp
new file mode 100644
index 000000000..2b62c7be2
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Gather.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Gather.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> GatherChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_gather_options());
+
+ auto options = operation.gather_options();
+
+ tflite::GatherOptionsBuilder options_builder{fbb};
+
+ options_builder.add_axis(options.axis());
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> GatherChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new GatherChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Gather.h b/compiler/tflchef/core/src/Op/Gather.h
new file mode 100644
index 000000000..d937178c8
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Gather.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_GATHER_H__
+#define __OP_GATHER_H__
+
+#include "OpChef.h"
+
+class GatherChef final : public OpChef
+{
+public:
+ explicit GatherChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_GATHER; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_GatherOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct GatherChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_GATHER_H__
diff --git a/compiler/tflchef/core/src/Op/GatherNd.cpp b/compiler/tflchef/core/src/Op/GatherNd.cpp
new file mode 100644
index 000000000..c04db5350
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/GatherNd.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GatherNd.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> GatherNdChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::GatherNdOptionsBuilder gather_nd_options_builder{fbb};
+ return gather_nd_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> GatherNdChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new GatherNdChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/GatherNd.h b/compiler/tflchef/core/src/Op/GatherNd.h
new file mode 100644
index 000000000..8865e7756
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/GatherNd.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_GATHER_ND_H__
+#define __OP_GATHER_ND_H__
+
+#include "OpChef.h"
+
+class GatherNdChef final : public OpChef
+{
+public:
+ explicit GatherNdChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_GATHER_ND; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_GatherNdOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct GatherNdChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_GATHER_ND_H__
diff --git a/compiler/tflchef/core/src/Op/Greater.cpp b/compiler/tflchef/core/src/Op/Greater.cpp
new file mode 100644
index 000000000..81765aee5
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Greater.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Greater.h"
+
+flatbuffers::Offset<void> GreaterChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::GreaterOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> GreaterChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new GreaterChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Greater.h b/compiler/tflchef/core/src/Op/Greater.h
new file mode 100644
index 000000000..c54eaa6cc
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Greater.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_GREATER_H__
+#define __OP_GREATER_H__
+
+#include "OpChef.h"
+
+class GreaterChef final : public OpChef
+{
+public:
+ explicit GreaterChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_GREATER; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_GreaterOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct GreaterChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_GREATER_H__
diff --git a/compiler/tflchef/core/src/Op/GreaterEqual.cpp b/compiler/tflchef/core/src/Op/GreaterEqual.cpp
new file mode 100644
index 000000000..80045f6aa
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/GreaterEqual.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GreaterEqual.h"
+
+flatbuffers::Offset<void> GreaterEqualChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::GreaterEqualOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> GreaterEqualChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new GreaterEqualChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/GreaterEqual.h b/compiler/tflchef/core/src/Op/GreaterEqual.h
new file mode 100644
index 000000000..105bac8a7
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/GreaterEqual.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_GREATEREQUAL_H__
+#define __OP_GREATEREQUAL_H__
+
+#include "OpChef.h"
+
+class GreaterEqualChef final : public OpChef
+{
+public:
+ explicit GreaterEqualChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_GREATER_EQUAL;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_GreaterEqualOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct GreaterEqualChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_GREATEREQUAL_H__
diff --git a/compiler/tflchef/core/src/Op/If.cpp b/compiler/tflchef/core/src/Op/If.cpp
new file mode 100644
index 000000000..b0e575e2b
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/If.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "If.h"
+
+flatbuffers::Offset<void> IfChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_if_options());
+
+ tflite::IfOptionsBuilder if_options_builder{fbb};
+ if_options_builder.add_then_subgraph_index(operation.if_options().then_subgraph_index());
+ if_options_builder.add_else_subgraph_index(operation.if_options().else_subgraph_index());
+
+ return if_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> IfChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new IfChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/If.h b/compiler/tflchef/core/src/Op/If.h
new file mode 100644
index 000000000..7e18c5609
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/If.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_IF_H__
+#define __OP_IF_H__
+
+#include "OpChef.h"
+
+class IfChef final : public OpChef
+{
+public:
+ explicit IfChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_IF; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_IfOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct IfChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_IF_H__
diff --git a/compiler/tflchef/core/src/Op/L2Normalize.cpp b/compiler/tflchef/core/src/Op/L2Normalize.cpp
new file mode 100644
index 000000000..62d15e56e
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/L2Normalize.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "L2Normalize.h"
+#include "Convert.h"
+
+flatbuffers::Offset<void> L2Normalize::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ tflite::L2NormOptionsBuilder l2norm_options_builder{fbb};
+ auto tflite_activation = as_tflite_activation(operation.l2norm_options().activation());
+ l2norm_options_builder.add_fused_activation_function(tflite_activation);
+ return l2norm_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> L2NormalizeChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new L2Normalize{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/L2Normalize.h b/compiler/tflchef/core/src/Op/L2Normalize.h
new file mode 100644
index 000000000..dd5f21cae
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/L2Normalize.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_L2NORMALIZE_H__
+#define __OP_L2NORMALIZE_H__
+
+#include "OpChef.h"
+
+class L2Normalize final : public OpChef
+{
+public:
+ explicit L2Normalize(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_L2_NORMALIZATION;
+ }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_L2NormOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct L2NormalizeChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_L2NORMALIZE_H__
diff --git a/compiler/tflchef/core/src/Op/L2Pool2D.cpp b/compiler/tflchef/core/src/Op/L2Pool2D.cpp
new file mode 100644
index 000000000..f22bb9642
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/L2Pool2D.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "L2Pool2D.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> L2Pool2DChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_l2pool2d_options());
+
+ auto options = operation.l2pool2d_options();
+
+ auto tflite_padding = as_tflite_padding(options.padding());
+ auto tflite_activation = as_tflite_activation(options.activation());
+
+ tflite::Pool2DOptionsBuilder options_builder{fbb};
+ options_builder.add_padding(tflite_padding);
+ options_builder.add_stride_h(options.stride_h());
+ options_builder.add_stride_w(options.stride_w());
+ options_builder.add_filter_width(options.filter_width());
+ options_builder.add_filter_height(options.filter_height());
+ options_builder.add_fused_activation_function(tflite_activation);
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> L2Pool2DChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new L2Pool2DChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/L2Pool2D.h b/compiler/tflchef/core/src/Op/L2Pool2D.h
new file mode 100644
index 000000000..6bd8bdb4d
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/L2Pool2D.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_L2_POOL_2D_H__
+#define __OP_L2_POOL_2D_H__
+
+#include "OpChef.h"
+
+class L2Pool2DChef final : public OpChef
+{
+public:
+ explicit L2Pool2DChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_L2_POOL_2D; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_Pool2DOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct L2Pool2DChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_L2_POOL_2D_H__
diff --git a/compiler/tflchef/core/src/Op/LeakyRelu.cpp b/compiler/tflchef/core/src/Op/LeakyRelu.cpp
new file mode 100644
index 000000000..247739ac0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LeakyRelu.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LeakyRelu.h"
+
+flatbuffers::Offset<void> LeakyReluChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ assert(_operation->has_leaky_relu_options());
+
+ const auto &options = _operation->leaky_relu_options();
+
+ tflite::LeakyReluOptionsBuilder options_builder{fbb};
+ options_builder.add_alpha(options.alpha());
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> LeakyReluChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LeakyReluChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/LeakyRelu.h b/compiler/tflchef/core/src/Op/LeakyRelu.h
new file mode 100644
index 000000000..5449e593b
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LeakyRelu.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LEAKY_RELU_H__
+#define __OP_LEAKY_RELU_H__
+
+#include "OpChef.h"
+
+class LeakyReluChef final : public OpChef
+{
+public:
+ explicit LeakyReluChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_LEAKY_RELU; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_LeakyReluOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LeakyReluChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LEAKY_RELU_H__
diff --git a/compiler/tflchef/core/src/Op/Less.cpp b/compiler/tflchef/core/src/Op/Less.cpp
new file mode 100644
index 000000000..c143d8332
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Less.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Less.h"
+
+flatbuffers::Offset<void> LessChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::LessOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> LessChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LessChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Less.h b/compiler/tflchef/core/src/Op/Less.h
new file mode 100644
index 000000000..280036c3a
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Less.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LESS_H__
+#define __OP_LESS_H__
+
+#include "OpChef.h"
+
+class LessChef final : public OpChef
+{
+public:
+ explicit LessChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_LESS; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_LessOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LessChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LESS_H__
diff --git a/compiler/tflchef/core/src/Op/LessEqual.cpp b/compiler/tflchef/core/src/Op/LessEqual.cpp
new file mode 100644
index 000000000..dc383e785
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LessEqual.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LessEqual.h"
+
+flatbuffers::Offset<void> LessEqualChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::LessEqualOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> LessEqualChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LessEqualChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/LessEqual.h b/compiler/tflchef/core/src/Op/LessEqual.h
new file mode 100644
index 000000000..1315b9c53
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LessEqual.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LESSEQUAL_H__
+#define __OP_LESSEQUAL_H__
+
+#include "OpChef.h"
+
+class LessEqualChef final : public OpChef
+{
+public:
+ explicit LessEqualChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_LESS_EQUAL; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_LessEqualOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LessEqualChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LESSEQUAL_H__
diff --git a/compiler/tflchef/core/src/Op/LocalResponseNormalization.cpp b/compiler/tflchef/core/src/Op/LocalResponseNormalization.cpp
new file mode 100644
index 000000000..f5430d4ca
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LocalResponseNormalization.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LocalResponseNormalization.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void>
+LocalResponseNormalizationChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_local_response_normalization_options());
+
+ auto &lrn_options = operation.local_response_normalization_options();
+
+ auto tflite_radius = lrn_options.radius();
+ auto tflite_bias = lrn_options.bias();
+ auto tflite_alpha = lrn_options.alpha();
+ auto tflite_beta = lrn_options.beta();
+
+ tflite::LocalResponseNormalizationOptionsBuilder lrn_options_builder{fbb};
+
+ lrn_options_builder.add_radius(tflite_radius);
+ lrn_options_builder.add_bias(tflite_bias);
+ lrn_options_builder.add_alpha(tflite_alpha);
+ lrn_options_builder.add_beta(tflite_beta);
+
+ return lrn_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef>
+LocalResponseNormalizationChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LocalResponseNormalizationChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/LocalResponseNormalization.h b/compiler/tflchef/core/src/Op/LocalResponseNormalization.h
new file mode 100644
index 000000000..62a2355f2
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LocalResponseNormalization.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LOCAL_RESPONSE_NORMALIZATION_H__
+#define __OP_LOCAL_RESPONSE_NORMALIZATION_H__
+
+#include "OpChef.h"
+
+class LocalResponseNormalizationChef final : public OpChef
+{
+public:
+ explicit LocalResponseNormalizationChef(const tflchef::Operation *operation)
+ : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_LocalResponseNormalizationOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LocalResponseNormalizationChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LOCAL_RESPONSE_NORMALIZATION_H__
diff --git a/compiler/tflchef/core/src/Op/Log.cpp b/compiler/tflchef/core/src/Op/Log.cpp
new file mode 100644
index 000000000..c4e65adec
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Log.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Log.h"
+
+flatbuffers::Offset<void> LogChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ // No tflite option for Log. Use void.
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> LogChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LogChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Log.h b/compiler/tflchef/core/src/Op/Log.h
new file mode 100644
index 000000000..2cc8a663b
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Log.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LOG_H__
+#define __OP_LOG_H__
+
+#include "OpChef.h"
+
+class LogChef final : public OpChef
+{
+public:
+ explicit LogChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_LOG; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LogChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LOG_H__
diff --git a/compiler/tflchef/core/src/Op/LogSoftmax.cpp b/compiler/tflchef/core/src/Op/LogSoftmax.cpp
new file mode 100644
index 000000000..eb2f13243
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LogSoftmax.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LogSoftmax.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> LogSoftmaxChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::LogSoftmaxOptionsBuilder soft_options_builder{fbb};
+
+ return soft_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> LogSoftmaxChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LogSoftmaxChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/LogSoftmax.h b/compiler/tflchef/core/src/Op/LogSoftmax.h
new file mode 100644
index 000000000..3ce08b739
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LogSoftmax.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LOG_SOFTMAX_H__
+#define __OP_LOG_SOFTMAX_H__
+
+#include "OpChef.h"
+
+class LogSoftmaxChef final : public OpChef
+{
+public:
+ explicit LogSoftmaxChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_LOG_SOFTMAX; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_LogSoftmaxOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LogSoftmaxChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LOG_SOFTMAX_H__
diff --git a/compiler/tflchef/core/src/Op/LogicalAnd.cpp b/compiler/tflchef/core/src/Op/LogicalAnd.cpp
new file mode 100644
index 000000000..64a6113db
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LogicalAnd.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LogicalAnd.h"
+
+flatbuffers::Offset<void> LogicalAndChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::LogicalAndOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> LogicalAndChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LogicalAndChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/LogicalAnd.h b/compiler/tflchef/core/src/Op/LogicalAnd.h
new file mode 100644
index 000000000..1f272274f
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/LogicalAnd.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LOGICALAND_H__
+#define __OP_LOGICALAND_H__
+
+#include "OpChef.h"
+
+class LogicalAndChef final : public OpChef
+{
+public:
+ explicit LogicalAndChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_LOGICAL_AND; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_LogicalAndOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LogicalAndChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LOGICALAND_H__
diff --git a/compiler/tflchef/core/src/Op/Logistic.cpp b/compiler/tflchef/core/src/Op/Logistic.cpp
new file mode 100644
index 000000000..4a5808235
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Logistic.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Logistic.h"
+
+flatbuffers::Offset<void> LogisticChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ // No tflite option for Logistic. Use void.
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> LogisticChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new LogisticChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Logistic.h b/compiler/tflchef/core/src/Op/Logistic.h
new file mode 100644
index 000000000..c158af34d
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Logistic.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_LOGISTIC_H__
+#define __OP_LOGISTIC_H__
+
+#include "OpChef.h"
+
+class LogisticChef final : public OpChef
+{
+public:
+ explicit LogisticChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_LOGISTIC; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct LogisticChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_LOGISTIC_H__
diff --git a/compiler/tflchef/core/src/Op/MatrixDiag.cpp b/compiler/tflchef/core/src/Op/MatrixDiag.cpp
new file mode 100644
index 000000000..de505c056
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/MatrixDiag.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatrixDiag.h"
+
+flatbuffers::Offset<void> MatrixDiagChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::MatrixDiagOptionsBuilder options_builder{fbb};
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> MatrixDiagChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new MatrixDiagChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/MatrixDiag.h b/compiler/tflchef/core/src/Op/MatrixDiag.h
new file mode 100644
index 000000000..cbadf6b99
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/MatrixDiag.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MATRIX_DIAG_H__
+#define __OP_MATRIX_DIAG_H__
+
+#include "OpChef.h"
+
+class MatrixDiagChef final : public OpChef
+{
+public:
+ explicit MatrixDiagChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_MATRIX_DIAG; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_MatrixDiagOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct MatrixDiagChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_MATRIX_DIAG_H__
diff --git a/compiler/tflchef/core/src/Op/MatrixSetDiag.cpp b/compiler/tflchef/core/src/Op/MatrixSetDiag.cpp
new file mode 100644
index 000000000..0a4ee71c9
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/MatrixSetDiag.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatrixSetDiag.h"
+
+flatbuffers::Offset<void> MatrixSetDiagChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::MatrixSetDiagOptionsBuilder options_builder{fbb};
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> MatrixSetDiagChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new MatrixSetDiagChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/MatrixSetDiag.h b/compiler/tflchef/core/src/Op/MatrixSetDiag.h
new file mode 100644
index 000000000..8114d32a8
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/MatrixSetDiag.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MATRIX_SET_DIAG_H__
+#define __OP_MATRIX_SET_DIAG_H__
+
+#include "OpChef.h"
+
+class MatrixSetDiagChef final : public OpChef
+{
+public:
+ explicit MatrixSetDiagChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_MATRIX_SET_DIAG;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_MatrixSetDiagOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct MatrixSetDiagChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_MATRIX_SET_DIAG_H__
diff --git a/compiler/tflchef/core/src/Op/Maximum.cpp b/compiler/tflchef/core/src/Op/Maximum.cpp
new file mode 100644
index 000000000..8f415e2e4
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Maximum.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Maximum.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> MaximumChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::MaximumMinimumOptionsBuilder maximum_options_builder{fbb};
+ return maximum_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> MaximumChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new MaximumChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Maximum.h b/compiler/tflchef/core/src/Op/Maximum.h
new file mode 100644
index 000000000..53e95240c
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Maximum.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MAXIMUM_H__
+#define __OP_MAXIMUM_H__
+
+#include "OpChef.h"
+
+class MaximumChef final : public OpChef
+{
+public:
+ explicit MaximumChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_MAXIMUM; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_MaximumMinimumOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct MaximumChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_MAXIMUM_H__
diff --git a/compiler/tflchef/core/src/Op/Minimum.cpp b/compiler/tflchef/core/src/Op/Minimum.cpp
new file mode 100644
index 000000000..cc0c91901
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Minimum.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Minimum.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> MinimumChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::MaximumMinimumOptionsBuilder minimum_options_builder{fbb};
+ return minimum_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> MinimumChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new MinimumChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Minimum.h b/compiler/tflchef/core/src/Op/Minimum.h
new file mode 100644
index 000000000..3990e1eca
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Minimum.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MINIMUM_H__
+#define __OP_MINIMUM_H__
+
+#include "OpChef.h"
+
+class MinimumChef final : public OpChef
+{
+public:
+ explicit MinimumChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_MINIMUM; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_MaximumMinimumOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct MinimumChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_MINIMUM_H__
diff --git a/compiler/tflchef/core/src/Op/MirrorPad.cpp b/compiler/tflchef/core/src/Op/MirrorPad.cpp
new file mode 100644
index 000000000..2d68b6986
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/MirrorPad.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MirrorPad.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> MirrorPadChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_mirrorpad_options());
+
+ auto options = operation.mirrorpad_options();
+
+ auto tflite_mode = as_tflite_mirrorpadmode(options.mode());
+
+ tflite::MirrorPadOptionsBuilder options_builder{fbb};
+ options_builder.add_mode(tflite_mode);
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> MirrorPadChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new MirrorPadChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/MirrorPad.h b/compiler/tflchef/core/src/Op/MirrorPad.h
new file mode 100644
index 000000000..49461df35
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/MirrorPad.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MIRRORPAD_H__
+#define __OP_MIRRORPAD_H__
+
+#include "OpChef.h"
+
+class MirrorPadChef final : public OpChef
+{
+public:
+ explicit MirrorPadChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_MIRROR_PAD; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_MirrorPadOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct MirrorPadChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_MIRRORPAD_H__
diff --git a/compiler/tflchef/core/src/Op/Neg.cpp b/compiler/tflchef/core/src/Op/Neg.cpp
new file mode 100644
index 000000000..0e9fb9321
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Neg.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Neg.h"
+#include "Convert.h"
+
+flatbuffers::Offset<void> NegChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::NegOptionsBuilder neg_options_builder{fbb};
+
+ return neg_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> NegChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new NegChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Neg.h b/compiler/tflchef/core/src/Op/Neg.h
new file mode 100644
index 000000000..f7a2692c9
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Neg.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_NEG_H__
+#define __OP_NEG_H__
+
+#include "OpChef.h"
+
+class NegChef final : public OpChef
+{
+public:
+ explicit NegChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_NEG; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NegOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct NegChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_NEG_H__
diff --git a/compiler/tflchef/core/src/Op/NotEqual.cpp b/compiler/tflchef/core/src/Op/NotEqual.cpp
new file mode 100644
index 000000000..a408266f9
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/NotEqual.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NotEqual.h"
+
+flatbuffers::Offset<void> NotEqualChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::NotEqualOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> NotEqualChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new NotEqualChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/NotEqual.h b/compiler/tflchef/core/src/Op/NotEqual.h
new file mode 100644
index 000000000..3fd254773
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/NotEqual.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_NOT_EQUAL_H__
+#define __OP_NOT_EQUAL_H__
+
+#include "OpChef.h"
+
+class NotEqualChef final : public OpChef
+{
+public:
+ explicit NotEqualChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_NOT_EQUAL; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_NotEqualOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct NotEqualChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_NOT_EQUAL_H__
diff --git a/compiler/tflchef/core/src/Op/OneHot.cpp b/compiler/tflchef/core/src/Op/OneHot.cpp
new file mode 100644
index 000000000..421e50c9f
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/OneHot.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OneHot.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> OneHotChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_onehot_options());
+
+ auto options = operation.onehot_options();
+
+ tflite::OneHotOptionsBuilder options_builder{fbb};
+
+ options_builder.add_axis(options.axis());
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> OneHotChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new OneHotChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/OneHot.h b/compiler/tflchef/core/src/Op/OneHot.h
new file mode 100644
index 000000000..b29cb7978
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/OneHot.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ONEHOT_H__
+#define __OP_ONEHOT_H__
+
+#include "OpChef.h"
+
+class OneHotChef final : public OpChef
+{
+public:
+ explicit OneHotChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_ONE_HOT; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_OneHotOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct OneHotChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ONEHOT_H__
diff --git a/compiler/tflchef/core/src/Op/PRelu.cpp b/compiler/tflchef/core/src/Op/PRelu.cpp
new file mode 100644
index 000000000..30e8b8ef4
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/PRelu.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PRelu.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> PReluChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ // No option for PRelu
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> PReluChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new PReluChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/PRelu.h b/compiler/tflchef/core/src/Op/PRelu.h
new file mode 100644
index 000000000..4a5a935ed
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/PRelu.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_PRELU_H__
+#define __OP_PRELU_H__
+
+#include "OpChef.h"
+
+class PReluChef final : public OpChef
+{
+public:
+ explicit PReluChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_PRELU; }
+
+ // no builtin options for PRelu
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct PReluChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_PRELU_H__
diff --git a/compiler/tflchef/core/src/Op/Pow.cpp b/compiler/tflchef/core/src/Op/Pow.cpp
new file mode 100644
index 000000000..25e180237
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Pow.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pow.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> PowChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::PowOptionsBuilder options_builder{fbb};
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> PowChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new PowChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Pow.h b/compiler/tflchef/core/src/Op/Pow.h
new file mode 100644
index 000000000..f2d809e92
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Pow.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_POW_H__
+#define __OP_POW_H__
+
+#include "OpChef.h"
+
+class PowChef final : public OpChef
+{
+public:
+ explicit PowChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_POW; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_PowOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct PowChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_POW_H__
diff --git a/compiler/tflchef/core/src/Op/Range.cpp b/compiler/tflchef/core/src/Op/Range.cpp
new file mode 100644
index 000000000..189c46526
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Range.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Range.h"
+
+#include <cassert>
+#include <vector>
+
+flatbuffers::Offset<void> RangeChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::RangeOptionsBuilder options_builder{fbb};
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> RangeChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new RangeChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Range.h b/compiler/tflchef/core/src/Op/Range.h
new file mode 100644
index 000000000..f294d15a7
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Range.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RANGE_H__
+#define __OP_RANGE_H__
+
+#include "OpChef.h"
+
+class RangeChef final : public OpChef
+{
+public:
+ explicit RangeChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_RANGE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_RangeOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct RangeChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_RANGE_H__
diff --git a/compiler/tflchef/core/src/Op/Rank.cpp b/compiler/tflchef/core/src/Op/Rank.cpp
new file mode 100644
index 000000000..4eb2aa776
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Rank.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Rank.h"
+
+flatbuffers::Offset<void> RankChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::RankOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> RankChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new RankChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Rank.h b/compiler/tflchef/core/src/Op/Rank.h
new file mode 100644
index 000000000..0bce38095
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Rank.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RANK_H__
+#define __OP_RANK_H__
+
+#include "OpChef.h"
+
+class RankChef final : public OpChef
+{
+public:
+ explicit RankChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_RANK; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_RankOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct RankChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_RANK_H__
diff --git a/compiler/tflchef/core/src/Op/ReLUN1To1.cpp b/compiler/tflchef/core/src/Op/ReLUN1To1.cpp
new file mode 100644
index 000000000..d57e82341
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReLUN1To1.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLUN1To1.h"
+
+flatbuffers::Offset<void> ReLUN1To1Chef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> ReLUN1To1ChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ReLUN1To1Chef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ReLUN1To1.h b/compiler/tflchef/core/src/Op/ReLUN1To1.h
new file mode 100644
index 000000000..e034c7999
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReLUN1To1.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RELU_N1_TO_1_H__
+#define __OP_RELU_N1_TO_1_H__
+
+#include "OpChef.h"
+
+class ReLUN1To1Chef final : public OpChef
+{
+public:
+ explicit ReLUN1To1Chef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_RELU_N1_TO_1; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ReLUN1To1ChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_RELU_N1_TO_1_H__
diff --git a/compiler/tflchef/core/src/Op/ReduceAny.cpp b/compiler/tflchef/core/src/Op/ReduceAny.cpp
new file mode 100644
index 000000000..c94c8a3a4
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceAny.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceAny.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> ReduceAnyChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_reduce_any_options());
+
+ auto keep_dims = operation.reduce_any_options().keep_dims();
+
+ tflite::ReducerOptionsBuilder reducer_options_builder{fbb};
+ reducer_options_builder.add_keep_dims(keep_dims);
+
+ return reducer_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ReduceAnyChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ReduceAnyChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ReduceAny.h b/compiler/tflchef/core/src/Op/ReduceAny.h
new file mode 100644
index 000000000..cf6531732
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceAny.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_REDUCE_ANY_H__
+#define __OP_REDUCE_ANY_H__
+
+#include "OpChef.h"
+
+class ReduceAnyChef final : public OpChef
+{
+public:
+ explicit ReduceAnyChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_REDUCE_ANY; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_ReducerOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ReduceAnyChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_REDUCE_ANY_H__
diff --git a/compiler/tflchef/core/src/Op/ReduceMax.cpp b/compiler/tflchef/core/src/Op/ReduceMax.cpp
new file mode 100644
index 000000000..31543cdc0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceMax.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceMax.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> ReduceMaxChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_reduce_max_options());
+
+ auto keep_dims = operation.reduce_max_options().keep_dims();
+
+ tflite::ReducerOptionsBuilder reduce_max_options_builder{fbb};
+ reduce_max_options_builder.add_keep_dims(keep_dims);
+
+ return reduce_max_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ReduceMaxChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ReduceMaxChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ReduceMax.h b/compiler/tflchef/core/src/Op/ReduceMax.h
new file mode 100644
index 000000000..854c5b87d
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceMax.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_REDUCEMAX_H__
+#define __OP_REDUCEMAX_H__
+
+#include "OpChef.h"
+
+class ReduceMaxChef final : public OpChef
+{
+public:
+ explicit ReduceMaxChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_REDUCE_MAX; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_ReducerOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ReduceMaxChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_REDUCEMAX_H__
diff --git a/compiler/tflchef/core/src/Op/ReduceMin.cpp b/compiler/tflchef/core/src/Op/ReduceMin.cpp
new file mode 100644
index 000000000..e194a1837
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceMin.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceMin.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> ReduceMinChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_reduce_min_options());
+
+ auto keep_dims = operation.reduce_min_options().keep_dims();
+
+ tflite::ReducerOptionsBuilder reduce_min_options_builder{fbb};
+ reduce_min_options_builder.add_keep_dims(keep_dims);
+
+ return reduce_min_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ReduceMinChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ReduceMinChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ReduceMin.h b/compiler/tflchef/core/src/Op/ReduceMin.h
new file mode 100644
index 000000000..f29d273b9
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceMin.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_REDUCEMIN_H__
+#define __OP_REDUCEMIN_H__
+
+#include "OpChef.h"
+
+class ReduceMinChef final : public OpChef
+{
+public:
+ explicit ReduceMinChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_REDUCE_MIN; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_ReducerOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ReduceMinChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_REDUCEMIN_H__
diff --git a/compiler/tflchef/core/src/Op/ReduceProd.cpp b/compiler/tflchef/core/src/Op/ReduceProd.cpp
new file mode 100644
index 000000000..c89aca27e
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceProd.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceProd.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> ReduceProdChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_reduce_prod_options());
+
+ auto keep_dims = operation.reduce_prod_options().keep_dims();
+
+ tflite::ReducerOptionsBuilder reducer_options_builder{fbb};
+ reducer_options_builder.add_keep_dims(keep_dims);
+
+ return reducer_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ReduceProdChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ReduceProdChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ReduceProd.h b/compiler/tflchef/core/src/Op/ReduceProd.h
new file mode 100644
index 000000000..d5a11fdbc
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReduceProd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_REDUCE_PROD_H__
+#define __OP_REDUCE_PROD_H__
+
+#include "OpChef.h"
+
+class ReduceProdChef final : public OpChef
+{
+public:
+ explicit ReduceProdChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_REDUCE_PROD; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_ReducerOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ReduceProdChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_REDUCE_PROD_H__
diff --git a/compiler/tflchef/core/src/Op/Reshape.cpp b/compiler/tflchef/core/src/Op/Reshape.cpp
index 99555e898..01e521913 100644
--- a/compiler/tflchef/core/src/Op/Reshape.cpp
+++ b/compiler/tflchef/core/src/Op/Reshape.cpp
@@ -41,7 +41,8 @@ flatbuffers::Offset<void> ReshapeChef::value(flatbuffers::FlatBufferBuilder &fbb
{
auto &operation = (*_operation);
- assert(operation.has_reshape_options());
+ if (!operation.has_reshape_options())
+ return 0;
auto options = operation.reshape_options();
auto shapes = vector_new_shape(options);
diff --git a/compiler/tflchef/core/src/Op/ResizeBilinear.cpp b/compiler/tflchef/core/src/Op/ResizeBilinear.cpp
new file mode 100644
index 000000000..3d9299ce0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ResizeBilinear.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeBilinear.h"
+#include "Convert.h"
+
+#include <cassert>
+#include <vector>
+
+flatbuffers::Offset<void> ResizeBilinearChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_resize_bilinear_options());
+
+ auto options = operation.resize_bilinear_options();
+
+ tflite::ResizeBilinearOptionsBuilder options_builder{fbb};
+
+ options_builder.add_align_corners(options.align_corners());
+ options_builder.add_half_pixel_centers(options.half_pixel_centers());
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ResizeBilinearChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ResizeBilinearChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ResizeBilinear.h b/compiler/tflchef/core/src/Op/ResizeBilinear.h
new file mode 100644
index 000000000..9bd618538
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ResizeBilinear.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RESIZE_BILINEAR_H__
+#define __OP_RESIZE_BILINEAR_H__
+
+#include "OpChef.h"
+
+class ResizeBilinearChef final : public OpChef
+{
+public:
+ explicit ResizeBilinearChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_RESIZE_BILINEAR;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_ResizeBilinearOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ResizeBilinearChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_RESIZE_BILINEAR_H__
diff --git a/compiler/tflchef/core/src/Op/ResizeNearestNeighbor.cpp b/compiler/tflchef/core/src/Op/ResizeNearestNeighbor.cpp
new file mode 100644
index 000000000..1f930404f
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ResizeNearestNeighbor.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeNearestNeighbor.h"
+#include "Convert.h"
+
+#include <cassert>
+#include <vector>
+
+flatbuffers::Offset<void>
+ResizeNearestNeighborChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_resize_nearest_neighbor_options());
+
+ auto options = operation.resize_nearest_neighbor_options();
+
+ tflite::ResizeNearestNeighborOptionsBuilder options_builder{fbb};
+
+ options_builder.add_align_corners(options.align_corners());
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef>
+ResizeNearestNeighborChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ResizeNearestNeighborChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ResizeNearestNeighbor.h b/compiler/tflchef/core/src/Op/ResizeNearestNeighbor.h
new file mode 100644
index 000000000..e6ee832a8
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ResizeNearestNeighbor.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RESIZE_NEAREST_NEIGHBOR_H__
+#define __OP_RESIZE_NEAREST_NEIGHBOR_H__
+
+#include "OpChef.h"
+
+class ResizeNearestNeighborChef final : public OpChef
+{
+public:
+ explicit ResizeNearestNeighborChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_ResizeNearestNeighborOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ResizeNearestNeighborChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_RESIZE_NEAREST_NEIGHBOR_H__
diff --git a/compiler/tflchef/core/src/Op/ReverseSequence.cpp b/compiler/tflchef/core/src/Op/ReverseSequence.cpp
new file mode 100644
index 000000000..93541172b
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReverseSequence.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReverseSequence.h"
+
+flatbuffers::Offset<void> ReverseSequenceChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_reverse_sequence_options());
+
+ auto options = operation.reverse_sequence_options();
+
+ auto tflite_seq_dim = options.seq_dim();
+ auto tflite_batch_dim = options.batch_dim();
+
+ tflite::ReverseSequenceOptionsBuilder options_builder{fbb};
+
+ options_builder.add_seq_dim(tflite_seq_dim);
+ options_builder.add_batch_dim(tflite_batch_dim);
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef>
+ReverseSequenceChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ReverseSequenceChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ReverseSequence.h b/compiler/tflchef/core/src/Op/ReverseSequence.h
new file mode 100644
index 000000000..329505cf0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReverseSequence.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_REVERSE_SEQUENCE_H__
+#define __OP_REVERSE_SEQUENCE_H__
+
+#include "OpChef.h"
+
+class ReverseSequenceChef final : public OpChef
+{
+public:
+ explicit ReverseSequenceChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_REVERSE_SEQUENCE;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_ReverseSequenceOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ReverseSequenceChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_REVERSE_SEQUENCE_H__
diff --git a/compiler/tflchef/core/src/Op/ReverseV2.cpp b/compiler/tflchef/core/src/Op/ReverseV2.cpp
new file mode 100644
index 000000000..58ace1dd1
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReverseV2.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReverseV2.h"
+
+flatbuffers::Offset<void> ReverseV2Chef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::ReverseV2OptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ReverseV2ChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ReverseV2Chef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ReverseV2.h b/compiler/tflchef/core/src/Op/ReverseV2.h
new file mode 100644
index 000000000..a48a2d96a
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ReverseV2.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_REVERSEV2_H__
+#define __OP_REVERSEV2_H__
+
+#include "OpChef.h"
+
+class ReverseV2Chef final : public OpChef
+{
+public:
+ explicit ReverseV2Chef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_REVERSE_V2; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_ReverseV2Options;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ReverseV2ChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_REVERSEV2_H__
diff --git a/compiler/tflchef/core/src/Op/Round.cpp b/compiler/tflchef/core/src/Op/Round.cpp
new file mode 100644
index 000000000..e16c86518
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Round.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Round.h"
+
+flatbuffers::Offset<void> RoundChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ // No tflite option for Round. Use void.
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> RoundChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new RoundChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Round.h b/compiler/tflchef/core/src/Op/Round.h
new file mode 100644
index 000000000..7f0fbe370
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Round.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ROUND_H__
+#define __OP_ROUND_H__
+
+#include "OpChef.h"
+
+class RoundChef final : public OpChef
+{
+public:
+ explicit RoundChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_ROUND; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct RoundChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ROUND_H__
diff --git a/compiler/tflchef/core/src/Op/ScatterNd.cpp b/compiler/tflchef/core/src/Op/ScatterNd.cpp
new file mode 100644
index 000000000..7114dda6e
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ScatterNd.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScatterNd.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> ScatterNdChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::ScatterNdOptionsBuilder scatter_nd_options_builder{fbb};
+
+ return scatter_nd_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ScatterNdChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ScatterNdChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ScatterNd.h b/compiler/tflchef/core/src/Op/ScatterNd.h
new file mode 100644
index 000000000..2c89cf6a0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ScatterNd.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SCATTER_ND_H__
+#define __OP_SCATTER_ND_H__
+
+#include "OpChef.h"
+
+class ScatterNdChef final : public OpChef
+{
+public:
+ explicit ScatterNdChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SCATTER_ND; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_ScatterNdOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ScatterNdChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SCATTER_ND_H__
diff --git a/compiler/tflchef/core/src/Op/SegmentSum.cpp b/compiler/tflchef/core/src/Op/SegmentSum.cpp
new file mode 100644
index 000000000..934bcb0ec
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SegmentSum.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SegmentSum.h"
+
+flatbuffers::Offset<void> SegmentSumChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::SegmentSumOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SegmentSumChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SegmentSumChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/SegmentSum.h b/compiler/tflchef/core/src/Op/SegmentSum.h
new file mode 100644
index 000000000..c0ebfba52
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SegmentSum.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SEGMENT_SUM_H__
+#define __OP_SEGMENT_SUM_H__
+
+#include "OpChef.h"
+
+class SegmentSumChef final : public OpChef
+{
+public:
+ explicit SegmentSumChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SEGMENT_SUM; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_SegmentSumOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SegmentSumChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SEGMENT_SUM_H__
diff --git a/compiler/tflchef/core/src/Op/Select.cpp b/compiler/tflchef/core/src/Op/Select.cpp
new file mode 100644
index 000000000..31be736c9
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Select.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Select.h"
+
+flatbuffers::Offset<void> SelectChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::SelectOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SelectChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SelectChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Select.h b/compiler/tflchef/core/src/Op/Select.h
new file mode 100644
index 000000000..91ace16e1
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Select.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SELECT_H__
+#define __OP_SELECT_H__
+
+#include "OpChef.h"
+
+class SelectChef final : public OpChef
+{
+public:
+ explicit SelectChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SELECT; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_SelectOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SelectChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SELECT_H__
diff --git a/compiler/tflchef/core/src/Op/SelectV2.cpp b/compiler/tflchef/core/src/Op/SelectV2.cpp
new file mode 100644
index 000000000..f6c0bfc49
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SelectV2.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SelectV2.h"
+
+flatbuffers::Offset<void> SelectV2Chef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::SelectV2OptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SelectV2ChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SelectV2Chef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/SelectV2.h b/compiler/tflchef/core/src/Op/SelectV2.h
new file mode 100644
index 000000000..36d74c344
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SelectV2.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SELECT_V2_H__
+#define __OP_SELECT_V2_H__
+
+#include "OpChef.h"
+
+class SelectV2Chef final : public OpChef
+{
+public:
+ explicit SelectV2Chef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SELECT_V2; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_SelectV2Options;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SelectV2ChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SELECT_V2_H__
diff --git a/compiler/tflchef/core/src/Op/Sin.cpp b/compiler/tflchef/core/src/Op/Sin.cpp
new file mode 100644
index 000000000..1752ce7a1
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Sin.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sin.h"
+
+flatbuffers::Offset<void> SinChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ // No tflite option for Sin. Use void.
+ return flatbuffers::Offset<void>();
+}
+
+std::unique_ptr<OpChef> SinChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SinChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Sin.h b/compiler/tflchef/core/src/Op/Sin.h
new file mode 100644
index 000000000..121b73b68
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Sin.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SIN_H__
+#define __OP_SIN_H__
+
+#include "OpChef.h"
+
+class SinChef final : public OpChef
+{
+public:
+ explicit SinChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SIN; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SinChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SIN_H__
diff --git a/compiler/tflchef/core/src/Op/Slice.cpp b/compiler/tflchef/core/src/Op/Slice.cpp
new file mode 100644
index 000000000..27ae80a8a
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Slice.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Slice.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> SliceChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::SliceOptionsBuilder slice_options_builder{fbb};
+ return slice_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SliceChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SliceChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Slice.h b/compiler/tflchef/core/src/Op/Slice.h
new file mode 100644
index 000000000..06fd6347b
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Slice.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SLICE_H__
+#define __OP_SLICE_H__
+
+#include "OpChef.h"
+
+class SliceChef final : public OpChef
+{
+public:
+ explicit SliceChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SLICE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_SliceOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SliceChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SLICE_H__
diff --git a/compiler/tflchef/core/src/Op/SpaceToBatchND.cpp b/compiler/tflchef/core/src/Op/SpaceToBatchND.cpp
new file mode 100644
index 000000000..74e052826
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SpaceToBatchND.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SpaceToBatchND.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> SpaceToBatchNDChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::SpaceToBatchNDOptionsBuilder space_to_batch_nd_options_builder{fbb};
+
+ return space_to_batch_nd_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SpaceToBatchNDChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SpaceToBatchNDChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/SpaceToBatchND.h b/compiler/tflchef/core/src/Op/SpaceToBatchND.h
new file mode 100644
index 000000000..e263bdc61
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SpaceToBatchND.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SPACETOBATCHND_H__
+#define __OP_SPACETOBATCHND_H__
+
+#include "OpChef.h"
+
+class SpaceToBatchNDChef final : public OpChef
+{
+public:
+ explicit SpaceToBatchNDChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_SPACE_TO_BATCH_ND;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_SpaceToBatchNDOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SpaceToBatchNDChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SPACETOBATCHND_H__
diff --git a/compiler/tflchef/core/src/Op/SpaceToDepth.cpp b/compiler/tflchef/core/src/Op/SpaceToDepth.cpp
new file mode 100644
index 000000000..98eed8c26
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SpaceToDepth.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SpaceToDepth.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> SpaceToDepthChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_space_to_depth_options());
+
+ auto tflite_block_size = operation.space_to_depth_options().block_size();
+
+ tflite::SpaceToDepthOptionsBuilder std_options_builder{fbb};
+ std_options_builder.add_block_size(tflite_block_size);
+
+ return std_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SpaceToDepthChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SpaceToDepthChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/SpaceToDepth.h b/compiler/tflchef/core/src/Op/SpaceToDepth.h
new file mode 100644
index 000000000..db852feac
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SpaceToDepth.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SPACETODEPTH_H__
+#define __OP_SPACETODEPTH_H__
+
+#include "OpChef.h"
+
+class SpaceToDepthChef final : public OpChef
+{
+public:
+ explicit SpaceToDepthChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_SPACE_TO_DEPTH;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_SpaceToDepthOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SpaceToDepthChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SPACETODEPTH_H__
diff --git a/compiler/tflchef/core/src/Op/SparseToDense.cpp b/compiler/tflchef/core/src/Op/SparseToDense.cpp
new file mode 100644
index 000000000..f1f8a7150
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SparseToDense.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SparseToDense.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> SparseToDenseChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_sparse_to_dense_options());
+
+ auto tflite_validate_indices = operation.sparse_to_dense_options().validate_indices();
+
+ tflite::SparseToDenseOptionsBuilder sparse_to_dense_options_builder(fbb);
+ sparse_to_dense_options_builder.add_validate_indices(tflite_validate_indices);
+
+ return sparse_to_dense_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SparseToDenseChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SparseToDenseChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/SparseToDense.h b/compiler/tflchef/core/src/Op/SparseToDense.h
new file mode 100644
index 000000000..02cbd6a6d
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SparseToDense.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SPARSETODENSE_H__
+#define __OP_SPARSETODENSE_H__
+
+#include "OpChef.h"
+
+class SparseToDenseChef final : public OpChef
+{
+public:
+ explicit SparseToDenseChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_SPARSE_TO_DENSE;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_SparseToDenseOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SparseToDenseChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SPARSETODENSE_H__
diff --git a/compiler/tflchef/core/src/Op/Split.cpp b/compiler/tflchef/core/src/Op/Split.cpp
new file mode 100644
index 000000000..f4704e537
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Split.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Split.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> SplitChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_split_options());
+
+ auto num_splits = operation.split_options().num_splits();
+
+ tflite::SplitOptionsBuilder split_options_builder{fbb};
+ split_options_builder.add_num_splits(num_splits);
+
+ return split_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SplitChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SplitChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Split.h b/compiler/tflchef/core/src/Op/Split.h
new file mode 100644
index 000000000..db6158069
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Split.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SPLIT_H__
+#define __OP_SPLIT_H__
+
+#include "OpChef.h"
+
+class SplitChef final : public OpChef
+{
+public:
+ explicit SplitChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SPLIT; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_SplitOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SplitChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SPLIT_H__
diff --git a/compiler/tflchef/core/src/Op/SplitV.cpp b/compiler/tflchef/core/src/Op/SplitV.cpp
new file mode 100644
index 000000000..fa93db6ba
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SplitV.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SplitV.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> SplitVChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_split_v_options());
+
+ auto num_splits = operation.split_v_options().num_splits();
+
+ tflite::SplitVOptionsBuilder split_v_options_builder{fbb};
+ split_v_options_builder.add_num_splits(num_splits);
+
+ return split_v_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SplitVChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SplitVChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/SplitV.h b/compiler/tflchef/core/src/Op/SplitV.h
new file mode 100644
index 000000000..c37736e31
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SplitV.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SPLIT_V_H__
+#define __OP_SPLIT_V_H__
+
+#include "OpChef.h"
+
+class SplitVChef final : public OpChef
+{
+public:
+ explicit SplitVChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SPLIT_V; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_SplitVOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SplitVChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SPLIT_V_H__
diff --git a/compiler/tflchef/core/src/Op/Square.cpp b/compiler/tflchef/core/src/Op/Square.cpp
new file mode 100644
index 000000000..fd3538072
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Square.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Square.h"
+
+flatbuffers::Offset<void> SquareChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::SquareOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SquareChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SquareChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Square.h b/compiler/tflchef/core/src/Op/Square.h
new file mode 100644
index 000000000..5b76e6302
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Square.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SQUARE_H__
+#define __OP_SQUARE_H__
+
+#include "OpChef.h"
+
+class SquareChef final : public OpChef
+{
+public:
+ explicit SquareChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SQUARE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_SquareOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SquareChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SQUARE_H__
diff --git a/compiler/tflchef/core/src/Op/SquaredDifference.cpp b/compiler/tflchef/core/src/Op/SquaredDifference.cpp
new file mode 100644
index 000000000..757c148a9
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SquaredDifference.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SquaredDifference.h"
+
+flatbuffers::Offset<void> SquaredDifferenceChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::SquaredDifferenceOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef>
+SquaredDifferenceChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SquaredDifferenceChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/SquaredDifference.h b/compiler/tflchef/core/src/Op/SquaredDifference.h
new file mode 100644
index 000000000..f919975f9
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/SquaredDifference.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SQUAREDDIFFERENCE_H__
+#define __OP_SQUAREDDIFFERENCE_H__
+
+#include "OpChef.h"
+
+class SquaredDifferenceChef final : public OpChef
+{
+public:
+ explicit SquaredDifferenceChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_SQUARED_DIFFERENCE;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_SquaredDifferenceOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SquaredDifferenceChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SQUAREDDIFFERENCE_H__
diff --git a/compiler/tflchef/core/src/Op/Squeeze.cpp b/compiler/tflchef/core/src/Op/Squeeze.cpp
new file mode 100644
index 000000000..8d6ef42d6
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Squeeze.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Squeeze.h"
+#include "Convert.h"
+
+#include <cassert>
+#include <vector>
+
+flatbuffers::Offset<void> SqueezeChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_squeeze_options());
+
+ const auto &options = operation.squeeze_options();
+ // Note: 'CreateVector' should be placed before 'CreateOptions'
+ // Read flatbuffers.h 'void NotNested()' for more information
+ auto fb_squeeze_dims =
+ fbb.CreateVector(options.squeeze_dim().data(), options.squeeze_dim().size());
+
+ return tflite::CreateSqueezeOptions(fbb, fb_squeeze_dims).Union();
+}
+
+std::unique_ptr<OpChef> SqueezeChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SqueezeChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Squeeze.h b/compiler/tflchef/core/src/Op/Squeeze.h
new file mode 100644
index 000000000..2787231f2
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Squeeze.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SQUEEZE_H__
+#define __OP_SQUEEZE_H__
+
+#include "OpChef.h"
+
+class SqueezeChef final : public OpChef
+{
+public:
+ explicit SqueezeChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SQUEEZE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_SqueezeOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SqueezeChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SQUEEZE_H__
diff --git a/compiler/tflchef/core/src/Op/StridedSlice.cpp b/compiler/tflchef/core/src/Op/StridedSlice.cpp
new file mode 100644
index 000000000..587a95c66
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/StridedSlice.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StridedSlice.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> StridedSliceChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_strided_slice_options());
+
+ tflite::StridedSliceOptionsBuilder strided_slice_options_builder{fbb};
+ strided_slice_options_builder.add_begin_mask(operation.strided_slice_options().begin_mask());
+ strided_slice_options_builder.add_end_mask(operation.strided_slice_options().end_mask());
+ strided_slice_options_builder.add_ellipsis_mask(
+ operation.strided_slice_options().ellipsis_mask());
+ strided_slice_options_builder.add_new_axis_mask(
+ operation.strided_slice_options().new_axis_mask());
+ strided_slice_options_builder.add_shrink_axis_mask(
+ operation.strided_slice_options().shrink_axis_mask());
+
+ return strided_slice_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> StridedSliceChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new StridedSliceChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/StridedSlice.h b/compiler/tflchef/core/src/Op/StridedSlice.h
new file mode 100644
index 000000000..49da44f12
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/StridedSlice.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_STRIDEDSLICE_H__
+#define __OP_STRIDEDSLICE_H__
+
+#include "OpChef.h"
+
+class StridedSliceChef final : public OpChef
+{
+public:
+ explicit StridedSliceChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_STRIDED_SLICE;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_StridedSliceOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct StridedSliceChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_STRIDEDSLICE_H__
diff --git a/compiler/tflchef/core/src/Op/Sum.cpp b/compiler/tflchef/core/src/Op/Sum.cpp
new file mode 100644
index 000000000..6b79d3ec5
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Sum.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sum.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> SumChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_sum_options());
+
+ auto keep_dims = operation.sum_options().keep_dims();
+
+ tflite::ReducerOptionsBuilder sum_options_builder{fbb};
+ sum_options_builder.add_keep_dims(keep_dims);
+
+ return sum_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> SumChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new SumChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Sum.h b/compiler/tflchef/core/src/Op/Sum.h
new file mode 100644
index 000000000..d3cc8c173
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Sum.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SUM_H__
+#define __OP_SUM_H__
+
+#include "OpChef.h"
+
+class SumChef final : public OpChef
+{
+public:
+ explicit SumChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_SUM; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_ReducerOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct SumChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_SUM_H__
diff --git a/compiler/tflchef/core/src/Op/Tile.cpp b/compiler/tflchef/core/src/Op/Tile.cpp
new file mode 100644
index 000000000..18710b4b0
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Tile.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tile.h"
+
+flatbuffers::Offset<void> TileChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::TileOptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> TileChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new TileChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Tile.h b/compiler/tflchef/core/src/Op/Tile.h
new file mode 100644
index 000000000..2870ff174
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Tile.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_TILE_H__
+#define __OP_TILE_H__
+
+#include "OpChef.h"
+
+class TileChef final : public OpChef
+{
+public:
+ explicit TileChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_TILE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_TileOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct TileChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_TILE_H__
diff --git a/compiler/tflchef/core/src/Op/TopKV2.cpp b/compiler/tflchef/core/src/Op/TopKV2.cpp
new file mode 100644
index 000000000..08c4de66b
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/TopKV2.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TopKV2.h"
+
+flatbuffers::Offset<void> TopKV2Chef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::TopKV2OptionsBuilder options_builder{fbb};
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> TopKV2ChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new TopKV2Chef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/TopKV2.h b/compiler/tflchef/core/src/Op/TopKV2.h
new file mode 100644
index 000000000..554822332
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/TopKV2.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_TOPK_V2_H__
+#define __OP_TOPK_V2_H__
+
+#include "OpChef.h"
+
+class TopKV2Chef final : public OpChef
+{
+public:
+ explicit TopKV2Chef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_TOPK_V2; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_TopKV2Options; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct TopKV2ChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_TOPK_V2_H__
diff --git a/compiler/tflchef/core/src/Op/TransposeConv.cpp b/compiler/tflchef/core/src/Op/TransposeConv.cpp
new file mode 100644
index 000000000..c9e452714
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/TransposeConv.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TransposeConv.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> TransposeConvChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ tflite::TransposeConvOptionsBuilder options_builder{fbb};
+
+ assert(operation.has_transpose_conv_options());
+
+ auto tflite_padding = as_tflite_padding(operation.transpose_conv_options().padding());
+
+ options_builder.add_padding(tflite_padding);
+
+ options_builder.add_stride_h(operation.transpose_conv_options().stride_h());
+ options_builder.add_stride_w(operation.transpose_conv_options().stride_w());
+
+ return options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> TransposeConvChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new TransposeConvChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/TransposeConv.h b/compiler/tflchef/core/src/Op/TransposeConv.h
new file mode 100644
index 000000000..e664bfff2
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/TransposeConv.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_TRANSPOSE_CONV_H__
+#define __OP_TRANSPOSE_CONV_H__
+
+#include "OpChef.h"
+
+class TransposeConvChef final : public OpChef
+{
+public:
+ explicit TransposeConvChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override
+ {
+ return tflite::BuiltinOperator_TRANSPOSE_CONV;
+ }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_TransposeConvOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct TransposeConvChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_TRANSPOSE_CONV_H__
diff --git a/compiler/tflchef/core/src/Op/Unique.cpp b/compiler/tflchef/core/src/Op/Unique.cpp
new file mode 100644
index 000000000..d9a7293c5
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Unique.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Unique.h"
+#include "Convert.h"
+
+#include <cassert>
+
+flatbuffers::Offset<void> UniqueChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_unique_options());
+
+ auto tflite_out_idx = as_tflite_tensortype(operation.unique_options().idx_out_type());
+
+ tflite::UniqueOptionsBuilder unique_options_builder{fbb};
+ unique_options_builder.add_idx_out_type(tflite_out_idx);
+
+ return unique_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> UniqueChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new UniqueChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Unique.h b/compiler/tflchef/core/src/Op/Unique.h
new file mode 100644
index 000000000..58aa7bfaa
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Unique.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_UNIQUE_H__
+#define __OP_UNIQUE_H__
+
+#include "OpChef.h"
+
+class UniqueChef final : public OpChef
+{
+public:
+ explicit UniqueChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_UNIQUE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_UniqueOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct UniqueChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_UNIQUE_H__
diff --git a/compiler/tflchef/core/src/Op/Unpack.cpp b/compiler/tflchef/core/src/Op/Unpack.cpp
new file mode 100644
index 000000000..504da5a46
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Unpack.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Unpack.h"
+#include <cassert>
+flatbuffers::Offset<void> UnpackChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_unpack_options());
+
+ tflite::UnpackOptionsBuilder unpack_options_builder{fbb};
+ unpack_options_builder.add_num(operation.unpack_options().num());
+ unpack_options_builder.add_axis(operation.unpack_options().axis());
+
+ return unpack_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> UnpackChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new UnpackChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Unpack.h b/compiler/tflchef/core/src/Op/Unpack.h
new file mode 100644
index 000000000..3a425b1a3
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Unpack.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_UNPACK_H__
+#define __OP_UNPACK_H__
+
+#include "OpChef.h"
+
+class UnpackChef final : public OpChef
+{
+public:
+ explicit UnpackChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_UNPACK; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_UnpackOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct UnpackChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_UNPACK_H__
diff --git a/compiler/tflchef/core/src/Op/Where.cpp b/compiler/tflchef/core/src/Op/Where.cpp
new file mode 100644
index 000000000..0ce9102bc
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Where.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Where.h"
+
+flatbuffers::Offset<void> WhereChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::WhereOptionsBuilder where_options_builder{fbb};
+ return where_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> WhereChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new WhereChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/Where.h b/compiler/tflchef/core/src/Op/Where.h
new file mode 100644
index 000000000..7991c64cd
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/Where.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_WHERE_H__
+#define __OP_WHERE_H__
+
+#include "OpChef.h"
+
+class WhereChef final : public OpChef
+{
+public:
+ explicit WhereChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_WHERE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_WhereOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct WhereChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_WHERE_H__
diff --git a/compiler/tflchef/core/src/Op/While.cpp b/compiler/tflchef/core/src/Op/While.cpp
new file mode 100644
index 000000000..1253d0fcc
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/While.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "While.h"
+
+flatbuffers::Offset<void> WhileChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ auto &operation = (*_operation);
+
+ assert(operation.has_while_options());
+
+ tflite::WhileOptionsBuilder while_options_builder{fbb};
+ while_options_builder.add_cond_subgraph_index(operation.while_options().cond_subgraph_index());
+ while_options_builder.add_body_subgraph_index(operation.while_options().body_subgraph_index());
+
+ return while_options_builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> WhileChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new WhileChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/While.h b/compiler/tflchef/core/src/Op/While.h
new file mode 100644
index 000000000..150a14be3
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/While.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_WHILE_H__
+#define __OP_WHILE_H__
+
+#include "OpChef.h"
+
+class WhileChef final : public OpChef
+{
+public:
+ explicit WhileChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_WHILE; }
+
+ tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_WhileOptions; }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct WhileChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_WHILE_H__
diff --git a/compiler/tflchef/core/src/Op/ZerosLike.cpp b/compiler/tflchef/core/src/Op/ZerosLike.cpp
new file mode 100644
index 000000000..e47e2ab50
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ZerosLike.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ZerosLike.h"
+
+flatbuffers::Offset<void> ZerosLikeChef::value(flatbuffers::FlatBufferBuilder &fbb) const
+{
+ tflite::ZerosLikeOptionsBuilder builder{fbb};
+ return builder.Finish().Union();
+}
+
+std::unique_ptr<OpChef> ZerosLikeChefFactory::create(const tflchef::Operation *operation) const
+{
+ return std::unique_ptr<OpChef>{new ZerosLikeChef{operation}};
+}
diff --git a/compiler/tflchef/core/src/Op/ZerosLike.h b/compiler/tflchef/core/src/Op/ZerosLike.h
new file mode 100644
index 000000000..0af5b93e2
--- /dev/null
+++ b/compiler/tflchef/core/src/Op/ZerosLike.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ZEROS_LIKE_H__
+#define __OP_ZEROS_LIKE_H__
+
+#include "OpChef.h"
+
+class ZerosLikeChef final : public OpChef
+{
+public:
+ explicit ZerosLikeChef(const tflchef::Operation *operation) : _operation{operation}
+ {
+ // DO NOTHING
+ }
+
+public:
+ tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_ZEROS_LIKE; }
+
+ tflite::BuiltinOptions type(void) const override
+ {
+ return tflite::BuiltinOptions_ZerosLikeOptions;
+ }
+
+ flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override;
+
+private:
+ const tflchef::Operation *_operation;
+};
+
+struct ZerosLikeChefFactory final : public OpChefFactory
+{
+ std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override;
+};
+
+#endif // __OP_ZEROS_LIKE_H__
diff --git a/compiler/tflchef/core/src/OpChef.def b/compiler/tflchef/core/src/OpChef.def
index a25250c46..263725a24 100644
--- a/compiler/tflchef/core/src/OpChef.def
+++ b/compiler/tflchef/core/src/OpChef.def
@@ -6,32 +6,111 @@
// OP_CHEF(NAME, FACTORY_CLASS)
OP_CHEF(Abs, AbsChefFactory)
OP_CHEF(Add, AddChefFactory)
+OP_CHEF(AddN, AddNChefFactory)
OP_CHEF(ArgMax, ArgMaxChefFactory)
-OP_CHEF(BatchToSpaceND, BatchToSpaceNDChefFactory)
+OP_CHEF(ArgMin, ArgMinChefFactory)
OP_CHEF(AveragePool2D, AveragePool2DChefFactory)
+OP_CHEF(BatchMatMul, BatchMatMulChefFactory)
+OP_CHEF(BatchToSpaceND, BatchToSpaceNDChefFactory)
+OP_CHEF(Cast, CastChefFactory)
+OP_CHEF(Ceil, CeilChefFactory)
OP_CHEF(Concatenation, ConcatenationChefFactory)
OP_CHEF(Conv2D, Conv2DChefFactory)
OP_CHEF(Cos, CosChefFactory)
+OP_CHEF(DepthToSpace, DepthToSpaceChefFactory)
OP_CHEF(DepthwiseConv2D, DepthwiseConv2DChefFactory)
OP_CHEF(Div, DivChefFactory)
+OP_CHEF(ELU, ELUChefFactory)
OP_CHEF(Equal, EqualChefFactory)
OP_CHEF(Exp, ExpChefFactory)
+OP_CHEF(ExpandDims, ExpandDimsChefFactory)
+OP_CHEF(Fill, FillChefFactory)
+OP_CHEF(Floor, FloorChefFactory)
OP_CHEF(FloorDiv, FloorDivChefFactory)
+OP_CHEF(FloorMod, FloorModChefFactory)
OP_CHEF(FullyConnected, FullyConnectedChefFactory)
+OP_CHEF(Gather, GatherChefFactory)
+OP_CHEF(GatherNd, GatherNdChefFactory)
+OP_CHEF(Greater, GreaterChefFactory)
+OP_CHEF(GreaterEqual, GreaterEqualChefFactory)
+OP_CHEF(If, IfChefFactory)
+OP_CHEF(L2Normalize, L2NormalizeChefFactory)
+OP_CHEF(L2Pool2D, L2Pool2DChefFactory)
+OP_CHEF(LeakyRelu, LeakyReluChefFactory)
+OP_CHEF(Less, LessChefFactory)
+OP_CHEF(LessEqual, LessEqualChefFactory)
+OP_CHEF(LocalResponseNormalization, LocalResponseNormalizationChefFactory)
+OP_CHEF(Log, LogChefFactory)
+OP_CHEF(LogicalAnd, LogicalAndChefFactory)
OP_CHEF(LogicalNot, LogicalNotChefFactory)
OP_CHEF(LogicalOr, LogicalOrChefFactory)
+OP_CHEF(Logistic, LogisticChefFactory)
+OP_CHEF(LogSoftmax, LogSoftmaxChefFactory)
+OP_CHEF(MatrixDiag, MatrixDiagChefFactory)
+OP_CHEF(MatrixSetDiag, MatrixSetDiagChefFactory)
+OP_CHEF(Maximum, MaximumChefFactory)
OP_CHEF(MaxPool2D, MaxPool2DChefFactory)
OP_CHEF(Mean, MeanChefFactory)
+OP_CHEF(Minimum, MinimumChefFactory)
+OP_CHEF(MirrorPad, MirrorPadChefFactory)
OP_CHEF(Mul, MulChefFactory)
+OP_CHEF(Neg, NegChefFactory)
+OP_CHEF(NotEqual, NotEqualChefFactory)
+OP_CHEF(OneHot, OneHotChefFactory)
OP_CHEF(Pack, PackChefFactory)
OP_CHEF(Pad, PadChefFactory)
+OP_CHEF(Pow, PowChefFactory)
+OP_CHEF(PRelu, PReluChefFactory)
+OP_CHEF(Range, RangeChefFactory)
+OP_CHEF(Rank, RankChefFactory)
+OP_CHEF(ReduceAny, ReduceAnyChefFactory)
+OP_CHEF(ReduceMax, ReduceMaxChefFactory)
+OP_CHEF(ReduceMin, ReduceMinChefFactory)
+OP_CHEF(ReduceProd, ReduceProdChefFactory)
OP_CHEF(ReLU, ReLUChefFactory)
OP_CHEF(ReLU6, ReLU6ChefFactory)
+OP_CHEF(ReLUN1To1, ReLUN1To1ChefFactory)
OP_CHEF(Reshape, ReshapeChefFactory)
+OP_CHEF(ResizeBilinear, ResizeBilinearChefFactory)
+OP_CHEF(ResizeNearestNeighbor, ResizeNearestNeighborChefFactory)
+OP_CHEF(ReverseSequence, ReverseSequenceChefFactory)
+OP_CHEF(ReverseV2, ReverseV2ChefFactory)
+OP_CHEF(Round, RoundChefFactory)
OP_CHEF(Rsqrt, RsqrtChefFactory)
+OP_CHEF(ScatterNd, ScatterNdChefFactory)
+OP_CHEF(SegmentSum, SegmentSumChefFactory)
+OP_CHEF(Select, SelectChefFactory)
+OP_CHEF(SelectV2, SelectV2ChefFactory)
OP_CHEF(Shape, ShapeChefFactory)
+OP_CHEF(Sin, SinChefFactory)
+OP_CHEF(Slice, SliceChefFactory)
OP_CHEF(Softmax, SoftmaxChefFactory)
+OP_CHEF(SpaceToBatchND, SpaceToBatchNDChefFactory)
+OP_CHEF(SpaceToDepth, SpaceToDepthChefFactory)
+OP_CHEF(SparseToDense, SparseToDenseChefFactory)
+OP_CHEF(Split, SplitChefFactory)
+OP_CHEF(SplitV, SplitVChefFactory)
OP_CHEF(Sqrt, SqrtChefFactory)
+OP_CHEF(Square, SquareChefFactory)
+OP_CHEF(SquaredDifference, SquaredDifferenceChefFactory)
+OP_CHEF(Squeeze, SqueezeChefFactory)
+OP_CHEF(StridedSlice, StridedSliceChefFactory)
OP_CHEF(Sub, SubChefFactory)
+OP_CHEF(Sum, SumChefFactory)
OP_CHEF(Tanh, TanhChefFactory)
+OP_CHEF(Tile, TileChefFactory)
+OP_CHEF(TopKV2, TopKV2ChefFactory)
OP_CHEF(Transpose, TransposeChefFactory)
+OP_CHEF(TransposeConv, TransposeConvChefFactory)
+OP_CHEF(Unique, UniqueChefFactory)
+OP_CHEF(Unpack, UnpackChefFactory)
+OP_CHEF(Where, WhereChefFactory)
+OP_CHEF(While, WhileChefFactory)
+OP_CHEF(ZerosLike, ZerosLikeChefFactory)
+
+// Custom Op
+OP_CHEF(AddV2, AddV2ChefFactory)
+OP_CHEF(All, AllChefFactory)
+OP_CHEF(BatchMatMulV2, BatchMatMulV2ChefFactory)
+OP_CHEF(MatMul, MatMulChefFactory)
+OP_CHEF(MatrixBandPart, MatrixBandPartChefFactory)
diff --git a/compiler/tflchef/core/src/OpChef.h b/compiler/tflchef/core/src/OpChef.h
index 0b7d9cf08..7efa096cc 100644
--- a/compiler/tflchef/core/src/OpChef.h
+++ b/compiler/tflchef/core/src/OpChef.h
@@ -29,6 +29,13 @@ struct OpChef
virtual tflite::BuiltinOperator code(void) const = 0;
virtual tflite::BuiltinOptions type(void) const = 0;
virtual flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const = 0;
+
+ // TODO Find a way to place this method in a better place
+ virtual flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
+ custom_value(flatbuffers::FlatBufferBuilder &fbb) const
+ {
+ return flatbuffers::Offset<flatbuffers::Vector<uint8_t>>();
+ }
};
struct OpChefFactory
diff --git a/compiler/tflchef/core/src/OpChefs.h b/compiler/tflchef/core/src/OpChefs.h
index 65dbd4b92..55c37ebfb 100644
--- a/compiler/tflchef/core/src/OpChefs.h
+++ b/compiler/tflchef/core/src/OpChefs.h
@@ -19,34 +19,112 @@
#include "Op/Abs.h"
#include "Op/Add.h"
+#include "Op/AddN.h"
#include "Op/ArgMax.h"
+#include "Op/ArgMin.h"
#include "Op/AveragePool2D.h"
+#include "Op/BatchMatMul.h"
#include "Op/BatchToSpaceND.h"
+#include "Op/Cast.h"
+#include "Op/Ceil.h"
#include "Op/Concatenation.h"
#include "Op/Conv2D.h"
#include "Op/Cos.h"
+#include "Op/DepthToSpace.h"
#include "Op/DepthwiseConv2D.h"
#include "Op/Div.h"
+#include "Op/ELU.h"
#include "Op/Equal.h"
#include "Op/Exp.h"
+#include "Op/ExpandDims.h"
+#include "Op/Fill.h"
+#include "Op/Floor.h"
#include "Op/FloorDiv.h"
+#include "Op/FloorMod.h"
#include "Op/FullyConnected.h"
-#include "Op/LogicalOr.h"
+#include "Op/Gather.h"
+#include "Op/GatherNd.h"
+#include "Op/Greater.h"
+#include "Op/GreaterEqual.h"
+#include "Op/If.h"
+#include "Op/L2Normalize.h"
+#include "Op/L2Pool2D.h"
+#include "Op/LeakyRelu.h"
+#include "Op/Less.h"
+#include "Op/LessEqual.h"
+#include "Op/LocalResponseNormalization.h"
+#include "Op/Log.h"
+#include "Op/LogicalAnd.h"
#include "Op/LogicalNot.h"
+#include "Op/LogicalOr.h"
+#include "Op/Logistic.h"
+#include "Op/LogSoftmax.h"
+#include "Op/MatrixDiag.h"
+#include "Op/MatrixSetDiag.h"
+#include "Op/Maximum.h"
#include "Op/MaxPool2D.h"
#include "Op/Mean.h"
+#include "Op/Minimum.h"
+#include "Op/MirrorPad.h"
#include "Op/Mul.h"
+#include "Op/Neg.h"
+#include "Op/NotEqual.h"
+#include "Op/OneHot.h"
#include "Op/Pack.h"
#include "Op/Pad.h"
+#include "Op/Pow.h"
+#include "Op/PRelu.h"
+#include "Op/Range.h"
+#include "Op/Rank.h"
+#include "Op/ReduceAny.h"
+#include "Op/ReduceMax.h"
+#include "Op/ReduceMin.h"
+#include "Op/ReduceProd.h"
#include "Op/ReLU.h"
#include "Op/ReLU6.h"
+#include "Op/ReLUN1To1.h"
#include "Op/Reshape.h"
+#include "Op/ResizeBilinear.h"
+#include "Op/ResizeNearestNeighbor.h"
+#include "Op/ReverseSequence.h"
+#include "Op/ReverseV2.h"
+#include "Op/Round.h"
#include "Op/Rsqrt.h"
+#include "Op/ScatterNd.h"
+#include "Op/SegmentSum.h"
+#include "Op/Select.h"
+#include "Op/SelectV2.h"
#include "Op/Shape.h"
+#include "Op/Sin.h"
+#include "Op/Slice.h"
#include "Op/Softmax.h"
+#include "Op/SpaceToBatchND.h"
+#include "Op/SpaceToDepth.h"
+#include "Op/SparseToDense.h"
+#include "Op/Split.h"
+#include "Op/SplitV.h"
#include "Op/Sqrt.h"
+#include "Op/Square.h"
+#include "Op/SquaredDifference.h"
+#include "Op/Squeeze.h"
+#include "Op/StridedSlice.h"
#include "Op/Sub.h"
+#include "Op/Sum.h"
#include "Op/Tanh.h"
+#include "Op/Tile.h"
+#include "Op/TopKV2.h"
#include "Op/Transpose.h"
+#include "Op/TransposeConv.h"
+#include "Op/Unique.h"
+#include "Op/Unpack.h"
+#include "Op/Where.h"
+#include "Op/While.h"
+#include "Op/ZerosLike.h"
+
+#include "CustomOp/AddV2.h"
+#include "CustomOp/All.h"
+#include "CustomOp/BatchMatMulV2.h"
+#include "CustomOp/MatMul.h"
+#include "CustomOp/MatrixBandPart.h"
#endif // __OP_CHEFS_H__
diff --git a/compiler/tflchef/log/CMakeLists.txt b/compiler/tflchef/log/CMakeLists.txt
new file mode 100644
index 000000000..330459ec1
--- /dev/null
+++ b/compiler/tflchef/log/CMakeLists.txt
@@ -0,0 +1,7 @@
+# TODO Find how to test logging framework
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(tflchef_log STATIC ${SOURCES})
+target_include_directories(tflchef_log PUBLIC include)
+target_link_libraries(tflchef_log PUBLIC hermes)
+target_link_libraries(tflchef_log PRIVATE hermes_std)
diff --git a/compiler/tflchef/log/include/Log.h b/compiler/tflchef/log/include/Log.h
new file mode 100644
index 000000000..178fe31c4
--- /dev/null
+++ b/compiler/tflchef/log/include/Log.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLCHEF_LOG_H__
+#define __TFLCHEF_LOG_H__
+
+#include <hermes.h>
+
+namespace tflchef
+{
+
+/**
+ * @brief Logger Implementation
+ */
+class Logger final : public hermes::Source
+{
+public:
+ Logger(hermes::Context *ctx);
+ ~Logger();
+};
+
+/**
+ * @brief Logger Configuration
+ *
+ * Users are able to turn logging on/off via TFLCHEF_LOG environment variable.
+ */
+class LoggerConfig final : public hermes::Config
+{
+public:
+ LoggerConfig();
+
+public:
+ void configure(const hermes::Source *, hermes::Source::Setting &) const final;
+ void configure(const Logger *, hermes::Source::Setting &) const;
+
+private:
+ bool _enabled;
+};
+
+} // namespace tflchef
+
+#include "LoggingContext.h"
+
+/**
+ * HOW TO USE:
+ *
+ * LOGGER(l);
+ *
+ * INFO(l) << "Hello, World" << std::endl;
+ *
+ */
+#define LOGGER(name) ::tflchef::Logger name{::tflchef::LoggingContext::get()};
+
+// TODO Support FATAL, ERROR, WARN, and VERBOSE
+#define INFO(name) HERMES_INFO(name)
+
+// WARNING!
+//
+// THE CURRENT IMPLEMENTATION IS NOT THREAD SAFE.
+//
+
+#endif // __TFLCHEF_LOG_H__
diff --git a/compiler/tflchef/log/include/LoggingContext.h b/compiler/tflchef/log/include/LoggingContext.h
new file mode 100644
index 000000000..860099482
--- /dev/null
+++ b/compiler/tflchef/log/include/LoggingContext.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLCHEF_LOGGING_CONTEXT_H__
+#define __TFLCHEF_LOGGING_CONTEXT_H__
+
+#include <hermes.h>
+
+namespace tflchef
+{
+
+/**
+ * @brief Global logging context
+ */
+struct LoggingContext
+{
+ static hermes::Context *get(void);
+};
+
+} // namespace tflchef
+
+#endif // __TFLCHEF_LOGGING_CONTEXT_H__
diff --git a/compiler/tflchef/log/src/Log.cpp b/compiler/tflchef/log/src/Log.cpp
new file mode 100644
index 000000000..62c377745
--- /dev/null
+++ b/compiler/tflchef/log/src/Log.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Log.h"
+
+#include <cassert>
+#include <cstdlib>
+#include <iostream>
+
+// TODO Extract these lexical conversion routines as a library
+namespace
+{
+
+/**
+ * @brief Convert C-string as a value of type T
+ *
+ * safecast(s, v) returns v if s is nullptr.
+ */
+template <typename T> T safecast(const char *, const T &);
+
+template <> bool safecast<bool>(const char *s, const bool &value)
+{
+ return (s == nullptr) ? value : (std::stoi(s) != 0);
+}
+
+} // namespace
+
+//
+// Logger
+//
+namespace tflchef
+{
+
+Logger::Logger(hermes::Context *ctx) { activate(ctx->sources(), ctx->bus()); }
+Logger::~Logger() { deactivate(); }
+
+} // namespace tflchef
+
+//
+// LoggerConfig
+//
+namespace tflchef
+{
+
+LoggerConfig::LoggerConfig()
+{
+ // Turn on logging if TFLCHEF_LOG is set as non-zero value
+ _enabled = safecast<bool>(std::getenv("TFLCHEF_LOG"), false);
+}
+
+void LoggerConfig::configure(const hermes::Source *source, hermes::Source::Setting &setting) const
+{
+ // Let's ignore hermes::Sources if that is not a tflchef logger
+ if (auto logger = dynamic_cast<const Logger *>(source))
+ {
+ configure(logger, setting);
+ }
+}
+
+void LoggerConfig::configure(const Logger *, hermes::Source::Setting &setting) const
+{
+ if (_enabled)
+ {
+ // Enable all categories
+ setting.accept_all();
+ }
+ else
+ {
+ // Disable all categories
+ setting.reject_all();
+ }
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/log/src/LoggingContext.cpp b/compiler/tflchef/log/src/LoggingContext.cpp
new file mode 100644
index 000000000..0514dc38c
--- /dev/null
+++ b/compiler/tflchef/log/src/LoggingContext.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LoggingContext.h"
+#include "Log.h"
+
+#include <hermes/ConsoleReporter.h>
+
+#include <memory>
+
+namespace tflchef
+{
+
+hermes::Context *LoggingContext::get(void)
+{
+ static hermes::Context *ctx = nullptr;
+
+ if (ctx == nullptr)
+ {
+ ctx = new hermes::Context;
+ ctx->sinks()->append(std::make_unique<hermes::ConsoleReporter>());
+ ctx->config(std::make_unique<LoggerConfig>());
+ }
+
+ return ctx;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/proto/tflchef.proto b/compiler/tflchef/proto/tflchef.proto
index 486aa8a67..792503bc9 100644
--- a/compiler/tflchef/proto/tflchef.proto
+++ b/compiler/tflchef/proto/tflchef.proto
@@ -55,15 +55,24 @@ enum Padding {
enum Activation {
NONE = 0;
RELU = 1;
+ RELU_N1_TO_1 = 2;
RELU6 = 3;
}
+// This enum value corresponds to MirrorPadMode in TensorFlow Lite schema
+enum MirrorPadMode {
+ REFLECT = 0;
+ SYMMETRIC = 1;
+}
+
message Conv2DOptions
{
optional Padding padding = 1 [default = VALID];
optional int32 stride_w = 2 [default = 1];
optional int32 stride_h = 3 [default = 1];
optional Activation activation = 4 [default = NONE];
+ optional int32 dilation_w_factor = 5 [default = 1];
+ optional int32 dilation_h_factor = 6 [default = 1];
}
message Pool2DOptions {
@@ -91,6 +100,12 @@ message DepthwiseConv2DOptions
optional int32 stride_h = 3 [default = 1];
optional int32 depth_multiplier = 4 [default = 1];
optional Activation activation = 5 [default = NONE];
+ optional int32 dilation_w_factor = 6 [default = 1];
+ optional int32 dilation_h_factor = 7 [default = 1];
+}
+
+message ScatterNdOptions {
+ // None
}
message SubOptions {
@@ -105,6 +120,10 @@ message FloorDivOptions {
// None
}
+message FloorModOptions {
+ // None
+}
+
message FullyConnectedOptions {
optional Activation activation = 1 [default = NONE];
}
@@ -113,10 +132,18 @@ message AddOptions {
optional Activation activation = 1 [default = NONE];
}
+message AddNOptions {
+ // None
+}
+
message ArgMaxOptions {
optional TensorType output_type = 1 [default = INT64];
}
+message ArgMinOptions {
+ optional TensorType output_type = 1 [default = INT64];
+}
+
message PackOptions {
optional int32 values_count = 1;
optional int32 axis = 2 [default = 0];
@@ -126,6 +153,10 @@ message PadOptions {
// None
}
+message MirrorPadOptions {
+ optional MirrorPadMode mode = 1 [default = REFLECT];
+}
+
message SoftmaxOptions {
optional float beta = 1 [default = 0.0];
}
@@ -134,10 +165,22 @@ message MulOptions {
optional Activation activation = 1 [default = NONE];
}
+message NegOptions {
+ // None
+}
+
+message RangeOptions {
+ // None
+}
+
message ReducerOptions {
optional bool keep_dims = 1 [ default = false ];
}
+message SpaceToDepthOptions {
+ optional int32 block_size = 1;
+}
+
message LogicalOrOptions {
// None
}
@@ -174,14 +217,224 @@ message BatchToSpaceNDOptions {
// None
}
+message SpaceToBatchNDOptions {
+ // None
+}
+
+message StridedSliceOptions {
+ optional int32 begin_mask = 1;
+ optional int32 end_mask = 2;
+ optional int32 ellipsis_mask = 3;
+ optional int32 new_axis_mask = 4;
+ optional int32 shrink_axis_mask = 5;
+}
+
+message SliceOptions {
+ // None
+}
+
message ExpOptions {
// None
}
+message ExpandDimsOptions {
+ // None
+}
+
+message UnpackOptions {
+ optional int32 num = 1;
+ optional int32 axis = 2 [default = 0];
+}
+
+message GatherOptions {
+ optional int32 axis = 1 [default = 0];
+}
+
+message TileOptions {
+ // None
+}
+
+message BatchMatMulOptions {
+ optional bool adj_x = 1 [default = false];
+ optional bool adj_y = 2 [default = false];
+}
+
+message IfOptions {
+ optional int32 then_subgraph_index = 1;
+ optional int32 else_subgraph_index = 2;
+}
+
+message WhileOptions {
+ optional int32 cond_subgraph_index = 1;
+ optional int32 body_subgraph_index = 2;
+}
+
+message CastOptions {
+ optional TensorType in_data_type = 1 [default = FLOAT32];
+ optional TensorType out_data_type = 2 [default = FLOAT32];
+}
+
+message SquareOptions {
+ // None
+}
+
+message MaximumMinimumOptions {
+ //None
+}
+
+message GreaterEqualOptions {
+ // None
+}
+
+message SelectOptions {
+ // None
+}
+
+message SelectV2Options {
+ // None
+}
+
+message SplitOptions {
+ optional int32 num_splits = 1;
+}
+
+message SplitVOptions {
+ optional int32 num_splits = 1;
+}
+
+message SquaredDifferenceOptions {
+ // None
+}
+
+message FillOptions {
+ // None
+}
+
+message GreaterOptions {
+ // None
+}
+
+message L2NormOptions {
+ optional Activation activation = 1 [default = NONE];
+}
+
+message LessOptions {
+ // None
+}
+
+message LessEqualOptions {
+ // None
+}
+
+message LocalResponseNormalizationOptions {
+ optional int32 radius = 1 [default = 5];
+ optional float bias = 2 [default = 1.0];
+ optional float alpha = 3 [default = 1.0];
+ optional float beta = 4 [default = 0.5];
+}
+
+message MatMulOptions {
+ optional bool transpose_a = 1 [default = false];
+ optional bool transpose_b = 2 [default = false];
+}
+
+message SqueezeOptions {
+ repeated int32 squeeze_dim = 1;
+}
+
+message OneHotOptions {
+ optional int32 axis = 1 [default = -1];
+}
+
+message TopKV2Options {
+ // None
+}
+
+message LogSoftmaxOptions {
+ // None
+}
+
+message ZerosLikeOptions {
+ // None
+}
+
+message GatherNdOptions {
+ // None
+}
+
+message NotEqualOptions {
+ // None
+}
+
+message PowOptions {
+ // None
+}
+
+message LeakyReluOptions {
+ optional float alpha = 1 [default = 0.2];
+}
+
+message ResizeNearestNeighborOptions {
+ optional bool align_corners = 1 [default = false];
+}
+
+message ResizeBilinearOptions {
+ optional bool align_corners = 1 [default = false];
+ optional bool half_pixel_centers = 2 [default = false];
+}
+
+message DepthToSpaceOptions {
+ optional int32 block_size = 1;
+}
+
+message TransposeConvOptions {
+ optional Padding padding = 1 [default = VALID];
+ optional int32 stride_w = 2 [default = 1];
+ optional int32 stride_h = 3 [default = 1];
+}
+
+message ReverseSequenceOptions {
+ optional int32 seq_dim = 1 [default = 0];
+ optional int32 batch_dim = 2 [default = 0];
+}
+
+message RankOptions {
+ // NONE
+}
+
+message SegmentSumOptions {
+ // NONE
+}
+
+message UniqueOptions {
+ optional TensorType idx_out_type = 1 [default = INT32];
+}
+
+message WhereOptions {
+ // None
+}
+
+message SparseToDenseOptions {
+ optional bool validate_indices = 1 [default = true];
+}
+
+message ReverseV2Options {
+ // None
+}
+
+message MatrixDiagOptions {
+ // NONE
+}
+
+message MatrixSetDiagOptions {
+ // NONE
+}
+
message Operation {
optional string type = 1;
repeated string input = 2;
repeated string output = 3;
+ optional int32 version = 4 [default = 1];
optional Conv2DOptions conv2d_options = 100;
optional Pool2DOptions averagepool2d_options = 101;
@@ -210,6 +463,91 @@ message Operation {
optional FloorDivOptions floordiv_options = 124;
optional BatchToSpaceNDOptions batch_to_space_options = 125;
optional ExpOptions exp_options = 126;
+ optional UnpackOptions unpack_options = 127;
+ optional GatherOptions gather_options = 128;
+ optional BatchMatMulOptions batch_matmul_options = 129;
+ optional TileOptions tile_options = 130;
+ optional IfOptions if_options = 131;
+ optional WhileOptions while_options = 132;
+ optional SpaceToBatchNDOptions space_to_batch_nd_options = 133;
+ optional CastOptions cast_options = 134;
+ optional GreaterEqualOptions greaterequal_options = 135;
+ optional MaximumMinimumOptions maximum_options = 136;
+ optional StridedSliceOptions strided_slice_options = 137;
+ optional SquaredDifferenceOptions squared_difference_options = 138;
+ optional FillOptions fill_options = 139;
+ optional SelectOptions select_options = 140;
+ optional ReducerOptions reduce_prod_options = 141;
+ optional SplitOptions split_options = 142;
+ optional SplitVOptions split_v_options = 143;
+ optional ReducerOptions sum_options = 144;
+ optional GreaterOptions greater_options = 145;
+ optional SqueezeOptions squeeze_options = 146;
+ optional FloorModOptions floormod_options = 147;
+ optional OneHotOptions onehot_options = 148;
+ optional LessOptions less_options = 149;
+ optional ReducerOptions reduce_max_options = 150;
+ optional MaximumMinimumOptions minimum_options = 151;
+ optional ReducerOptions reduce_any_options = 152;
+ optional ZerosLikeOptions zeros_like_options = 153;
+ // ConcatEmbeddingsOptions 154
+ // LSHProjectionOptions 155
+ // SVDFOptions 156
+ // RNNOptions 157
+ optional L2NormOptions l2norm_options = 158;
+ optional LocalResponseNormalizationOptions local_response_normalization_options = 159;
+ // LSTMOptions 160
+ optional ResizeBilinearOptions resize_bilinear_options = 161;
+ // CallOptions 162
+ // SkipGramOptions 163
+ optional SpaceToDepthOptions space_to_depth_options = 164;
+ // EmbeddingLookupSparseOptions 165
+ // SequenceRNNOptions 166
+ optional TopKV2Options topk_v2_options = 167;
+ optional LogSoftmaxOptions log_softmax_options = 168;
+ // DequantizeOptions 169
+ optional NegOptions neg_options = 170;
+ // PadV2Options 171
+ optional LessEqualOptions lessequal_options = 172;
+ optional SliceOptions slice_options = 173;
+ optional TransposeConvOptions transpose_conv_options = 174;
+ optional SparseToDenseOptions sparse_to_dense_options = 175;
+ optional PowOptions pow_options = 176;
+ optional ArgMinOptions argmin_options = 177;
+ // FakeQuantOptions 178
+ // BidirectionalSequenceLSTMOptions 179
+ // BidirectionalSequenceRNNOptions 180
+ // UnidirectionalSequenceLSTMOptions 181
+ optional RangeOptions range_options = 182;
+ optional ResizeNearestNeighborOptions resize_nearest_neighbor_options = 183;
+ optional LeakyReluOptions leaky_relu_options = 184;
+ optional MirrorPadOptions mirrorpad_options = 185;
+ optional UniqueOptions unique_options = 186;
+ optional ReverseV2Options reversev2_options = 187;
+ // AddNOptions 188
+ optional GatherNdOptions gather_nd_options = 189;
+ optional WhereOptions where_options = 190;
+ optional RankOptions rank_options = 191;
+ optional ReverseSequenceOptions reverse_sequence_options = 192;
+ optional MatrixDiagOptions matrix_diag_options = 193;
+ // QuantizeOptions 194
+ optional MatrixSetDiagOptions matrix_set_diag_options = 195;
+ // HardSwishOptions 196
+ optional DepthToSpaceOptions depth_to_space_options = 197;
+ // NonMaxSuppressionV4Options 198
+ // NonMaxSuppressionV5Options 199
+ optional ScatterNdOptions scatter_nd_options = 200;
+ optional NotEqualOptions notequal_options = 201;
+ optional ExpandDimsOptions expand_dims_options = 202;
+ optional Pool2DOptions l2pool2d_options = 203;
+ optional ReducerOptions all_options = 204;
+ optional ReducerOptions reduce_min_options = 205;
+ optional SegmentSumOptions segment_sum_options = 206;
+ optional AddNOptions add_n_options = 207;
+ optional MatMulOptions matmul_options = 208;
+
+  // NOTE if two or more operations share the same type of Options,
+  // use a field number that is not listed in the reserved list above
}
// For additional subgraphs
diff --git a/compiler/tflchef/requires.cmake b/compiler/tflchef/requires.cmake
index 3c5bb197f..4c02174b5 100644
--- a/compiler/tflchef/requires.cmake
+++ b/compiler/tflchef/requires.cmake
@@ -1,4 +1,9 @@
+require("arser")
require("nnkit")
require("cwrap")
require("mio-tflite")
require("safemain")
+require("hermes")
+require("hermes-std")
+require("foder")
+require("souschef")
diff --git a/compiler/tflchef/tests/no_shape/test.recipe b/compiler/tflchef/tests/no_shape/test.recipe
new file mode 100644
index 000000000..38efef96a
--- /dev/null
+++ b/compiler/tflchef/tests/no_shape/test.recipe
@@ -0,0 +1,43 @@
+operand {
+ name: "indices"
+ type: INT32
+ shape { dim: 4 }
+}
+operand {
+ name: "depth"
+ type: INT32
+ # shape is intentionally omitted here
+ filler { tag: "explicit" arg: "1" }
+}
+operand {
+ name: "on_value"
+ type: INT32
+ # shape is intentionally omitted here
+ filler { tag: "explicit" arg: "1" }
+}
+operand {
+ name: "off_value"
+ type: INT32
+ # shape is intentionally omitted here
+ filler { tag: "explicit" arg: "0" }
+}
+operand {
+ name: "ofm"
+ type: INT32
+ shape { dim: 4 dim: 1 }
+}
+operation {
+ type: "OneHot"
+ onehot_options {
+ axis: -1
+ }
+ input: "indices"
+ input: "depth"
+ input: "on_value"
+ input: "off_value"
+ output: "ofm"
+}
+input: "indices"
+input: "on_value"
+input: "off_value"
+output: "ofm"
diff --git a/compiler/tflchef/tests/no_shape/test.reverse b/compiler/tflchef/tests/no_shape/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/tflchef/tests/no_shape/test.reverse
diff --git a/compiler/tflchef/tests/runvalidate.sh b/compiler/tflchef/tests/runvalidate.sh
index a1453b399..0dd9d16e0 100755
--- a/compiler/tflchef/tests/runvalidate.sh
+++ b/compiler/tflchef/tests/runvalidate.sh
@@ -1,13 +1,13 @@
#!/bin/bash
-if [[ $# -le 3 ]]; then
+if [[ $# -le 2 ]]; then
echo "USAGE: $0 [mio_tflite_validate path] [prefix 0] "
exit 255
fi
MIO_TFLITE_VALIDATE_PATH="$1"; shift
-echo "-- Found mio_tflite_validate: ${NNKIT_RUN_PATH}"
+echo "-- Found mio_tflite_validate: ${MIO_TFLITE_VALIDATE_PATH}"
TESTED=()
PASSED=()
diff --git a/compiler/tflchef/tflite/include/tflchef/RawModel.h b/compiler/tflchef/tflite/include/tflchef/RawModel.h
deleted file mode 100644
index a8c8fefb7..000000000
--- a/compiler/tflchef/tflite/include/tflchef/RawModel.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __RAW_MODEL_H__
-#define __RAW_MODEL_H__
-
-#include <mio/tflite/schema_generated.h>
-
-namespace tflchef
-{
-
-struct RawModel
-{
- virtual ~RawModel() = default;
-
- virtual const ::tflite::Model *model(void) const = 0;
-};
-
-/**
- * @brief Load TensorFlow Lite model (as a RawModel) from a given path
- *
- * @note May return a nullptr
- */
-std::unique_ptr<RawModel> load_tflite(const std::string &path);
-
-} // namespace tflchef
-
-#endif // __RAW_MODEL_H__
diff --git a/compiler/tflchef/tflite/src/Convert.cpp b/compiler/tflchef/tflite/src/Convert.cpp
index dc60e0087..3cc1c9238 100644
--- a/compiler/tflchef/tflite/src/Convert.cpp
+++ b/compiler/tflchef/tflite/src/Convert.cpp
@@ -51,10 +51,11 @@ tflchef::Activation as_tflchef_activation(const tflite::ActivationFunctionType t
return tflchef::NONE;
case tflite::ActivationFunctionType_RELU:
return tflchef::RELU;
+ case tflite::ActivationFunctionType_RELU_N1_TO_1:
+ return tflchef::RELU_N1_TO_1;
case tflite::ActivationFunctionType_RELU6:
return tflchef::RELU6;
// TODO handle other types
- // ActivationFunctionType_RELU_N1_TO_1
// ActivationFunctionType_TANH
// ActivationFunctionType_SIGN_BIT
default:
@@ -75,4 +76,17 @@ tflchef::Padding as_tflchef_padding(const tflite::Padding padding)
}
}
+tflchef::MirrorPadMode as_tflchef_mirrorpadmode(const tflite::MirrorPadMode mode)
+{
+ switch (mode)
+ {
+ case tflite::MirrorPadMode_REFLECT:
+ return tflchef::REFLECT;
+ case tflite::MirrorPadMode_SYMMETRIC:
+ return tflchef::SYMMETRIC;
+ default:
+ throw std::runtime_error{"Unknown mirrorpad mode"};
+ }
+}
+
} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Convert.h b/compiler/tflchef/tflite/src/Convert.h
index 8623e7b78..770bffa4d 100644
--- a/compiler/tflchef/tflite/src/Convert.h
+++ b/compiler/tflchef/tflite/src/Convert.h
@@ -27,12 +27,14 @@ namespace tflchef
tflchef::TensorType as_tflchef_type(const tflite::TensorType type);
tflchef::Activation as_tflchef_activation(const tflite::ActivationFunctionType type);
tflchef::Padding as_tflchef_padding(const tflite::Padding padding);
+tflchef::MirrorPadMode as_tflchef_mirrorpadmode(const tflite::MirrorPadMode mode);
/**
* @brief extract buffer data to std::vector<DT>
*/
template <typename DT> std::vector<DT> extract_buffer(const tflite::Buffer *buffer)
{
+ assert(buffer->data() != nullptr);
auto buffer_length = buffer->data()->size();
auto num_elements = buffer_length / sizeof(DT);
std::vector<DT> result(num_elements);
diff --git a/compiler/tflchef/tflite/src/FillerHelper.cpp b/compiler/tflchef/tflite/src/FillerHelper.cpp
new file mode 100644
index 000000000..cf96d2e8c
--- /dev/null
+++ b/compiler/tflchef/tflite/src/FillerHelper.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FillerHelper.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void fill_tensor_to_import(int32_t idx, TFliteImport *import)
+{
+ const tflite::Tensor *tensor = import->tensors()->Get(idx);
+ if (tensor != nullptr)
+ {
+ if (tensor->type() == tflite::TensorType::TensorType_INT32)
+ {
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(idx, vec);
+ }
+ }
+ else if (tensor->type() == tflite::TensorType::TensorType_FLOAT32)
+ {
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<float>(buffer);
+ import->set_tensor_filler(idx, vec);
+ }
+ }
+ }
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/FillerHelper.h b/compiler/tflchef/tflite/src/FillerHelper.h
new file mode 100644
index 000000000..053a5c18a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/FillerHelper.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FILLER_HELPER_H__
+#define __FILLER_HELPER_H__
+
+#include "TFliteImport.h"
+
+#include <mio/tflite/schema_generated.h>
+
+namespace tflchef
+{
+
+void fill_tensor_to_import(int32_t idx, TFliteImport *import);
+
+} // namespace tflchef
+
+#endif // __FILLER_HELPER_H__
diff --git a/compiler/tflchef/tflite/src/Op/Add.cpp b/compiler/tflchef/tflite/src/Op/Add.cpp
index 7e669ecc9..3e880a63b 100644
--- a/compiler/tflchef/tflite/src/Op/Add.cpp
+++ b/compiler/tflchef/tflite/src/Op/Add.cpp
@@ -17,6 +17,7 @@
#include "Add.h"
#include "Convert.h"
+#include "FillerHelper.h"
namespace tflchef
{
@@ -24,7 +25,13 @@ namespace tflchef
void TFliteOpAdd::filler(const tflite::Operator *op, TFliteImport *import,
tflchef::ModelRecipe *model_recipe) const
{
- // Nothing to do with filler
+ // Add may have constant input
+
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+ assert(inputs.size() == 2);
+
+ fill_tensor_to_import(inputs[0], import);
+ fill_tensor_to_import(inputs[1], import);
}
tflchef::Operation *TFliteOpAdd::build(const tflite::Operator *op, TFliteImport *import,
diff --git a/compiler/tflchef/tflite/src/Op/AddN.cpp b/compiler/tflchef/tflite/src/Op/AddN.cpp
new file mode 100644
index 000000000..aeb3803ab
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/AddN.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AddN.h"
+
+#include "Convert.h"
+#include "FillerHelper.h"
+
+namespace tflchef
+{
+
+void TFliteOpAddN::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // AddN may have constant input
+
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+
+ for (uint32_t idx = 0; idx < inputs.size(); ++idx)
+ fill_tensor_to_import(inputs[idx], import);
+}
+
+tflchef::Operation *TFliteOpAddN::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("AddN");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/AddN.h b/compiler/tflchef/tflite/src/Op/AddN.h
new file mode 100644
index 000000000..4387aa06a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/AddN.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_ADD_N_H__
+#define __TFLITE_OP_ADD_N_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for AddN
+ */
+class TFliteOpAddN : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_ADD_N_H__
diff --git a/compiler/tflchef/tflite/src/Op/ArgMin.cpp b/compiler/tflchef/tflite/src/Op/ArgMin.cpp
new file mode 100644
index 000000000..faab0b830
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ArgMin.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ArgMin.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpArgMin::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input, argmin/dim
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *dim_tensor = import->tensors()->Get(inputs[1]);
+ assert(dim_tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(dim_tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpArgMin::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_ArgMinOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ArgMin");
+
+ auto op_options = operation->mutable_argmin_options();
+
+ op_options->set_output_type(as_tflchef_type(op_params->output_type()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ArgMin.h b/compiler/tflchef/tflite/src/Op/ArgMin.h
new file mode 100644
index 000000000..83c643c1a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ArgMin.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_ARGMIN_H__
+#define __TFLITE_OP_ARGMIN_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ArgMin
+ */
+class TFliteOpArgMin : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_ARGMIN_H__
diff --git a/compiler/tflchef/tflite/src/Op/BatchMatMul.cpp b/compiler/tflchef/tflite/src/Op/BatchMatMul.cpp
new file mode 100644
index 000000000..598e58c94
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/BatchMatMul.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchMatMul.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpBatchMatMul::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpBatchMatMul::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("BatchMatMul");
+
+ auto op_options = operation->mutable_batch_matmul_options();
+
+ auto op_params = op->builtin_options_as_BatchMatMulOptions();
+ assert(op_params != nullptr);
+
+ op_options->set_adj_x(op_params->adj_x());
+ op_options->set_adj_y(op_params->adj_y());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/BatchMatMul.h b/compiler/tflchef/tflite/src/Op/BatchMatMul.h
new file mode 100644
index 000000000..6eb4c6e68
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/BatchMatMul.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_BATCHMATMUL_H__
+#define __TFLITE_OP_BATCHMATMUL_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for BATCH_MATMUL
+ */
+class TFliteOpBatchMatMul : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_BATCHMATMUL_H__
diff --git a/compiler/tflchef/tflite/src/Op/Cast.cpp b/compiler/tflchef/tflite/src/Op/Cast.cpp
new file mode 100644
index 000000000..393bb4b35
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Cast.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Cast.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpCast::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpCast::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_CastOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Cast");
+
+ auto op_options = operation->mutable_cast_options();
+
+ op_options->set_in_data_type(as_tflchef_type(op_params->in_data_type()));
+ op_options->set_out_data_type(as_tflchef_type(op_params->out_data_type()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Cast.h b/compiler/tflchef/tflite/src/Op/Cast.h
new file mode 100644
index 000000000..29c126c93
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Cast.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_CAST_H__
+#define __TFLITE_OP_CAST_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for CAST
+ */
+class TFliteOpCast : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_CAST_H__
diff --git a/compiler/tflchef/tflite/src/Op/Ceil.cpp b/compiler/tflchef/tflite/src/Op/Ceil.cpp
new file mode 100644
index 000000000..d3ef3adae
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Ceil.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Ceil.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpCeil::filler(const tflite::Operator *, TFliteImport *, tflchef::ModelRecipe *) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpCeil::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Ceil");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Ceil.h b/compiler/tflchef/tflite/src/Op/Ceil.h
new file mode 100644
index 000000000..44df20778
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Ceil.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_CEIL_H__
+#define __TFLITE_OP_CEIL_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for CEIL
+ */
+class TFliteOpCeil : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_CEIL_H__
diff --git a/compiler/tflchef/tflite/src/Op/Conv2D.cpp b/compiler/tflchef/tflite/src/Op/Conv2D.cpp
index 5d48ee24f..57abd4649 100644
--- a/compiler/tflchef/tflite/src/Op/Conv2D.cpp
+++ b/compiler/tflchef/tflite/src/Op/Conv2D.cpp
@@ -50,7 +50,8 @@ tflchef::Operation *TFliteOpConv2D::build(const tflite::Operator *op, TFliteImpo
op_options->set_stride_h(op_params->stride_h());
op_options->set_stride_w(op_params->stride_w());
op_options->set_padding(as_tflchef_padding(op_params->padding()));
- // TODO support dilation
+ op_options->set_dilation_w_factor(op_params->dilation_w_factor());
+ op_options->set_dilation_h_factor(op_params->dilation_h_factor());
return operation;
}
diff --git a/compiler/tflchef/tflite/src/Op/DepthToSpace.cpp b/compiler/tflchef/tflite/src/Op/DepthToSpace.cpp
new file mode 100644
index 000000000..1a0917e8e
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/DepthToSpace.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DepthToSpace.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpDepthToSpace::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpDepthToSpace::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("DepthToSpace");
+
+ auto op_params = op->builtin_options_as_DepthToSpaceOptions();
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_depth_to_space_options();
+
+ op_options->set_block_size(op_params->block_size());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/DepthToSpace.h b/compiler/tflchef/tflite/src/Op/DepthToSpace.h
new file mode 100644
index 000000000..b5852ac89
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/DepthToSpace.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_DEPTHTOSPACE_H__
+#define __TFLITE_OP_DEPTHTOSPACE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for DepthToSpace
+ */
+class TFliteOpDepthToSpace : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_DEPTHTOSPACE_H__
diff --git a/compiler/tflchef/tflite/src/Op/DepthwiseConv2D.cpp b/compiler/tflchef/tflite/src/Op/DepthwiseConv2D.cpp
index b19f9330f..5fed3353a 100644
--- a/compiler/tflchef/tflite/src/Op/DepthwiseConv2D.cpp
+++ b/compiler/tflchef/tflite/src/Op/DepthwiseConv2D.cpp
@@ -50,9 +50,8 @@ tflchef::Operation *TFliteOpDepthwiseConv2D::build(const tflite::Operator *op, T
op_options->set_stride_h(op_params->stride_h());
op_options->set_stride_w(op_params->stride_w());
op_options->set_depth_multiplier(op_params->depth_multiplier());
- // TODO support dilation
- // op_params->dilation_w_factor()
- // op_params->dilation_h_factor()
+ op_options->set_dilation_w_factor(op_params->dilation_w_factor());
+ op_options->set_dilation_h_factor(op_params->dilation_h_factor());
op_options->set_padding(as_tflchef_padding(op_params->padding()));
return operation;
diff --git a/compiler/tflchef/tflite/src/Op/ELU.cpp b/compiler/tflchef/tflite/src/Op/ELU.cpp
new file mode 100644
index 000000000..cb4b61d66
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ELU.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ELU.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpELU::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpELU::build(const tflite::Operator *, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ELU");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ELU.h b/compiler/tflchef/tflite/src/Op/ELU.h
new file mode 100644
index 000000000..490c9fde4
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ELU.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_ELU_H__
+#define __TFLITE_OP_ELU_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ELU
+ */
+class TFliteOpELU : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_ELU_H__
diff --git a/compiler/tflchef/tflite/src/Op/ExpandDims.cpp b/compiler/tflchef/tflite/src/Op/ExpandDims.cpp
new file mode 100644
index 000000000..e30e8dbcc
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ExpandDims.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExpandDims.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpExpandDims::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Fill for axis input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpExpandDims::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ExpandDims");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ExpandDims.h b/compiler/tflchef/tflite/src/Op/ExpandDims.h
new file mode 100644
index 000000000..e2f3e4e50
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ExpandDims.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_EXPAND_DIMS_H__
+#define __TFLITE_OP_EXPAND_DIMS_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ExpandDims
+ */
+class TFliteOpExpandDims : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_EXPAND_DIMS_H__
diff --git a/compiler/tflchef/tflite/src/Op/Fill.cpp b/compiler/tflchef/tflite/src/Op/Fill.cpp
new file mode 100644
index 000000000..08b695fd7
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Fill.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Fill.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpFill::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *dims_tensor = import->tensors()->Get(inputs[0]);
+ assert(dims_tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(dims_tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[0], vec);
+}
+
+tflchef::Operation *TFliteOpFill::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("Fill");
+
+ // FillOptions are empty
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Fill.h b/compiler/tflchef/tflite/src/Op/Fill.h
new file mode 100644
index 000000000..4f46f628a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Fill.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_FILL_H__
+#define __TFLITE_OP_FILL_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Fill
+ */
+class TFliteOpFill : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_FILL_H__
diff --git a/compiler/tflchef/tflite/src/Op/Floor.cpp b/compiler/tflchef/tflite/src/Op/Floor.cpp
new file mode 100644
index 000000000..373c69f71
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Floor.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Floor.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpFloor::filler(const tflite::Operator *, TFliteImport *, tflchef::ModelRecipe *) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpFloor::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Floor");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Floor.h b/compiler/tflchef/tflite/src/Op/Floor.h
new file mode 100644
index 000000000..f0f8ef38a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Floor.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_FLOOR_H__
+#define __TFLITE_OP_FLOOR_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for FLOOR
+ */
+class TFliteOpFloor : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_FLOOR_H__
diff --git a/compiler/tflchef/tflite/src/Op/FloorMod.cpp b/compiler/tflchef/tflite/src/Op/FloorMod.cpp
new file mode 100644
index 000000000..997d82664
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/FloorMod.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FloorMod.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpFloorMod::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpFloorMod::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("FloorMod");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/FloorMod.h b/compiler/tflchef/tflite/src/Op/FloorMod.h
new file mode 100644
index 000000000..f36dfe813
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/FloorMod.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_FLOOR_MOD_H__
+#define __TFLITE_OP_FLOOR_MOD_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for FLOOR_MOD
+ */
+class TFliteOpFloorMod : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_FLOOR_MOD_H__
diff --git a/compiler/tflchef/tflite/src/Op/Gather.cpp b/compiler/tflchef/tflite/src/Op/Gather.cpp
new file mode 100644
index 000000000..98da3ec43
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Gather.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Gather.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpGather::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+ // But second input has filler for constant inputs
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpGather::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_GatherOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Gather");
+
+ auto op_options = operation->mutable_gather_options();
+
+ op_options->set_axis(op_params->axis());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Gather.h b/compiler/tflchef/tflite/src/Op/Gather.h
new file mode 100644
index 000000000..e01276b76
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Gather.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_GATHER_H__
+#define __TFLITE_OP_GATHER_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Gather
+ */
+class TFliteOpGather : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_GATHER_H__
diff --git a/compiler/tflchef/tflite/src/Op/GatherNd.cpp b/compiler/tflchef/tflite/src/Op/GatherNd.cpp
new file mode 100644
index 000000000..0ff5a0b7f
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/GatherNd.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GatherNd.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpGatherNd::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // indices buffer has filler
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpGatherNd::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("GatherNd");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/GatherNd.h b/compiler/tflchef/tflite/src/Op/GatherNd.h
new file mode 100644
index 000000000..112f23d33
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/GatherNd.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_GATHER_ND_H__
+#define __TFLITE_OP_GATHER_ND_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for GatherNd
+ */
+class TFliteOpGatherNd : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_GATHER_ND_H__
diff --git a/compiler/tflchef/tflite/src/Op/Greater.cpp b/compiler/tflchef/tflite/src/Op/Greater.cpp
new file mode 100644
index 000000000..4e41efb2d
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Greater.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Greater.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpGreater::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpGreater::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Greater");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Greater.h b/compiler/tflchef/tflite/src/Op/Greater.h
new file mode 100644
index 000000000..3ab2d1a4e
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Greater.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_GREATER_H__
+#define __TFLITE_OP_GREATER_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Greater
+ */
+class TFliteOpGreater : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_GREATER_H__
diff --git a/compiler/tflchef/tflite/src/Op/GreaterEqual.cpp b/compiler/tflchef/tflite/src/Op/GreaterEqual.cpp
new file mode 100644
index 000000000..aead30e57
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/GreaterEqual.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GreaterEqual.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpGreaterEqual::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpGreaterEqual::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("GreaterEqual");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/GreaterEqual.h b/compiler/tflchef/tflite/src/Op/GreaterEqual.h
new file mode 100644
index 000000000..96b0af78a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/GreaterEqual.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_GREATEREQUAL_H__
+#define __TFLITE_OP_GREATEREQUAL_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Greater Equal
+ */
+class TFliteOpGreaterEqual : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_GREATEREQUAL_H__
diff --git a/compiler/tflchef/tflite/src/Op/L2Normalize.cpp b/compiler/tflchef/tflite/src/Op/L2Normalize.cpp
new file mode 100644
index 000000000..0a8908472
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/L2Normalize.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "L2Normalize.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpL2Normalize::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpL2Normalize::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_L2NormOptions();
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("L2Normalize");
+
+ auto op_options = operation->mutable_l2norm_options();
+
+ op_options->set_activation(as_tflchef_activation(op_params->fused_activation_function()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/L2Normalize.h b/compiler/tflchef/tflite/src/Op/L2Normalize.h
new file mode 100644
index 000000000..a73eae6c8
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/L2Normalize.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_L2NORMALIZE_H__
+#define __TFLITE_OP_L2NORMALIZE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for L2Normalize
+ */
+class TFliteOpL2Normalize : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_L2NORMALIZE_H__
diff --git a/compiler/tflchef/tflite/src/Op/L2Pool2D.cpp b/compiler/tflchef/tflite/src/Op/L2Pool2D.cpp
new file mode 100644
index 000000000..8db4b02b6
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/L2Pool2D.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "L2Pool2D.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpL2Pool2D::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpL2Pool2D::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_Pool2DOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("L2Pool2D");
+
+ auto op_options = operation->mutable_l2pool2d_options();
+
+ op_options->set_padding(as_tflchef_padding(op_params->padding()));
+ op_options->set_stride_h(op_params->stride_h());
+ op_options->set_stride_w(op_params->stride_w());
+ op_options->set_filter_height(op_params->filter_height());
+ op_options->set_filter_width(op_params->filter_width());
+ op_options->set_activation(as_tflchef_activation(op_params->fused_activation_function()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/L2Pool2D.h b/compiler/tflchef/tflite/src/Op/L2Pool2D.h
new file mode 100644
index 000000000..046353440
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/L2Pool2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_L2_POOL2D_H__
+#define __TFLITE_OP_L2_POOL2D_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for L2_POOL_2D
+ */
+class TFliteOpL2Pool2D : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_L2_POOL2D_H__
diff --git a/compiler/tflchef/tflite/src/Op/LeakyRelu.cpp b/compiler/tflchef/tflite/src/Op/LeakyRelu.cpp
new file mode 100644
index 000000000..bf9cb2fb3
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LeakyRelu.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LeakyRelu.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLeakyRelu::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpLeakyRelu::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_LeakyReluOptions();
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("LeakyRelu");
+
+ auto *op_options = operation->mutable_leaky_relu_options();
+
+ op_options->set_alpha(op_params->alpha());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/LeakyRelu.h b/compiler/tflchef/tflite/src/Op/LeakyRelu.h
new file mode 100644
index 000000000..28e63e0ca
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LeakyRelu.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LEAKY_RELU_H__
+#define __TFLITE_OP_LEAKY_RELU_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for LeakyReLU
+ */
+class TFliteOpLeakyRelu : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LEAKY_RELU_H__
diff --git a/compiler/tflchef/tflite/src/Op/Less.cpp b/compiler/tflchef/tflite/src/Op/Less.cpp
new file mode 100644
index 000000000..0360317c7
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Less.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Less.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLess::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpLess::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Less");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Less.h b/compiler/tflchef/tflite/src/Op/Less.h
new file mode 100644
index 000000000..1316cb613
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Less.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LESS_H__
+#define __TFLITE_OP_LESS_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Less
+ */
+class TFliteOpLess : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LESS_H__
diff --git a/compiler/tflchef/tflite/src/Op/LessEqual.cpp b/compiler/tflchef/tflite/src/Op/LessEqual.cpp
new file mode 100644
index 000000000..b8c42e80d
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LessEqual.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LessEqual.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLessEqual::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpLessEqual::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("LessEqual");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/LessEqual.h b/compiler/tflchef/tflite/src/Op/LessEqual.h
new file mode 100644
index 000000000..81c710fbc
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LessEqual.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LESSEQUAL_H__
+#define __TFLITE_OP_LESSEQUAL_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for LessEqual
+ */
+class TFliteOpLessEqual : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LESSEQUAL_H__
diff --git a/compiler/tflchef/tflite/src/Op/LocalResponseNormalization.cpp b/compiler/tflchef/tflite/src/Op/LocalResponseNormalization.cpp
new file mode 100644
index 000000000..8bebd9e90
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LocalResponseNormalization.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LocalResponseNormalization.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLocalResponseNormalization::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *
+TFliteOpLocalResponseNormalization::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_LocalResponseNormalizationOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("LocalResponseNormalization");
+
+ auto op_options = operation->mutable_local_response_normalization_options();
+
+ op_options->set_radius(op_params->radius());
+ op_options->set_bias(op_params->bias());
+ op_options->set_alpha(op_params->alpha());
+ op_options->set_beta(op_params->beta());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/LocalResponseNormalization.h b/compiler/tflchef/tflite/src/Op/LocalResponseNormalization.h
new file mode 100644
index 000000000..c0eb3f2b1
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LocalResponseNormalization.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LOCAL_RESPONSE_NORMALIZATION_H__
+#define __TFLITE_OP_LOCAL_RESPONSE_NORMALIZATION_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for LocalResponseNormalization
+ */
+class TFliteOpLocalResponseNormalization : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LOCAL_RESPONSE_NORMALIZATION_H__
diff --git a/compiler/tflchef/tflite/src/Op/Log.cpp b/compiler/tflchef/tflite/src/Op/Log.cpp
new file mode 100644
index 000000000..a68dc9a31
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Log.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Log.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLog::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpLog::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Log");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Log.h b/compiler/tflchef/tflite/src/Op/Log.h
new file mode 100644
index 000000000..9d17e2f81
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Log.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LOG_H__
+#define __TFLITE_OP_LOG_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Log
+ */
+class TFliteOpLog : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LOG_H__
diff --git a/compiler/tflchef/tflite/src/Op/LogSoftmax.cpp b/compiler/tflchef/tflite/src/Op/LogSoftmax.cpp
new file mode 100644
index 000000000..8f0e1a9f9
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LogSoftmax.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LogSoftmax.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLogSoftmax::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpLogSoftmax::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("LogSoftmax");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/LogSoftmax.h b/compiler/tflchef/tflite/src/Op/LogSoftmax.h
new file mode 100644
index 000000000..efd81f3e9
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LogSoftmax.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LOG_SOFTMAX_H__
+#define __TFLITE_OP_LOG_SOFTMAX_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for LogSoftmax
+ */
+class TFliteOpLogSoftmax : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LOG_SOFTMAX_H__
diff --git a/compiler/tflchef/tflite/src/Op/LogicalAnd.cpp b/compiler/tflchef/tflite/src/Op/LogicalAnd.cpp
new file mode 100644
index 000000000..2cc486426
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LogicalAnd.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LogicalAnd.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLogicalAnd::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpLogicalAnd::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("LogicalAnd");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/LogicalAnd.h b/compiler/tflchef/tflite/src/Op/LogicalAnd.h
new file mode 100644
index 000000000..1f7a964b9
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/LogicalAnd.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LOGICALAND_H__
+#define __TFLITE_OP_LOGICALAND_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for LogicalAnd
+ */
+class TFliteOpLogicalAnd : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LOGICALAND_H__
diff --git a/compiler/tflchef/tflite/src/Op/Logistic.cpp b/compiler/tflchef/tflite/src/Op/Logistic.cpp
new file mode 100644
index 000000000..18b3b5c00
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Logistic.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Logistic.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpLogistic::filler(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpLogistic::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Logistic");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Logistic.h b/compiler/tflchef/tflite/src/Op/Logistic.h
new file mode 100644
index 000000000..a75bf490e
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Logistic.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_LOGISTIC_H__
+#define __TFLITE_OP_LOGISTIC_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for LOGISTIC
+ */
+class TFliteOpLogistic : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_LOGISTIC_H__
diff --git a/compiler/tflchef/tflite/src/Op/MatrixDiag.cpp b/compiler/tflchef/tflite/src/Op/MatrixDiag.cpp
new file mode 100644
index 000000000..ca84c4949
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/MatrixDiag.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatrixDiag.h"
+
+namespace tflchef
+{
+
+void TFliteOpMatrixDiag::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpMatrixDiag::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("MatrixDiag");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/MatrixDiag.h b/compiler/tflchef/tflite/src/Op/MatrixDiag.h
new file mode 100644
index 000000000..4074f2c36
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/MatrixDiag.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_MATRIX_DIAG_H__
+#define __TFLITE_OP_MATRIX_DIAG_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for MatrixDiag
+ */
+class TFliteOpMatrixDiag : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_MATRIX_DIAG_H__
diff --git a/compiler/tflchef/tflite/src/Op/MatrixSetDiag.cpp b/compiler/tflchef/tflite/src/Op/MatrixSetDiag.cpp
new file mode 100644
index 000000000..97c7de41f
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/MatrixSetDiag.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatrixSetDiag.h"
+
+namespace tflchef
+{
+
+void TFliteOpMatrixSetDiag::filler(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpMatrixSetDiag::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("MatrixSetDiag");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/MatrixSetDiag.h b/compiler/tflchef/tflite/src/Op/MatrixSetDiag.h
new file mode 100644
index 000000000..0e7ec7f32
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/MatrixSetDiag.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_MATRIX_SET_DIAG_H__
+#define __TFLITE_OP_MATRIX_SET_DIAG_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for MatrixSetDiag
+ */
+class TFliteOpMatrixSetDiag : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_MATRIX_SET_DIAG_H__
diff --git a/compiler/tflchef/tflite/src/Op/Maximum.cpp b/compiler/tflchef/tflite/src/Op/Maximum.cpp
new file mode 100644
index 000000000..fb977b6ed
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Maximum.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Maximum.h"
+
+namespace tflchef
+{
+
+void TFliteOpMaximum::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpMaximum::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Maximum");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Maximum.h b/compiler/tflchef/tflite/src/Op/Maximum.h
new file mode 100644
index 000000000..acafec343
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Maximum.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_MAXIMUM_H__
+#define __TFLITE_OP_MAXIMUM_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for maximum
+ */
+class TFliteOpMaximum : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_MAXIMUM_H__
diff --git a/compiler/tflchef/tflite/src/Op/Minimum.cpp b/compiler/tflchef/tflite/src/Op/Minimum.cpp
new file mode 100644
index 000000000..2bb50cb89
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Minimum.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Minimum.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpMinimum::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpMinimum::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Minimum");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Minimum.h b/compiler/tflchef/tflite/src/Op/Minimum.h
new file mode 100644
index 000000000..5db5b7940
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Minimum.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_MINIMUM_H__
+#define __TFLITE_OP_MINIMUM_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for minimum
+ */
+class TFliteOpMinimum : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_MINIMUM_H__
diff --git a/compiler/tflchef/tflite/src/Op/MirrorPad.cpp b/compiler/tflchef/tflite/src/Op/MirrorPad.cpp
new file mode 100644
index 000000000..c688552ee
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/MirrorPad.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MirrorPad.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpMirrorPad::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpMirrorPad::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("MirrorPad");
+
+ auto op_options = operation->mutable_mirrorpad_options();
+
+ auto op_params = op->builtin_options_as_MirrorPadOptions();
+ assert(op_params != nullptr);
+
+ op_options->set_mode(as_tflchef_mirrorpadmode(op_params->mode()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/MirrorPad.h b/compiler/tflchef/tflite/src/Op/MirrorPad.h
new file mode 100644
index 000000000..c9acdd498
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/MirrorPad.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_MIRROR_PAD_H__
+#define __TFLITE_OP_MIRROR_PAD_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for MIRROR_PAD
+ */
+class TFliteOpMirrorPad : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_MIRROR_PAD_H__
diff --git a/compiler/tflchef/tflite/src/Op/Mul.cpp b/compiler/tflchef/tflite/src/Op/Mul.cpp
new file mode 100644
index 000000000..9faa4acaf
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Mul.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Mul.h"
+
+#include "Convert.h"
+#include "FillerHelper.h"
+
+namespace tflchef
+{
+
+void TFliteOpMul::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Mul may have constant input
+
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+ assert(inputs.size() == 2);
+
+ fill_tensor_to_import(inputs[0], import);
+ fill_tensor_to_import(inputs[1], import);
+}
+
+tflchef::Operation *TFliteOpMul::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Mul");
+
+ auto op_params = op->builtin_options_as_MulOptions();
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_mul_options();
+ op_options->set_activation(as_tflchef_activation(op_params->fused_activation_function()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Mul.h b/compiler/tflchef/tflite/src/Op/Mul.h
new file mode 100644
index 000000000..fd009d2fd
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Mul.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_MUL_H__
+#define __TFLITE_OP_MUL_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for MUL
+ */
+class TFliteOpMul : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_MUL_H__
diff --git a/compiler/tflchef/tflite/src/Op/Neg.cpp b/compiler/tflchef/tflite/src/Op/Neg.cpp
new file mode 100644
index 000000000..c691390a3
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Neg.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Neg.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpNeg::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpNeg::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Neg");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Neg.h b/compiler/tflchef/tflite/src/Op/Neg.h
new file mode 100644
index 000000000..c77ab7e84
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Neg.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_NEG_H__
+#define __TFLITE_OP_NEG_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Neg
+ */
+class TFliteOpNeg : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_NEG_H__
diff --git a/compiler/tflchef/tflite/src/Op/NotEqual.cpp b/compiler/tflchef/tflite/src/Op/NotEqual.cpp
new file mode 100644
index 000000000..c2275db06
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/NotEqual.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NotEqual.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpNotEqual::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpNotEqual::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("NotEqual");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/NotEqual.h b/compiler/tflchef/tflite/src/Op/NotEqual.h
new file mode 100644
index 000000000..b1febdcc5
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/NotEqual.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_NOTEQUAL_H__
+#define __TFLITE_OP_NOTEQUAL_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Not Equal
+ */
+class TFliteOpNotEqual : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_NOTEQUAL_H__
diff --git a/compiler/tflchef/tflite/src/Op/OneHot.cpp b/compiler/tflchef/tflite/src/Op/OneHot.cpp
new file mode 100644
index 000000000..f26ed3e7f
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/OneHot.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OneHot.h"
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpOneHot::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // only depth(second input) has constant on recipe cause depth value is used in shape inference.
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+ }
+
+ // on/off can be dtype of input/output. let's support INT32/FLOAT32 for now
+ for (int32_t index = 2; index <= 3; ++index)
+ {
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[index]);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ if (buffer && buffer->data())
+ {
+ switch (tensor->type())
+ {
+ case tflite::TensorType::TensorType_INT32:
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[index], vec);
+ break;
+ }
+
+ case tflite::TensorType::TensorType_FLOAT32:
+ {
+ auto vec = extract_buffer<float>(buffer);
+ import->set_tensor_filler(inputs[index], vec);
+ break;
+ }
+
+ default:
+ assert(false);
+ break;
+ }
+ }
+ }
+}
+
+tflchef::Operation *TFliteOpOneHot::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_OneHotOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("OneHot");
+
+ auto op_options = operation->mutable_onehot_options();
+
+ op_options->set_axis(op_params->axis());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/OneHot.h b/compiler/tflchef/tflite/src/Op/OneHot.h
new file mode 100644
index 000000000..50bbed095
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/OneHot.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_ONEHOT_H__
+#define __TFLITE_OP_ONEHOT_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for OneHot
+ */
+class TFliteOpOneHot : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_ONEHOT_H__
diff --git a/compiler/tflchef/tflite/src/Op/PRelu.cpp b/compiler/tflchef/tflite/src/Op/PRelu.cpp
new file mode 100644
index 000000000..8a5e83a84
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/PRelu.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PRelu.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpPRelu::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+}
+
+tflchef::Operation *TFliteOpPRelu::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("PRelu");
+
+ // PReluOptions are empty
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/PRelu.h b/compiler/tflchef/tflite/src/Op/PRelu.h
new file mode 100644
index 000000000..b35c6e7ce
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/PRelu.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_PRELU_H__
+#define __TFLITE_OP_PRELU_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for PRelu
+ */
+class TFliteOpPRelu : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_PRELU_H__
diff --git a/compiler/tflchef/tflite/src/Op/Pow.cpp b/compiler/tflchef/tflite/src/Op/Pow.cpp
new file mode 100644
index 000000000..fe8e8ac0f
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Pow.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pow.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpPow::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+}
+
+tflchef::Operation *TFliteOpPow::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("Pow");
+
+ // PowOptions are empty
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Pow.h b/compiler/tflchef/tflite/src/Op/Pow.h
new file mode 100644
index 000000000..20e847377
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Pow.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_POW_H__
+#define __TFLITE_OP_POW_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Pow
+ */
+class TFliteOpPow : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_POW_H__
diff --git a/compiler/tflchef/tflite/src/Op/Range.cpp b/compiler/tflchef/tflite/src/Op/Range.cpp
new file mode 100644
index 000000000..2958b9c41
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Range.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Range.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpRange::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for all inputs
+ const auto &inputs = *op->inputs();
+
+ for (int index = 0; index < 3; ++index)
+ {
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[index]);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ if (tensor->type() == tflite::TensorType::TensorType_INT32)
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[index], vec);
+ }
+ else if (tensor->type() == tflite::TensorType::TensorType_FLOAT32)
+ {
+ auto vec = extract_buffer<float>(buffer);
+ import->set_tensor_filler(inputs[index], vec);
+ }
+ else
+ {
+ assert(false && "Invalid tensor type");
+ }
+ }
+}
+
+tflchef::Operation *TFliteOpRange::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Range");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Range.h b/compiler/tflchef/tflite/src/Op/Range.h
new file mode 100644
index 000000000..ad10dc58b
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Range.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_RANGE_H__
+#define __TFLITE_OP_RANGE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Range
+ */
+class TFliteOpRange : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_RANGE_H__
diff --git a/compiler/tflchef/tflite/src/Op/Rank.cpp b/compiler/tflchef/tflite/src/Op/Rank.cpp
new file mode 100644
index 000000000..184c8e482
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Rank.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Rank.h"
+
+namespace tflchef
+{
+
+void TFliteOpRank::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpRank::build(const tflite::Operator *, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Rank");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Rank.h b/compiler/tflchef/tflite/src/Op/Rank.h
new file mode 100644
index 000000000..003d9d310
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Rank.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_RANK_H__
+#define __TFLITE_OP_RANK_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for rank
+ */
+class TFliteOpRank : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_RANK_H__
diff --git a/compiler/tflchef/tflite/src/Op/ReLUN1To1.cpp b/compiler/tflchef/tflite/src/Op/ReLUN1To1.cpp
new file mode 100644
index 000000000..4cc8dbd2b
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReLUN1To1.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLUN1To1.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpReLUN1To1::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpReLUN1To1::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ReLUN1To1");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ReLUN1To1.h b/compiler/tflchef/tflite/src/Op/ReLUN1To1.h
new file mode 100644
index 000000000..0767006af
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReLUN1To1.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_RELU_N1_TO_1_H__
+#define __TFLITE_OP_RELU_N1_TO_1_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for RELU_N1_TO_1
+ */
+class TFliteOpReLUN1To1 : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_RELU_N1_TO_1_H__
diff --git a/compiler/tflchef/tflite/src/Op/ReduceAny.cpp b/compiler/tflchef/tflite/src/Op/ReduceAny.cpp
new file mode 100644
index 000000000..e0cc503c4
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceAny.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceAny.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpReduceAny::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpReduceAny::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("ReduceAny");
+
+ auto op_params = op->builtin_options_as_ReducerOptions();
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_reduce_any_options();
+ op_options->set_keep_dims(op_params->keep_dims());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ReduceAny.h b/compiler/tflchef/tflite/src/Op/ReduceAny.h
new file mode 100644
index 000000000..dd5e361d5
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceAny.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_REDUCE_ANY_H__
+#define __TFLITE_OP_REDUCE_ANY_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for REDUCE_ANY
+ */
+class TFliteOpReduceAny : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_REDUCE_ANY_H__
diff --git a/compiler/tflchef/tflite/src/Op/ReduceMax.cpp b/compiler/tflchef/tflite/src/Op/ReduceMax.cpp
new file mode 100644
index 000000000..499f58566
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceMax.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceMax.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpReduceMax::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpReduceMax::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_ReducerOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ReduceMax");
+
+ auto op_options = operation->mutable_reduce_max_options();
+
+ op_options->set_keep_dims(op_params->keep_dims());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ReduceMax.h b/compiler/tflchef/tflite/src/Op/ReduceMax.h
new file mode 100644
index 000000000..8e65cf47c
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceMax.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_REDUCEMAX_H__
+#define __TFLITE_OP_REDUCEMAX_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Reduce Max
+ */
+class TFliteOpReduceMax : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_REDUCEMAX_H__
diff --git a/compiler/tflchef/tflite/src/Op/ReduceMin.cpp b/compiler/tflchef/tflite/src/Op/ReduceMin.cpp
new file mode 100644
index 000000000..09e2e134c
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceMin.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceMin.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpReduceMin::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpReduceMin::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_ReducerOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ReduceMin");
+
+ auto op_options = operation->mutable_reduce_min_options();
+
+ op_options->set_keep_dims(op_params->keep_dims());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ReduceMin.h b/compiler/tflchef/tflite/src/Op/ReduceMin.h
new file mode 100644
index 000000000..88cba6fe7
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceMin.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_REDUCEMIN_H__
+#define __TFLITE_OP_REDUCEMIN_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Reduce Min
+ */
+class TFliteOpReduceMin : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_REDUCEMIN_H__
diff --git a/compiler/tflchef/tflite/src/Op/ReduceProd.cpp b/compiler/tflchef/tflite/src/Op/ReduceProd.cpp
new file mode 100644
index 000000000..e2d98970d
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceProd.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceProd.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpReduceProd::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpReduceProd::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("ReduceProd");
+
+ auto op_params = op->builtin_options_as_ReducerOptions();
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_reduce_prod_options();
+ op_options->set_keep_dims(op_params->keep_dims());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ReduceProd.h b/compiler/tflchef/tflite/src/Op/ReduceProd.h
new file mode 100644
index 000000000..e7766840a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReduceProd.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_REDUCE_PROD_H__
+#define __TFLITE_OP_REDUCE_PROD_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for REDUCE_PROD
+ */
+class TFliteOpReduceProd : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_REDUCE_PROD_H__
diff --git a/compiler/tflchef/tflite/src/Op/Reshape.cpp b/compiler/tflchef/tflite/src/Op/Reshape.cpp
index 663ab3ec3..0094d5df5 100644
--- a/compiler/tflchef/tflite/src/Op/Reshape.cpp
+++ b/compiler/tflchef/tflite/src/Op/Reshape.cpp
@@ -17,6 +17,7 @@
#include "Reshape.h"
#include "Convert.h"
+#include "FillerHelper.h"
namespace tflchef
{
@@ -27,33 +28,29 @@ void TFliteOpReshape::filler(const tflite::Operator *op, TFliteImport *import,
const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
bool hasShape = (inputs.size() == 2);
- assert(inputs.size() == 1 || hasShape);
-
if (hasShape)
{
- auto op_params = op->builtin_options_as_ReshapeOptions();
- std::vector<int32_t> new_shape = as_index_vector(op_params->new_shape());
- import->set_tensor_filler(inputs.at(1), new_shape);
+ fill_tensor_to_import(inputs[1], import);
}
}
tflchef::Operation *TFliteOpReshape::build(const tflite::Operator *op, TFliteImport *import,
tflchef::ModelRecipe *model_recipe) const
{
- auto op_params = op->builtin_options_as_ReshapeOptions();
- assert(op_params != nullptr);
-
auto operation = model_recipe->add_operation();
operation->set_type("Reshape");
- auto op_options = operation->mutable_reshape_options();
-
- std::vector<int32_t> new_shape = as_index_vector(op_params->new_shape());
-
- for (auto shape : new_shape)
+ auto op_params = op->builtin_options_as_ReshapeOptions();
+ if (op_params != nullptr)
{
- op_options->add_new_shape(shape);
+ auto op_options = operation->mutable_reshape_options();
+
+ std::vector<int32_t> new_shape = as_index_vector(op_params->new_shape());
+ for (auto shape : new_shape)
+ {
+ op_options->add_new_shape(shape);
+ }
}
return operation;
diff --git a/compiler/tflchef/tflite/src/Op/ResizeBilinear.cpp b/compiler/tflchef/tflite/src/Op/ResizeBilinear.cpp
new file mode 100644
index 000000000..0f6db1fcb
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ResizeBilinear.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeBilinear.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpResizeBilinear::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // size buffer has filler
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpResizeBilinear::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_ResizeBilinearOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ResizeBilinear");
+
+ auto op_options = operation->mutable_resize_bilinear_options();
+
+ op_options->set_align_corners(op_params->align_corners());
+ op_options->set_half_pixel_centers(op_params->half_pixel_centers());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ResizeBilinear.h b/compiler/tflchef/tflite/src/Op/ResizeBilinear.h
new file mode 100644
index 000000000..98c49c534
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ResizeBilinear.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_RESIZE_BILINEAR_H__
+#define __TFLITE_OP_RESIZE_BILINEAR_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ResizeBilinear
+ */
+class TFliteOpResizeBilinear : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_RESIZE_BILINEAR_H__
diff --git a/compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.cpp b/compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.cpp
new file mode 100644
index 000000000..f3dd8fed0
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeNearestNeighbor.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpResizeNearestNeighbor::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // size buffer has filler
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpResizeNearestNeighbor::build(const tflite::Operator *op,
+ TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_ResizeNearestNeighborOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ResizeNearestNeighbor");
+
+ auto op_options = operation->mutable_resize_nearest_neighbor_options();
+
+ op_options->set_align_corners(op_params->align_corners());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.h b/compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.h
new file mode 100644
index 000000000..5090bb938
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ResizeNearestNeighbor.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_RESIZE_NEAREST_NEIGHBOR_H__
+#define __TFLITE_OP_RESIZE_NEAREST_NEIGHBOR_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ResizeNearestNeighbor
+ */
+class TFliteOpResizeNearestNeighbor : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_RESIZE_NEAREST_NEIGHBOR_H__
diff --git a/compiler/tflchef/tflite/src/Op/ReverseSequence.cpp b/compiler/tflchef/tflite/src/Op/ReverseSequence.cpp
new file mode 100644
index 000000000..6ef6c2326
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReverseSequence.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReverseSequence.h"
+
+#include "Convert.h"
+#include "FillerHelper.h"
+
+namespace tflchef
+{
+
+void TFliteOpReverseSequence::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *) const
+{
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+ assert(inputs.size() == 2);
+
+ fill_tensor_to_import(inputs[1], import);
+}
+
+tflchef::Operation *TFliteOpReverseSequence::build(const tflite::Operator *op, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ReverseSequence");
+
+ auto op_params = op->builtin_options_as_ReverseSequenceOptions();
+
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_reverse_sequence_options();
+
+ op_options->set_seq_dim(op_params->seq_dim());
+ op_options->set_batch_dim(op_params->batch_dim());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ReverseSequence.h b/compiler/tflchef/tflite/src/Op/ReverseSequence.h
new file mode 100644
index 000000000..8c8c811e4
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReverseSequence.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_REVERSE_SEQUENCE_H__
+#define __TFLITE_OP_REVERSE_SEQUENCE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ReverseSequence
+ */
+class TFliteOpReverseSequence : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_REVERSE_SEQUENCE_H__
diff --git a/compiler/tflchef/tflite/src/Op/ReverseV2.cpp b/compiler/tflchef/tflite/src/Op/ReverseV2.cpp
new file mode 100644
index 000000000..c59d97574
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReverseV2.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReverseV2.h"
+
+#include "Convert.h"
+#include "FillerHelper.h"
+
+namespace tflchef
+{
+
+void TFliteOpReverseV2::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *) const
+{
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+ assert(inputs.size() == 2);
+
+ fill_tensor_to_import(inputs[1], import);
+}
+
+tflchef::Operation *TFliteOpReverseV2::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ReverseV2");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ReverseV2.h b/compiler/tflchef/tflite/src/Op/ReverseV2.h
new file mode 100644
index 000000000..6a8a75e6b
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ReverseV2.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_REVERSEV2_H__
+#define __TFLITE_OP_REVERSEV2_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ReverseV2
+ */
+class TFliteOpReverseV2 : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_REVERSEV2_H__
diff --git a/compiler/tflchef/tflite/src/Op/Round.cpp b/compiler/tflchef/tflite/src/Op/Round.cpp
new file mode 100644
index 000000000..c3f6bf6c4
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Round.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Round.h"
+
+namespace tflchef
+{
+
+void TFliteOpRound::filler(const tflite::Operator *, TFliteImport *, tflchef::ModelRecipe *) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpRound::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Round");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Round.h b/compiler/tflchef/tflite/src/Op/Round.h
new file mode 100644
index 000000000..df0da3fa1
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Round.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_ROUND_H__
+#define __TFLITE_OP_ROUND_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Round
+ */
+class TFliteOpRound : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_ROUND_H__
diff --git a/compiler/tflchef/tflite/src/Op/ScatterNd.cpp b/compiler/tflchef/tflite/src/Op/ScatterNd.cpp
new file mode 100644
index 000000000..548a09a67
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ScatterNd.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScatterNd.h"
+
+#include "Convert.h"
+#include "FillerHelper.h"
+
+namespace tflchef
+{
+
+void TFliteOpScatterNd::filler(const tflite::Operator *op, TFliteImport *import,
+                               tflchef::ModelRecipe *model_recipe) const
+{
+  // Filler for indices (input 0) and shape (input 2).
+  // Map operator-local input positions to model tensor indices first:
+  // fill_tensor_to_import() takes a tensor index, not an operand slot.
+  const auto &inputs = *op->inputs();
+
+  fill_tensor_to_import(inputs[0], import);
+  fill_tensor_to_import(inputs[2], import);
+}
+
+tflchef::Operation *TFliteOpScatterNd::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("ScatterNd");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ScatterNd.h b/compiler/tflchef/tflite/src/Op/ScatterNd.h
new file mode 100644
index 000000000..76362d775
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ScatterNd.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SCATTER_ND_H__
+#define __TFLITE_OP_SCATTER_ND_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ScatterNd
+ */
+class TFliteOpScatterNd : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SCATTER_ND_H__
diff --git a/compiler/tflchef/tflite/src/Op/SegmentSum.cpp b/compiler/tflchef/tflite/src/Op/SegmentSum.cpp
new file mode 100644
index 000000000..a975ca4b3
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SegmentSum.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SegmentSum.h"
+
+#include "FillerHelper.h"
+
+namespace tflchef
+{
+
+void TFliteOpSegmentSum::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+  // Filler for segment_ids (input 1)
+ fill_tensor_to_import(1, import);
+}
+
+tflchef::Operation *TFliteOpSegmentSum::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("SegmentSum");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/SegmentSum.h b/compiler/tflchef/tflite/src/Op/SegmentSum.h
new file mode 100644
index 000000000..d20e63bd7
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SegmentSum.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SEGMENT_SUM_H__
+#define __TFLITE_OP_SEGMENT_SUM_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SEGMENT_SUM
+ */
+class TFliteOpSegmentSum : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SEGMENT_SUM_H__
diff --git a/compiler/tflchef/tflite/src/Op/Select.cpp b/compiler/tflchef/tflite/src/Op/Select.cpp
new file mode 100644
index 000000000..741ffb8f6
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Select.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Select.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSelect::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpSelect::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Select");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Select.h b/compiler/tflchef/tflite/src/Op/Select.h
new file mode 100644
index 000000000..bf8e57d78
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Select.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SELECT_H__
+#define __TFLITE_OP_SELECT_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SELECT
+ */
+class TFliteOpSelect : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SELECT_H__
diff --git a/compiler/tflchef/tflite/src/Op/SelectV2.cpp b/compiler/tflchef/tflite/src/Op/SelectV2.cpp
new file mode 100644
index 000000000..0ddabb4be
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SelectV2.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SelectV2.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSelectV2::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpSelectV2::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("SelectV2");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/SelectV2.h b/compiler/tflchef/tflite/src/Op/SelectV2.h
new file mode 100644
index 000000000..ff03341d7
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SelectV2.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SELECT_V2_H__
+#define __TFLITE_OP_SELECT_V2_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SELECT_V2
+ */
+class TFliteOpSelectV2 : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SELECT_V2_H__
diff --git a/compiler/tflchef/tflite/src/Op/Shape.cpp b/compiler/tflchef/tflite/src/Op/Shape.cpp
new file mode 100644
index 000000000..d6e490d63
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Shape.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Shape.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpShape::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpShape::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("Shape");
+
+ auto op_params = op->builtin_options_as_ShapeOptions();
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_shape_options();
+ op_options->set_out_type(as_tflchef_type(op_params->out_type()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Shape.h b/compiler/tflchef/tflite/src/Op/Shape.h
new file mode 100644
index 000000000..ebe1befb3
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Shape.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SHAPE_H__
+#define __TFLITE_OP_SHAPE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SHAPE
+ */
+class TFliteOpShape : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SHAPE_H__
diff --git a/compiler/tflchef/tflite/src/Op/Sin.cpp b/compiler/tflchef/tflite/src/Op/Sin.cpp
new file mode 100644
index 000000000..8c063f424
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Sin.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sin.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSin::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpSin::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Sin");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Sin.h b/compiler/tflchef/tflite/src/Op/Sin.h
new file mode 100644
index 000000000..51eabceb5
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Sin.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SIN_H__
+#define __TFLITE_OP_SIN_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Sin
+ */
+class TFliteOpSin : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SIN_H__
diff --git a/compiler/tflchef/tflite/src/Op/Slice.cpp b/compiler/tflchef/tflite/src/Op/Slice.cpp
new file mode 100644
index 000000000..f0c44da2d
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Slice.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Slice.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSlice::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+
+ // for begin and size
+ for (int32_t index = 1; index <= 2; ++index)
+ {
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[index]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[index], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpSlice::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Slice");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Slice.h b/compiler/tflchef/tflite/src/Op/Slice.h
new file mode 100644
index 000000000..6ca6724d3
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Slice.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SLICE_H__
+#define __TFLITE_OP_SLICE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SLICE
+ */
+class TFliteOpSlice : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SLICE_H__
diff --git a/compiler/tflchef/tflite/src/Op/SpaceToBatchND.cpp b/compiler/tflchef/tflite/src/Op/SpaceToBatchND.cpp
new file mode 100644
index 000000000..9de0775a9
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SpaceToBatchND.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SpaceToBatchND.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSpaceToBatchND::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second, third input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+
+ tensor = import->tensors()->Get(inputs[2]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ buffer = import->buffers()->Get(tensor->buffer());
+ vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[2], vec);
+}
+
+tflchef::Operation *TFliteOpSpaceToBatchND::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("SpaceToBatchND");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/SpaceToBatchND.h b/compiler/tflchef/tflite/src/Op/SpaceToBatchND.h
new file mode 100644
index 000000000..9d7bc44e8
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SpaceToBatchND.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SPACETOBATCHND_H__
+#define __TFLITE_OP_SPACETOBATCHND_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SpaceToBatchND
+ */
+class TFliteOpSpaceToBatchND : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SPACETOBATCHND_H__
diff --git a/compiler/tflchef/tflite/src/Op/SpaceToDepth.cpp b/compiler/tflchef/tflite/src/Op/SpaceToDepth.cpp
new file mode 100644
index 000000000..e5718b515
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SpaceToDepth.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SpaceToDepth.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSpaceToDepth::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpSpaceToDepth::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("SpaceToDepth");
+
+ auto op_params = op->builtin_options_as_SpaceToDepthOptions();
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_space_to_depth_options();
+
+ op_options->set_block_size(op_params->block_size());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/SpaceToDepth.h b/compiler/tflchef/tflite/src/Op/SpaceToDepth.h
new file mode 100644
index 000000000..784ad940a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SpaceToDepth.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SPACETODEPTH_H__
+#define __TFLITE_OP_SPACETODEPTH_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SpaceToDepth
+ */
+class TFliteOpSpaceToDepth : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SPACETODEPTH_H__
diff --git a/compiler/tflchef/tflite/src/Op/SparseToDense.cpp b/compiler/tflchef/tflite/src/Op/SparseToDense.cpp
new file mode 100644
index 000000000..9e4f0a067
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SparseToDense.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SparseToDense.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSparseToDense::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for Shape
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *output_shape_tensor = import->tensors()->Get(inputs[1]);
+ assert(output_shape_tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(output_shape_tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpSparseToDense::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_SparseToDenseOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("SparseToDense");
+
+ auto op_options = operation->mutable_sparse_to_dense_options();
+
+ op_options->set_validate_indices(op_params->validate_indices());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/SparseToDense.h b/compiler/tflchef/tflite/src/Op/SparseToDense.h
new file mode 100644
index 000000000..5ffe4789d
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SparseToDense.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SPARSETODENSE_H__
+#define __TFLITE_OP_SPARSETODENSE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SparseToDense
+ */
+class TFliteOpSparseToDense : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SPARSETODENSE_H__
diff --git a/compiler/tflchef/tflite/src/Op/Split.cpp b/compiler/tflchef/tflite/src/Op/Split.cpp
new file mode 100644
index 000000000..49f9aa2c6
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Split.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Split.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSplit::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ const auto &inputs = *op->inputs();
+
+ // for input split_dim
+ // NOTE unlike other Ops, Split input 0 is split_dim
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[0]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[0], vec);
+}
+
+tflchef::Operation *TFliteOpSplit::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Split");
+
+ auto op_options = operation->mutable_split_options();
+
+ auto op_params = op->builtin_options_as_SplitOptions();
+ assert(op_params != nullptr);
+
+ op_options->set_num_splits(op_params->num_splits());
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Split.h b/compiler/tflchef/tflite/src/Op/Split.h
new file mode 100644
index 000000000..af247a1b9
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Split.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SPLIT_H__
+#define __TFLITE_OP_SPLIT_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SPLIT
+ */
+class TFliteOpSplit : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SPLIT_H__
diff --git a/compiler/tflchef/tflite/src/Op/SplitV.cpp b/compiler/tflchef/tflite/src/Op/SplitV.cpp
new file mode 100644
index 000000000..18035e6f4
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SplitV.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SplitV.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSplitV::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ const auto &inputs = *op->inputs();
+
+ // for input "size_splits" and "split_dim"
+ for (int32_t idx = 1; idx <= 2; idx++)
+ {
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[idx]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[idx], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpSplitV::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("SplitV");
+
+ auto op_options = operation->mutable_split_v_options();
+
+ auto op_params = op->builtin_options_as_SplitVOptions();
+ assert(op_params != nullptr);
+
+ op_options->set_num_splits(op_params->num_splits());
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/SplitV.h b/compiler/tflchef/tflite/src/Op/SplitV.h
new file mode 100644
index 000000000..3f715b5f9
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SplitV.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SPLIT_V_H__
+#define __TFLITE_OP_SPLIT_V_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SPLIT_V
+ */
+class TFliteOpSplitV : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SPLIT_V_H__
diff --git a/compiler/tflchef/tflite/src/Op/Square.cpp b/compiler/tflchef/tflite/src/Op/Square.cpp
new file mode 100644
index 000000000..d3803284a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Square.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Square.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSquare::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpSquare::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Square");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Square.h b/compiler/tflchef/tflite/src/Op/Square.h
new file mode 100644
index 000000000..9c008fe52
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Square.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SQUARE_H__
+#define __TFLITE_OP_SQUARE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Square
+ */
+class TFliteOpSquare : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SQUARE_H__
diff --git a/compiler/tflchef/tflite/src/Op/SquaredDifference.cpp b/compiler/tflchef/tflite/src/Op/SquaredDifference.cpp
new file mode 100644
index 000000000..1ee536e76
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SquaredDifference.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SquaredDifference.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSquaredDifference::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpSquaredDifference::build(const tflite::Operator *op,
+ TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("SquaredDifference");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/SquaredDifference.h b/compiler/tflchef/tflite/src/Op/SquaredDifference.h
new file mode 100644
index 000000000..58c2ed460
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/SquaredDifference.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SQUAREDDIFFERENCE_H__
+#define __TFLITE_OP_SQUAREDDIFFERENCE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for SquaredDifference
+ */
+class TFliteOpSquaredDifference : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SQUAREDDIFFERENCE_H__
diff --git a/compiler/tflchef/tflite/src/Op/Squeeze.cpp b/compiler/tflchef/tflite/src/Op/Squeeze.cpp
new file mode 100644
index 000000000..7983fc62a
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Squeeze.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Squeeze.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSqueeze::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpSqueeze::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_SqueezeOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Squeeze");
+
+ auto op_options = operation->mutable_squeeze_options();
+
+ std::vector<int32_t> squeeze_dims = as_index_vector(op_params->squeeze_dims());
+
+ for (auto dim : squeeze_dims)
+ {
+ op_options->add_squeeze_dim(dim);
+ }
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Squeeze.h b/compiler/tflchef/tflite/src/Op/Squeeze.h
new file mode 100644
index 000000000..b6c89f73d
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Squeeze.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SQUEEZE_H__
+#define __TFLITE_OP_SQUEEZE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Squeeze
+ */
+class TFliteOpSqueeze : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SQUEEZE_H__
diff --git a/compiler/tflchef/tflite/src/Op/StridedSlice.cpp b/compiler/tflchef/tflite/src/Op/StridedSlice.cpp
new file mode 100644
index 000000000..c770236c7
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/StridedSlice.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StridedSlice.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpStridedSlice::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+
+ // for begin, end and strides
+ for (int32_t index = 1; index <= 3; ++index)
+ {
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[index]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[index], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpStridedSlice::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_StridedSliceOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("StridedSlice");
+
+ auto op_options = operation->mutable_strided_slice_options();
+
+ op_options->set_begin_mask(op_params->begin_mask());
+ op_options->set_end_mask(op_params->end_mask());
+ op_options->set_ellipsis_mask(op_params->ellipsis_mask());
+ op_options->set_new_axis_mask(op_params->new_axis_mask());
+ op_options->set_shrink_axis_mask(op_params->shrink_axis_mask());
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/StridedSlice.h b/compiler/tflchef/tflite/src/Op/StridedSlice.h
new file mode 100644
index 000000000..98054b9b9
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/StridedSlice.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_STRIDEDSLICE_H__
+#define __TFLITE_OP_STRIDEDSLICE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for StridedSlice
+ */
+class TFliteOpStridedSlice : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_STRIDEDSLICE_H__
diff --git a/compiler/tflchef/tflite/src/Op/Sub.cpp b/compiler/tflchef/tflite/src/Op/Sub.cpp
index db77fddf7..0a08bbfdf 100644
--- a/compiler/tflchef/tflite/src/Op/Sub.cpp
+++ b/compiler/tflchef/tflite/src/Op/Sub.cpp
@@ -17,6 +17,7 @@
#include "Sub.h"
#include "Convert.h"
+#include "FillerHelper.h"
namespace tflchef
{
@@ -24,7 +25,13 @@ namespace tflchef
void TFliteOpSub::filler(const tflite::Operator *op, TFliteImport *import,
tflchef::ModelRecipe *model_recipe) const
{
- // Nothing to do with filler
+ // Sub may have constant input
+
+ const std::vector<int32_t> &inputs = as_index_vector(op->inputs());
+ assert(inputs.size() == 2);
+
+ fill_tensor_to_import(inputs[0], import);
+ fill_tensor_to_import(inputs[1], import);
}
tflchef::Operation *TFliteOpSub::build(const tflite::Operator *op, TFliteImport *import,
diff --git a/compiler/tflchef/tflite/src/Op/Sum.cpp b/compiler/tflchef/tflite/src/Op/Sum.cpp
new file mode 100644
index 000000000..9f3133e85
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Sum.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sum.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpSum::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpSum::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_ReducerOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Sum");
+
+ auto op_options = operation->mutable_sum_options();
+
+ op_options->set_keep_dims(op_params->keep_dims());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Sum.h b/compiler/tflchef/tflite/src/Op/Sum.h
new file mode 100644
index 000000000..38eeb080d
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Sum.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_SUM_H__
+#define __TFLITE_OP_SUM_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for sum
+ */
+class TFliteOpSum : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_SUM_H__
diff --git a/compiler/tflchef/tflite/src/Op/Tile.cpp b/compiler/tflchef/tflite/src/Op/Tile.cpp
new file mode 100644
index 000000000..14e65131c
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Tile.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tile.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpTile::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32 ||
+ tensor->type() == tflite::TensorType::TensorType_INT64);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpTile::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Tile");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Tile.h b/compiler/tflchef/tflite/src/Op/Tile.h
new file mode 100644
index 000000000..640f52a1f
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Tile.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_TILE_H__
+#define __TFLITE_OP_TILE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Tile
+ */
+class TFliteOpTile : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_TILE_H__
diff --git a/compiler/tflchef/tflite/src/Op/TopKV2.cpp b/compiler/tflchef/tflite/src/Op/TopKV2.cpp
new file mode 100644
index 000000000..461456ae2
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/TopKV2.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TopKV2.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpTopKV2::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // filler for second input
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[1]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32 ||
+ tensor->type() == tflite::TensorType::TensorType_INT64);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[1], vec);
+}
+
+tflchef::Operation *TFliteOpTopKV2::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("TopKV2");
+
+ // NOTE there is `sorted` attribute in TF but it's always true for TFlite
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/TopKV2.h b/compiler/tflchef/tflite/src/Op/TopKV2.h
new file mode 100644
index 000000000..b2b74cc75
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/TopKV2.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_TOPK_V2_H__
+#define __TFLITE_OP_TOPK_V2_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for TOPK_V2
+ */
+class TFliteOpTopKV2 : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_TOPK_V2_H__
diff --git a/compiler/tflchef/tflite/src/Op/Transpose.cpp b/compiler/tflchef/tflite/src/Op/Transpose.cpp
index ae97a19e2..a997bb08e 100644
--- a/compiler/tflchef/tflite/src/Op/Transpose.cpp
+++ b/compiler/tflchef/tflite/src/Op/Transpose.cpp
@@ -36,16 +36,11 @@ void TFliteOpTranspose::filler(const tflite::Operator *op, TFliteImport *import,
tflchef::Operation *TFliteOpTranspose::build(const tflite::Operator *op, TFliteImport *import,
tflchef::ModelRecipe *model_recipe) const
{
- auto op_params = op->builtin_options_as<tflite::TransposeOptions>();
- assert(op_params != nullptr);
- (void)op_params;
-
auto operation = model_recipe->add_operation();
operation->set_type("Transpose");
- auto op_options = operation->mutable_transpose_options();
- (void)op_options;
+ // No options for Transpose
return operation;
}
diff --git a/compiler/tflchef/tflite/src/Op/TransposeConv.cpp b/compiler/tflchef/tflite/src/Op/TransposeConv.cpp
new file mode 100644
index 000000000..7e772b954
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/TransposeConv.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TransposeConv.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpTransposeConv::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ const auto &inputs = *op->inputs();
+
+ const tflite::Tensor *tensor = import->tensors()->Get(inputs[0]);
+ assert(tensor->type() == tflite::TensorType::TensorType_INT32);
+ const tflite::Buffer *buffer = import->buffers()->Get(tensor->buffer());
+
+ if (buffer && buffer->data())
+ {
+ auto vec = extract_buffer<int32_t>(buffer);
+ import->set_tensor_filler(inputs[0], vec);
+ }
+}
+
+tflchef::Operation *TFliteOpTransposeConv::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_TransposeConvOptions();
+
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("TransposeConv");
+
+ auto op_options = operation->mutable_transpose_conv_options();
+
+ op_options->set_stride_h(op_params->stride_h());
+ op_options->set_stride_w(op_params->stride_w());
+ op_options->set_padding(as_tflchef_padding(op_params->padding()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/TransposeConv.h b/compiler/tflchef/tflite/src/Op/TransposeConv.h
new file mode 100644
index 000000000..c79cdabd2
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/TransposeConv.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_TRANSPOSE_CONV_H__
+#define __TFLITE_OP_TRANSPOSE_CONV_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for TransposeConv
+ */
+class TFliteOpTransposeConv : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_TRANSPOSE_CONV_H__
diff --git a/compiler/tflchef/tflite/src/Op/Unique.cpp b/compiler/tflchef/tflite/src/Op/Unique.cpp
new file mode 100644
index 000000000..e3f77f40e
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Unique.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Unique.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpUnique::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpUnique::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto op_params = op->builtin_options_as_UniqueOptions();
+ assert(op_params != nullptr);
+
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Unique");
+
+ auto op_options = operation->mutable_unique_options();
+
+ op_options->set_idx_out_type(as_tflchef_type(op_params->idx_out_type()));
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Unique.h b/compiler/tflchef/tflite/src/Op/Unique.h
new file mode 100644
index 000000000..fae037c9f
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Unique.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_UNIQUE_H__
+#define __TFLITE_OP_UNIQUE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Unique
+ */
+class TFliteOpUnique : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_UNIQUE_H__
diff --git a/compiler/tflchef/tflite/src/Op/Unpack.cpp b/compiler/tflchef/tflite/src/Op/Unpack.cpp
new file mode 100644
index 000000000..a51ef84ef
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Unpack.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Unpack.h"
+
+namespace tflchef
+{
+
+void TFliteOpUnpack::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with filler
+}
+
+tflchef::Operation *TFliteOpUnpack::build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+
+ operation->set_type("Unpack");
+
+ auto op_params = op->builtin_options_as_UnpackOptions();
+ assert(op_params != nullptr);
+
+ auto op_options = operation->mutable_unpack_options();
+ op_options->set_num(op_params->num());
+ op_options->set_axis(op_params->axis());
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Unpack.h b/compiler/tflchef/tflite/src/Op/Unpack.h
new file mode 100644
index 000000000..1036bdc14
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Unpack.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_UNPACK_H__
+#define __TFLITE_OP_UNPACK_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Unpack
+ */
+class TFliteOpUnpack : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_UNPACK_H__
diff --git a/compiler/tflchef/tflite/src/Op/Where.cpp b/compiler/tflchef/tflite/src/Op/Where.cpp
new file mode 100644
index 000000000..e42de3737
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Where.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Where.h"
+
+namespace tflchef
+{
+
+void TFliteOpWhere::filler(const tflite::Operator *, TFliteImport *, tflchef::ModelRecipe *) const
+{
+ // Nothing to do with fillers here
+}
+
+tflchef::Operation *TFliteOpWhere::build(const tflite::Operator *, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("Where");
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/Where.h b/compiler/tflchef/tflite/src/Op/Where.h
new file mode 100644
index 000000000..00cdc4b00
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/Where.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_WHERE_H__
+#define __TFLITE_OP_WHERE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for Where
+ */
+class TFliteOpWhere : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_WHERE_H__
diff --git a/compiler/tflchef/tflite/src/Op/ZerosLike.cpp b/compiler/tflchef/tflite/src/Op/ZerosLike.cpp
new file mode 100644
index 000000000..a56b6bdfb
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ZerosLike.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ZerosLike.h"
+
+#include "Convert.h"
+
+namespace tflchef
+{
+
+void TFliteOpZerosLike::filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ // Nothing to do with fillers here
+}
+
+tflchef::Operation *TFliteOpZerosLike::build(const tflite::Operator *op, TFliteImport *,
+ tflchef::ModelRecipe *model_recipe) const
+{
+ auto operation = model_recipe->add_operation();
+ operation->set_type("ZerosLike");
+
+ auto op_options = operation->mutable_zeros_like_options();
+ (void)op_options;
+
+ return operation;
+}
+
+} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/Op/ZerosLike.h b/compiler/tflchef/tflite/src/Op/ZerosLike.h
new file mode 100644
index 000000000..163c1fa21
--- /dev/null
+++ b/compiler/tflchef/tflite/src/Op/ZerosLike.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_OP_ZEROS_LIKE_H__
+#define __TFLITE_OP_ZEROS_LIKE_H__
+
+#include "TFliteOpChef.h"
+
+namespace tflchef
+{
+
+/**
+ * @brief tflchef operator builder for ZerosLike
+ */
+class TFliteOpZerosLike : public TFliteOpChef
+{
+public:
+ void filler(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+ tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import,
+ tflchef::ModelRecipe *model_recipe) const override;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_OP_ZEROS_LIKE_H__
diff --git a/compiler/tflchef/tflite/src/RawModelLoader.cpp b/compiler/tflchef/tflite/src/RawModelLoader.cpp
deleted file mode 100644
index e9ef8ec8b..000000000
--- a/compiler/tflchef/tflite/src/RawModelLoader.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <tflchef/RawModel.h>
-
-#include <cwrap/Fildes.h>
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-
-namespace
-{
-
-class MemoryMappedRawModel final : public tflchef::RawModel
-{
-public:
- /**
- * @require fd and data SHOULD be valid
- */
- explicit MemoryMappedRawModel(int fd, void *data, size_t size) : _fd{fd}, _data{data}, _size{size}
- {
- // DO NOTHING
- }
-
-public:
- ~MemoryMappedRawModel()
- {
- munmap(_data, _size);
- close(_fd);
- }
-
-public:
- MemoryMappedRawModel(const MemoryMappedRawModel &) = delete;
- MemoryMappedRawModel(MemoryMappedRawModel &&) = delete;
-
-public:
- const ::tflite::Model *model(void) const override { return ::tflite::GetModel(_data); }
-
-private:
- int _fd = -1;
- void *_data = nullptr;
- size_t _size = 0;
-};
-
-} // namespace
-
-namespace tflchef
-{
-
-std::unique_ptr<RawModel> load_tflite(const std::string &path)
-{
- cwrap::Fildes fildes{open(path.c_str(), O_RDONLY)};
-
- if (fildes.get() == -1)
- {
- // Return nullptr on open failure
- return nullptr;
- }
-
- struct stat st;
- if (fstat(fildes.get(), &st) == -1)
- {
- // Return nullptr on fstat failure
- return nullptr;
- }
-
- auto size = st.st_size;
- auto data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fildes.get(), 0);
-
- if (data == MAP_FAILED)
- {
- // Return nullptr on mmap failure
- return nullptr;
- }
-
- return std::unique_ptr<tflchef::RawModel>{new MemoryMappedRawModel(fildes.release(), data, size)};
-}
-
-} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/RecipeChef.cpp b/compiler/tflchef/tflite/src/RecipeChef.cpp
index 407006b26..db62d0e40 100644
--- a/compiler/tflchef/tflite/src/RecipeChef.cpp
+++ b/compiler/tflchef/tflite/src/RecipeChef.cpp
@@ -35,9 +35,16 @@ void set_inputs(TFliteImport *import, tflchef::Operation *operation, const tflit
for (auto input : inputs)
{
- auto tensor = tensors->Get(input);
- std::string name = tensor_name(tensor);
- operation->add_input(name);
+ if (input == -1)
+ {
+ operation->add_input("");
+ }
+ else
+ {
+ auto tensor = tensors->Get(input);
+ std::string name = tensor_name(tensor);
+ operation->add_input(name);
+ }
}
}
@@ -104,11 +111,14 @@ std::unique_ptr<ModelRecipe> generate_recipe(const tflite::Model *model)
operand->set_name(tensor_name(tensor));
operand->set_type(as_tflchef_type(tensor->type()));
- std::vector<int32_t> dims = as_index_vector(tensor->shape());
- ::tflchef::TensorShape *shape = operand->mutable_shape();
- for (auto dim : dims)
+ if (tensor->shape())
{
- shape->add_dim(dim);
+ std::vector<int32_t> dims = as_index_vector(tensor->shape());
+ ::tflchef::TensorShape *shape = operand->mutable_shape();
+ for (auto dim : dims)
+ {
+ shape->add_dim(dim);
+ }
}
// filler for weights, bias and so on
diff --git a/compiler/tflchef/tflite/src/TFliteImport.h b/compiler/tflchef/tflite/src/TFliteImport.h
index fa8196405..5b46f4501 100644
--- a/compiler/tflchef/tflite/src/TFliteImport.h
+++ b/compiler/tflchef/tflite/src/TFliteImport.h
@@ -121,18 +121,18 @@ public:
}
private:
- const TFliteSubGraphs_t *_subgraphs;
- const TFliteBuffers_t *_buffers;
- const TFliteTensors_t *_tensors;
- const TFliteOperators_t *_operators;
-
- std::vector<const tflite::OperatorCode *> _op_codes;
- std::vector<int32_t> _inputs;
- std::vector<int32_t> _outputs;
-
- std::map<uint32_t, bool> _tensor_filler;
- std::map<uint32_t, std::vector<int32_t>> _tensor_filler_vint32;
- std::map<uint32_t, std::vector<float>> _tensor_filler_vfloat;
+ const TFliteSubGraphs_t *_subgraphs{nullptr};
+ const TFliteBuffers_t *_buffers{nullptr};
+ const TFliteTensors_t *_tensors{nullptr};
+ const TFliteOperators_t *_operators{nullptr};
+
+ std::vector<const tflite::OperatorCode *> _op_codes{};
+ std::vector<int32_t> _inputs{};
+ std::vector<int32_t> _outputs{};
+
+ std::map<uint32_t, bool> _tensor_filler{};
+ std::map<uint32_t, std::vector<int32_t>> _tensor_filler_vint32{};
+ std::map<uint32_t, std::vector<float>> _tensor_filler_vfloat{};
};
} // namespace tflchef
diff --git a/compiler/tflchef/tflite/src/TFliteOpChefs.h b/compiler/tflchef/tflite/src/TFliteOpChefs.h
index 685d6861b..ad52af1c2 100644
--- a/compiler/tflchef/tflite/src/TFliteOpChefs.h
+++ b/compiler/tflchef/tflite/src/TFliteOpChefs.h
@@ -20,32 +20,104 @@
// In alphabet order
#include "Op/Abs.h"
#include "Op/Add.h"
+#include "Op/AddN.h"
#include "Op/ArgMax.h"
+#include "Op/ArgMin.h"
#include "Op/AveragePool2D.h"
+#include "Op/BatchMatMul.h"
#include "Op/BatchToSpaceND.h"
+#include "Op/Cast.h"
+#include "Op/Ceil.h"
#include "Op/Concatenation.h"
#include "Op/Conv2D.h"
#include "Op/Cos.h"
+#include "Op/DepthToSpace.h"
#include "Op/DepthwiseConv2D.h"
#include "Op/Div.h"
+#include "Op/ELU.h"
#include "Op/Equal.h"
#include "Op/Exp.h"
+#include "Op/ExpandDims.h"
+#include "Op/Fill.h"
+#include "Op/Floor.h"
#include "Op/FloorDiv.h"
+#include "Op/FloorMod.h"
#include "Op/FullyConnected.h"
+#include "Op/Gather.h"
+#include "Op/GatherNd.h"
+#include "Op/Greater.h"
+#include "Op/GreaterEqual.h"
+#include "Op/L2Normalize.h"
+#include "Op/L2Pool2D.h"
+#include "Op/LeakyRelu.h"
+#include "Op/Less.h"
+#include "Op/LessEqual.h"
+#include "Op/LocalResponseNormalization.h"
+#include "Op/Log.h"
+#include "Op/LogicalAnd.h"
#include "Op/LogicalNot.h"
#include "Op/LogicalOr.h"
+#include "Op/Logistic.h"
+#include "Op/LogSoftmax.h"
+#include "Op/MatrixDiag.h"
+#include "Op/MatrixSetDiag.h"
+#include "Op/Maximum.h"
#include "Op/MaxPool2D.h"
#include "Op/Mean.h"
+#include "Op/Minimum.h"
+#include "Op/MirrorPad.h"
+#include "Op/Mul.h"
+#include "Op/Neg.h"
+#include "Op/NotEqual.h"
+#include "Op/OneHot.h"
#include "Op/Pack.h"
#include "Op/Pad.h"
+#include "Op/Pow.h"
+#include "Op/PRelu.h"
+#include "Op/Range.h"
+#include "Op/Rank.h"
+#include "Op/ReduceAny.h"
+#include "Op/ReduceMax.h"
+#include "Op/ReduceMin.h"
+#include "Op/ReduceProd.h"
#include "Op/ReLU.h"
#include "Op/ReLU6.h"
+#include "Op/ReLUN1To1.h"
#include "Op/Reshape.h"
+#include "Op/ResizeBilinear.h"
+#include "Op/ResizeNearestNeighbor.h"
+#include "Op/ReverseSequence.h"
+#include "Op/ReverseV2.h"
+#include "Op/Round.h"
#include "Op/Rsqrt.h"
+#include "Op/ScatterNd.h"
+#include "Op/SegmentSum.h"
+#include "Op/Select.h"
+#include "Op/SelectV2.h"
+#include "Op/Shape.h"
+#include "Op/Sin.h"
+#include "Op/Slice.h"
#include "Op/Softmax.h"
+#include "Op/SpaceToBatchND.h"
+#include "Op/SpaceToDepth.h"
+#include "Op/SparseToDense.h"
+#include "Op/Split.h"
+#include "Op/SplitV.h"
#include "Op/Sqrt.h"
+#include "Op/Square.h"
+#include "Op/SquaredDifference.h"
+#include "Op/Squeeze.h"
+#include "Op/StridedSlice.h"
#include "Op/Sub.h"
+#include "Op/Sum.h"
#include "Op/Tanh.h"
+#include "Op/Tile.h"
+#include "Op/TopKV2.h"
#include "Op/Transpose.h"
+#include "Op/TransposeConv.h"
+#include "Op/Unique.h"
+#include "Op/Unpack.h"
+#include "Op/Where.h"
+#include "Op/ZerosLike.h"
#endif // __TFLITE_OP_CHEFS_H__
diff --git a/compiler/tflchef/tflite/src/TFliteOpRegistry.h b/compiler/tflchef/tflite/src/TFliteOpRegistry.h
index f0aed2113..0a44b3f06 100644
--- a/compiler/tflchef/tflite/src/TFliteOpRegistry.h
+++ b/compiler/tflchef/tflite/src/TFliteOpRegistry.h
@@ -57,33 +57,105 @@ private:
REG_TFL_OP(ABS, TFliteOpAbs);
REG_TFL_OP(ADD, TFliteOpAdd);
+ REG_TFL_OP(ADD_N, TFliteOpAddN);
REG_TFL_OP(ARG_MAX, TFliteOpArgMax);
+ REG_TFL_OP(ARG_MIN, TFliteOpArgMin);
REG_TFL_OP(AVERAGE_POOL_2D, TFliteOpAveragePool2D);
+ REG_TFL_OP(BATCH_MATMUL, TFliteOpBatchMatMul);
REG_TFL_OP(BATCH_TO_SPACE_ND, TFliteOpBatchToSpaceND);
+ REG_TFL_OP(CAST, TFliteOpCast);
+ REG_TFL_OP(CEIL, TFliteOpCeil);
REG_TFL_OP(CONCATENATION, TFliteOpConcatenation);
REG_TFL_OP(CONV_2D, TFliteOpConv2D);
REG_TFL_OP(COS, TFliteOpCos);
+ REG_TFL_OP(DEPTH_TO_SPACE, TFliteOpDepthToSpace);
REG_TFL_OP(DEPTHWISE_CONV_2D, TFliteOpDepthwiseConv2D);
REG_TFL_OP(DIV, TFliteOpDiv);
+ REG_TFL_OP(ELU, TFliteOpELU);
REG_TFL_OP(EQUAL, TFliteOpEqual);
REG_TFL_OP(EXP, TFliteOpExp);
+ REG_TFL_OP(EXPAND_DIMS, TFliteOpExpandDims);
+ REG_TFL_OP(FILL, TFliteOpFill);
+ REG_TFL_OP(FLOOR, TFliteOpFloor);
REG_TFL_OP(FLOOR_DIV, TFliteOpFloorDiv);
+ REG_TFL_OP(FLOOR_MOD, TFliteOpFloorMod);
REG_TFL_OP(FULLY_CONNECTED, TFliteOpFullyConnected);
+ REG_TFL_OP(GATHER, TFliteOpGather);
+ REG_TFL_OP(GATHER_ND, TFliteOpGatherNd);
+ REG_TFL_OP(GREATER, TFliteOpGreater);
+ REG_TFL_OP(GREATER_EQUAL, TFliteOpGreaterEqual);
+ REG_TFL_OP(L2_NORMALIZATION, TFliteOpL2Normalize);
+ REG_TFL_OP(L2_POOL_2D, TFliteOpL2Pool2D);
+ REG_TFL_OP(LEAKY_RELU, TFliteOpLeakyRelu);
+ REG_TFL_OP(LESS, TFliteOpLess);
+ REG_TFL_OP(LESS_EQUAL, TFliteOpLessEqual);
+ REG_TFL_OP(LOCAL_RESPONSE_NORMALIZATION, TFliteOpLocalResponseNormalization);
+ REG_TFL_OP(LOG, TFliteOpLog);
+ REG_TFL_OP(LOGICAL_AND, TFliteOpLogicalAnd);
REG_TFL_OP(LOGICAL_NOT, TFliteOpLogicalNot);
REG_TFL_OP(LOGICAL_OR, TFliteOpLogicalOr);
+ REG_TFL_OP(LOGISTIC, TFliteOpLogistic);
+ REG_TFL_OP(LOG_SOFTMAX, TFliteOpLogSoftmax);
+ REG_TFL_OP(MATRIX_DIAG, TFliteOpMatrixDiag);
REG_TFL_OP(MAX_POOL_2D, TFliteOpMaxPool2D);
+ REG_TFL_OP(MATRIX_SET_DIAG, TFliteOpMatrixSetDiag);
+ REG_TFL_OP(MAXIMUM, TFliteOpMaximum);
REG_TFL_OP(MEAN, TFliteOpMean);
+ REG_TFL_OP(MINIMUM, TFliteOpMinimum);
+ REG_TFL_OP(MIRROR_PAD, TFliteOpMirrorPad);
+ REG_TFL_OP(MUL, TFliteOpMul);
+ REG_TFL_OP(NEG, TFliteOpNeg);
+ REG_TFL_OP(NOT_EQUAL, TFliteOpNotEqual);
+ REG_TFL_OP(ONE_HOT, TFliteOpOneHot);
REG_TFL_OP(PACK, TFliteOpPack);
REG_TFL_OP(PAD, TFliteOpPad);
+ REG_TFL_OP(POW, TFliteOpPow);
+ REG_TFL_OP(PRELU, TFliteOpPRelu);
+ REG_TFL_OP(RANGE, TFliteOpRange);
+ REG_TFL_OP(RANK, TFliteOpRank);
+ REG_TFL_OP(REDUCE_ANY, TFliteOpReduceAny);
+ REG_TFL_OP(REDUCE_MAX, TFliteOpReduceMax);
+ REG_TFL_OP(REDUCE_MIN, TFliteOpReduceMin);
+ REG_TFL_OP(REDUCE_PROD, TFliteOpReduceProd);
REG_TFL_OP(RELU, TFliteOpReLU);
REG_TFL_OP(RELU6, TFliteOpReLU6);
+ REG_TFL_OP(RELU_N1_TO_1, TFliteOpReLUN1To1);
REG_TFL_OP(RESHAPE, TFliteOpReshape);
+ REG_TFL_OP(RESIZE_BILINEAR, TFliteOpResizeBilinear);
+ REG_TFL_OP(RESIZE_NEAREST_NEIGHBOR, TFliteOpResizeNearestNeighbor);
+ REG_TFL_OP(REVERSE_SEQUENCE, TFliteOpReverseSequence);
+ REG_TFL_OP(REVERSE_V2, TFliteOpReverseV2);
+ REG_TFL_OP(ROUND, TFliteOpRound);
REG_TFL_OP(RSQRT, TFliteOpRsqrt);
+ REG_TFL_OP(SCATTER_ND, TFliteOpScatterNd);
+ REG_TFL_OP(SEGMENT_SUM, TFliteOpSegmentSum);
+ REG_TFL_OP(SELECT, TFliteOpSelect);
+ REG_TFL_OP(SELECT_V2, TFliteOpSelectV2);
+ REG_TFL_OP(SHAPE, TFliteOpShape);
+ REG_TFL_OP(SIN, TFliteOpSin);
+ REG_TFL_OP(SLICE, TFliteOpSlice);
REG_TFL_OP(SOFTMAX, TFliteOpSoftmax);
+ REG_TFL_OP(SPACE_TO_BATCH_ND, TFliteOpSpaceToBatchND);
+ REG_TFL_OP(SPACE_TO_DEPTH, TFliteOpSpaceToDepth);
+ REG_TFL_OP(SPARSE_TO_DENSE, TFliteOpSparseToDense);
+ REG_TFL_OP(SPLIT, TFliteOpSplit);
+ REG_TFL_OP(SPLIT_V, TFliteOpSplitV);
REG_TFL_OP(SQRT, TFliteOpSqrt);
+ REG_TFL_OP(SQUARE, TFliteOpSquare);
+ REG_TFL_OP(SQUARED_DIFFERENCE, TFliteOpSquaredDifference);
+ REG_TFL_OP(SQUEEZE, TFliteOpSqueeze);
+ REG_TFL_OP(STRIDED_SLICE, TFliteOpStridedSlice);
REG_TFL_OP(SUB, TFliteOpSub);
+ REG_TFL_OP(SUM, TFliteOpSum);
REG_TFL_OP(TANH, TFliteOpTanh);
+ REG_TFL_OP(TILE, TFliteOpTile);
+ REG_TFL_OP(TOPK_V2, TFliteOpTopKV2);
REG_TFL_OP(TRANSPOSE, TFliteOpTranspose);
+ REG_TFL_OP(TRANSPOSE_CONV, TFliteOpTransposeConv);
+ REG_TFL_OP(UNIQUE, TFliteOpUnique);
+ REG_TFL_OP(UNPACK, TFliteOpUnpack);
+ REG_TFL_OP(WHERE, TFliteOpWhere);
+ REG_TFL_OP(ZEROS_LIKE, TFliteOpZerosLike);
#undef REG_TFL_OP
}
diff --git a/compiler/tflchef/tools/file/CMakeLists.txt b/compiler/tflchef/tools/file/CMakeLists.txt
index 477b7d974..f411d60f1 100644
--- a/compiler/tflchef/tools/file/CMakeLists.txt
+++ b/compiler/tflchef/tools/file/CMakeLists.txt
@@ -1,3 +1,4 @@
add_executable(tflchef-file Driver.cpp)
+target_link_libraries(tflchef-file arser)
target_link_libraries(tflchef-file tflchef_core)
target_link_libraries(tflchef-file safemain)
diff --git a/compiler/tflchef/tools/file/Driver.cpp b/compiler/tflchef/tools/file/Driver.cpp
index 3ef701910..cecfeeb3e 100644
--- a/compiler/tflchef/tools/file/Driver.cpp
+++ b/compiler/tflchef/tools/file/Driver.cpp
@@ -20,30 +20,42 @@
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
+#include <arser/arser.h>
+
#include <fstream>
#include <iostream>
int entry(int argc, char **argv)
{
- if (argc != 3)
+ arser::Arser arser;
+ arser.add_argument("recipe")
+ .type(arser::DataType::STR)
+ .help("Source recipe file path to convert");
+ arser.add_argument("tflite").type(arser::DataType::STR).help("Target tflite file path");
+
+ try
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [recipe] [output]" << std::endl;
- return 255;
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
}
int32_t model_version = 1;
::tflchef::ModelRecipe model_recipe;
+ std::string recipe_path = arser.get<std::string>("recipe");
// Load model recipe from a file
{
- std::ifstream is{argv[1]};
+ std::ifstream is{recipe_path};
google::protobuf::io::IstreamInputStream iis{&is};
if (!google::protobuf::TextFormat::Parse(&iis, &model_recipe))
{
- std::cerr << "ERROR: Failed to parse recipe '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to parse recipe '" << recipe_path << "'" << std::endl;
return 255;
}
@@ -62,9 +74,10 @@ int entry(int argc, char **argv)
auto generated_model = tflchef::cook(model_recipe);
+ std::string tflite_path = arser.get<std::string>("tflite");
// Dump generated model into a file
{
- std::ofstream os{argv[2], std::ios::binary};
+ std::ofstream os{tflite_path, std::ios::binary};
os.write(generated_model.base(), generated_model.size());
}
diff --git a/compiler/tflchef/tools/reverse/CMakeLists.txt b/compiler/tflchef/tools/reverse/CMakeLists.txt
index 63cb36c06..a5c0f5bca 100644
--- a/compiler/tflchef/tools/reverse/CMakeLists.txt
+++ b/compiler/tflchef/tools/reverse/CMakeLists.txt
@@ -1,3 +1,5 @@
add_executable(tflchef-reverse Driver.cpp)
+target_link_libraries(tflchef-reverse arser)
target_link_libraries(tflchef-reverse tflchef_tflite)
target_link_libraries(tflchef-reverse safemain)
+target_link_libraries(tflchef-reverse foder)
diff --git a/compiler/tflchef/tools/reverse/Driver.cpp b/compiler/tflchef/tools/reverse/Driver.cpp
index 549756463..1116dec34 100644
--- a/compiler/tflchef/tools/reverse/Driver.cpp
+++ b/compiler/tflchef/tools/reverse/Driver.cpp
@@ -14,34 +14,41 @@
* limitations under the License.
*/
-#include <tflchef/RawModel.h>
#include <tflchef/RecipeChef.h>
+#include <arser/arser.h>
+#include <foder/FileLoader.h>
+
#include <memory>
#include <iostream>
int entry(int argc, char **argv)
{
- if (argc != 3)
+ arser::Arser arser;
+ arser.add_argument("tflite")
+ .type(arser::DataType::STR)
+ .help("Source tflite file path to convert");
+ arser.add_argument("recipe").type(arser::DataType::STR).help("Target recipe file path");
+
+ try
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [tflite] [output]" << std::endl;
- return 255;
+ arser.parse(argc, argv);
}
-
- // Load TF lite model from a tflite file
- std::unique_ptr<tflchef::RawModel> rawmodel = tflchef::load_tflite(argv[1]);
- if (rawmodel == nullptr)
+ catch (const std::runtime_error &err)
{
- std::cerr << "ERROR: Failed to load tflite '" << argv[1] << "'" << std::endl;
- return 255;
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
}
- const tflite::Model *tflmodel = rawmodel->model();
+ std::string tflite_path = arser.get<std::string>("tflite");
+ // Load TF lite model from a tflite file
+ const foder::FileLoader fileLoader{tflite_path};
+ std::vector<char> modelData = fileLoader.load();
+ const tflite::Model *tflmodel = tflite::GetModel(modelData.data());
if (tflmodel == nullptr)
{
- std::cerr << "ERROR: Failed to load tflite '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to load tflite '" << tflite_path << "'" << std::endl;
return 255;
}
@@ -53,11 +60,12 @@ int entry(int argc, char **argv)
return 255;
}
+ std::string recipe_path = arser.get<std::string>("recipe");
// Save to a file
- bool result = tflchef::write_recipe(argv[2], recipe);
+ bool result = tflchef::write_recipe(recipe_path, recipe);
if (!result)
{
- std::cerr << "ERROR: Failed to write to recipe '" << argv[2] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to write to recipe '" << recipe_path << "'" << std::endl;
return 255;
}
return 0;
diff --git a/compiler/tfldump/CMakeLists.txt b/compiler/tfldump/CMakeLists.txt
index 99b6365cc..e6afcb6d2 100644
--- a/compiler/tfldump/CMakeLists.txt
+++ b/compiler/tfldump/CMakeLists.txt
@@ -1,4 +1,5 @@
if(NOT TARGET mio_tflite)
+ message(STATUS "Build tfldump: FAILED (missing mio_tflite)")
return()
endif(NOT TARGET mio_tflite)
@@ -8,7 +9,7 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(tfldump ${DRIVER} ${SOURCES})
target_include_directories(tfldump PRIVATE include)
+target_link_libraries(tfldump arser)
target_link_libraries(tfldump mio_tflite)
target_link_libraries(tfldump safemain)
-target_link_libraries(tfldump stdex)
target_link_libraries(tfldump flatbuffers)
diff --git a/compiler/tfldump/driver/Driver.cpp b/compiler/tfldump/driver/Driver.cpp
index 2ede0fdd9..3961d2f17 100644
--- a/compiler/tfldump/driver/Driver.cpp
+++ b/compiler/tfldump/driver/Driver.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <arser/arser.h>
#include <tflread/Model.h>
#include <tfldump/Dump.h>
@@ -21,30 +22,37 @@
int entry(int argc, char **argv)
{
- if (argc != 2)
+ arser::Arser arser;
+ arser.add_argument("tflite").type(arser::DataType::STR).help("TFLite file to dump");
+
+ try
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [tflite]" << std::endl;
- return 255;
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << '\n';
+ std::cout << arser;
+ return 0;
}
+ std::string tflite_path = arser.get<std::string>("tflite");
// Load TF lite model from a tflite file
- std::unique_ptr<tflread::Model> model = tflread::load_tflite(argv[1]);
+ std::unique_ptr<tflread::Model> model = tflread::load_tflite(tflite_path);
if (model == nullptr)
{
- std::cerr << "ERROR: Failed to load tflite '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to load tflite '" << tflite_path << "'" << std::endl;
return 255;
}
const tflite::Model *tflmodel = model->model();
if (tflmodel == nullptr)
{
- std::cerr << "ERROR: Failed to load tflite '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to load tflite '" << tflite_path << "'" << std::endl;
return 255;
}
- std::cout << "Dump: " << argv[1] << std::endl << std::endl;
+ std::cout << "Dump: " << tflite_path << std::endl << std::endl;
std::cout << tflmodel << std::endl;
diff --git a/compiler/tfldump/requires.cmake b/compiler/tfldump/requires.cmake
index adcae7c1f..2cdd3a391 100644
--- a/compiler/tfldump/requires.cmake
+++ b/compiler/tfldump/requires.cmake
@@ -1 +1,3 @@
+require("arser")
require("mio-tflite")
+require("safemain")
diff --git a/compiler/tfldump/src/Dump.cpp b/compiler/tfldump/src/Dump.cpp
index e6b84251a..e1562d42f 100644
--- a/compiler/tfldump/src/Dump.cpp
+++ b/compiler/tfldump/src/Dump.cpp
@@ -110,8 +110,8 @@ void dump_sub_graph(std::ostream &os, tflread::Reader &reader)
auto operators = reader.operators();
// dump operands(tensors)
- os << "Operands: T(subgraph index : tensor index) TYPE (shape) B(buffer index) OperandName"
- << std::endl;
+ os << "Operands: T(subgraph index : tensor index) TYPE (shape) (shape_signature) "
+ << "B(buffer index) OperandName" << std::endl;
for (uint32_t i = 0; i < tensors->Length(); ++i)
{
// TODO refactor to some better structure
@@ -124,6 +124,11 @@ void dump_sub_graph(std::ostream &os, tflread::Reader &reader)
os << "T(" << reader.subgraph_index() << ":" << i << ") " << tflread::tensor_type(tensor)
<< " ";
os << "(" << dims << ") ";
+ if (tensor->shape_signature())
+ {
+ std::vector<int32_t> dims_sig = tflread::as_index_vector(tensor->shape_signature());
+ os << "(" << dims_sig << ") ";
+ }
os << "B(" << tensor->buffer() << ") ";
os << tflread::tensor_name(tensor) << std::endl;
@@ -154,7 +159,12 @@ void dump_sub_graph(std::ostream &os, tflread::Reader &reader)
os << std::endl << strqindent;
}
if (q_params->zero_point())
+ {
os << "zeropt(" << q_params->zero_point() << ") ";
+ if (q_params->zero_point()->size() > 1)
+ os << std::endl << strqindent;
+ }
+ os << "quantized_dimension(" << q_params->quantized_dimension() << ")";
os << std::endl;
}
@@ -186,7 +196,7 @@ void dump_sub_graph(std::ostream &os, tflread::Reader &reader)
for (auto input : inputs)
{
- os << " I T(" << input << ") ";
+ os << " I T(" << reader.subgraph_index() << ":" << input << ") ";
if (input >= 0)
{
auto tensor = tensors->Get(input);
@@ -196,7 +206,7 @@ void dump_sub_graph(std::ostream &os, tflread::Reader &reader)
}
for (auto output : outputs)
{
- os << " O T(" << output << ") ";
+ os << " O T(" << reader.subgraph_index() << ":" << output << ") ";
if (output >= 0)
{
auto tensor = tensors->Get(output);
@@ -214,14 +224,14 @@ void dump_sub_graph(std::ostream &os, tflread::Reader &reader)
{
auto tensor = tensors->Get(input);
std::string name = tflread::tensor_name(tensor);
- os << "I T(" << input << ") " << name << std::endl;
+ os << "I T(" << reader.subgraph_index() << ":" << input << ") " << name << std::endl;
}
for (const auto output : reader.outputs())
{
auto tensor = tensors->Get(output);
std::string name = tflread::tensor_name(tensor);
- os << "O T(" << output << ") " << name << std::endl;
+ os << "O T(" << reader.subgraph_index() << ":" << output << ") " << name << std::endl;
}
os << std::endl;
diff --git a/compiler/tfldump/src/OpPrinter.cpp b/compiler/tfldump/src/OpPrinter.cpp
index 5120f42b1..9fc1a6456 100644
--- a/compiler/tfldump/src/OpPrinter.cpp
+++ b/compiler/tfldump/src/OpPrinter.cpp
@@ -17,11 +17,11 @@
#include "OpPrinter.h"
#include "Read.h"
-#include <stdex/Memory.h>
+#include <memory>
#include <flatbuffers/flexbuffers.h>
-using stdex::make_unique;
+using std::make_unique;
namespace tfldump
{
@@ -60,6 +60,35 @@ public:
}
};
+class ArgMinPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_ArgMinOptions())
+ {
+ os << " ";
+ os << "OutputType(" << EnumNameTensorType(params->output_type()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class CastPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto cast_params = op->builtin_options_as_CastOptions())
+ {
+ os << " ";
+ os << "in_data_type(" << tflite::EnumNameTensorType(cast_params->in_data_type()) << ") ";
+ os << "out_data_type(" << tflite::EnumNameTensorType(cast_params->out_data_type()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class Conv2DPrinter : public OpPrinter
{
public:
@@ -71,6 +100,8 @@ public:
os << "Padding(" << conv_params->padding() << ") ";
os << "Stride.W(" << conv_params->stride_w() << ") ";
os << "Stride.H(" << conv_params->stride_h() << ") ";
+ os << "Dilation.W(" << conv_params->dilation_w_factor() << ") ";
+ os << "Dilation.H(" << conv_params->dilation_h_factor() << ") ";
os << "Activation("
<< EnumNameActivationFunctionType(conv_params->fused_activation_function()) << ")";
os << std::endl;
@@ -130,6 +161,20 @@ public:
}
};
+class ReducerPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto reducer_params = op->builtin_options_as_ReducerOptions())
+ {
+ os << " ";
+ os << "keep_dims(" << reducer_params->keep_dims() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class ReshapePrinter : public OpPrinter
{
public:
@@ -145,6 +190,80 @@ public:
}
};
+class ResizeBilinearPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *resize_params = op->builtin_options_as_ResizeBilinearOptions())
+ {
+ os << " ";
+ os << std::boolalpha;
+ os << "align_corners(" << resize_params->align_corners() << ")";
+ os << "half_pixel_centers(" << resize_params->half_pixel_centers() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class ResizeNearestNeighborPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *resize_params = op->builtin_options_as_ResizeNearestNeighborOptions())
+ {
+ os << " ";
+ os << std::boolalpha;
+ os << "align_corners(" << resize_params->align_corners() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class ReverseSequencePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *std_params = op->builtin_options_as_ReverseSequenceOptions())
+ {
+ os << " ";
+ os << "seq_dim(" << std_params->seq_dim() << ") ";
+ os << "batch_dim(" << std_params->batch_dim() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class DepthToSpacePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *std_params = op->builtin_options_as_DepthToSpaceOptions())
+ {
+ os << " ";
+ os << "BlockSize(" << std_params->block_size() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class SparseToDensePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *std_params = op->builtin_options_as_SparseToDenseOptions())
+ {
+ os << " ";
+ os << "ValidateIndices(" << std_params->validate_indices() << ")";
+ os << std::endl;
+ }
+ }
+};
+
class DepthwiseConv2DPrinter : public OpPrinter
{
public:
@@ -184,6 +303,95 @@ public:
}
};
+class GatherPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_GatherOptions())
+ {
+ os << " ";
+ os << "Axis(" << params->axis() << ") ";
+
+ os << std::endl;
+ }
+ }
+};
+
+class IfPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_IfOptions())
+ {
+ os << " ";
+ os << "then_subgraph_index(" << params->then_subgraph_index() << ") ";
+ os << "else_subgraph_index(" << params->else_subgraph_index() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class L2NormPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_L2NormOptions())
+ {
+ os << " ";
+ os << "Activation(" << EnumNameActivationFunctionType(params->fused_activation_function())
+ << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class LeakyReluPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_LeakyReluOptions())
+ {
+ os << " ";
+ os << "alpha(" << params->alpha() << ") ";
+ }
+ }
+};
+
+class LocalResponseNormalizationPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_LocalResponseNormalizationOptions())
+ {
+ os << " ";
+ os << "radius(" << params->radius() << ") ";
+ os << "bias(" << params->bias() << ") ";
+ os << "alpha(" << params->alpha() << ") ";
+ os << "beta(" << params->beta() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class MirrorPadPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_MirrorPadOptions())
+ {
+ os << " ";
+ os << "mode(" << EnumNameMirrorPadMode(params->mode()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class MulPrinter : public OpPrinter
{
public:
@@ -214,6 +422,35 @@ public:
}
};
+class OneHotPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_OneHotOptions())
+ {
+ os << " ";
+ os << "Axis(" << params->axis() << ") ";
+
+ os << std::endl;
+ }
+ }
+};
+
+class ShapePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_ShapeOptions())
+ {
+ os << " ";
+ os << "out_type(" << EnumNameTensorType(params->out_type()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class SoftmaxPrinter : public OpPrinter
{
public:
@@ -228,6 +465,87 @@ public:
}
};
+class SpaceToDepthPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *std_params = op->builtin_options_as_SpaceToDepthOptions())
+ {
+ os << " ";
+ os << "BlockSize(" << std_params->block_size() << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class SqueezePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_SqueezeOptions())
+ {
+ os << " ";
+ os << "SqueezeDims(";
+ for (int i = 0; i < params->squeeze_dims()->size(); ++i)
+ {
+ if (i != 0)
+ os << ", ";
+ os << params->squeeze_dims()->Get(i);
+ }
+ os << ")";
+ os << std::endl;
+ }
+ }
+};
+
+class StridedSlicePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *strided_slice_params = op->builtin_options_as_StridedSliceOptions())
+ {
+ os << " ";
+ os << "begin_mask(" << strided_slice_params->begin_mask() << ") ";
+ os << "end_mask(" << strided_slice_params->end_mask() << ") ";
+ os << "ellipsis_mask(" << strided_slice_params->ellipsis_mask() << ") ";
+ os << "new_axis_mask(" << strided_slice_params->new_axis_mask() << ") ";
+ os << "shrink_axis_mask(" << strided_slice_params->shrink_axis_mask() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class SplitPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_SplitOptions())
+ {
+ os << " ";
+ os << "num_splits(" << params->num_splits() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class SplitVPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_SplitVOptions())
+ {
+ os << " ";
+ os << "num_splits(" << params->num_splits() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class SubPrinter : public OpPrinter
{
public:
@@ -243,6 +561,51 @@ public:
}
};
+class TransposeConvPrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_TransposeConvOptions())
+ {
+ os << " ";
+ os << "Padding(" << params->padding() << ") ";
+ os << "Stride.W(" << params->stride_w() << ") ";
+ os << "Stride.H(" << params->stride_h() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class WhilePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_WhileOptions())
+ {
+ os << " ";
+ os << "cond_subgraph_index(" << params->cond_subgraph_index() << ") ";
+ os << "body_subgraph_index(" << params->body_subgraph_index() << ") ";
+ os << std::endl;
+ }
+ }
+};
+
+class UniquePrinter : public OpPrinter
+{
+public:
+ void options(const tflite::Operator *op, std::ostream &os) const override
+ {
+ if (auto *params = op->builtin_options_as_UniqueOptions())
+ {
+ os << " ";
+ os << "idx_out_type(" << EnumNameTensorType(params->idx_out_type()) << ") ";
+ os << std::endl;
+ }
+ }
+};
+
class CustomOpPrinter : public OpPrinter
{
public:
@@ -286,20 +649,69 @@ public:
OpPrinterRegistry::OpPrinterRegistry()
{
_op_map[tflite::BuiltinOperator_ADD] = make_unique<AddPrinter>();
+ // There is no Option for ADD_N
_op_map[tflite::BuiltinOperator_ARG_MAX] = make_unique<ArgMaxPrinter>();
+ _op_map[tflite::BuiltinOperator_ARG_MIN] = make_unique<ArgMinPrinter>();
_op_map[tflite::BuiltinOperator_AVERAGE_POOL_2D] = make_unique<Pool2DPrinter>();
+ _op_map[tflite::BuiltinOperator_CAST] = make_unique<CastPrinter>();
+ // There is no Option for CEIL
_op_map[tflite::BuiltinOperator_CONCATENATION] = make_unique<ConcatenationPrinter>();
_op_map[tflite::BuiltinOperator_CONV_2D] = make_unique<Conv2DPrinter>();
+ _op_map[tflite::BuiltinOperator_DEPTH_TO_SPACE] = make_unique<DepthToSpacePrinter>();
_op_map[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = make_unique<DepthwiseConv2DPrinter>();
_op_map[tflite::BuiltinOperator_DIV] = make_unique<DivPrinter>();
+ // There is no Option for FLOOR
+ // There is no Option for FLOOR_MOD
_op_map[tflite::BuiltinOperator_FULLY_CONNECTED] = make_unique<FullyConnectedPrinter>();
+ _op_map[tflite::BuiltinOperator_GATHER] = make_unique<GatherPrinter>();
+ _op_map[tflite::BuiltinOperator_IF] = make_unique<IfPrinter>();
+ _op_map[tflite::BuiltinOperator_L2_POOL_2D] = make_unique<Pool2DPrinter>();
+ _op_map[tflite::BuiltinOperator_L2_NORMALIZATION] = make_unique<L2NormPrinter>();
+ _op_map[tflite::BuiltinOperator_LEAKY_RELU] = make_unique<LeakyReluPrinter>();
+ _op_map[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION] =
+ make_unique<LocalResponseNormalizationPrinter>();
+ // There is no Option for LOG
+ // There is no Option for LOGISTIC
+ // There is no Option for LOG_SOFTMAX
_op_map[tflite::BuiltinOperator_MAX_POOL_2D] = make_unique<Pool2DPrinter>();
+ _op_map[tflite::BuiltinOperator_MIRROR_PAD] = make_unique<MirrorPadPrinter>();
_op_map[tflite::BuiltinOperator_MUL] = make_unique<MulPrinter>();
+ _op_map[tflite::BuiltinOperator_ONE_HOT] = make_unique<OneHotPrinter>();
_op_map[tflite::BuiltinOperator_PACK] = make_unique<PackPrinter>();
- // There is no Option for ReLU and ReLU6
+ // There is no Option for PAD
+ // There is no Option for PRELU
+ // There is no Option for RELU
+ // There is no Option for RELU6
+ // There is no Option for RELU_N1_TO_1
+ _op_map[tflite::BuiltinOperator_REDUCE_ANY] = make_unique<ReducerPrinter>();
+ _op_map[tflite::BuiltinOperator_REDUCE_MAX] = make_unique<ReducerPrinter>();
+ _op_map[tflite::BuiltinOperator_REDUCE_MIN] = make_unique<ReducerPrinter>();
+ _op_map[tflite::BuiltinOperator_REDUCE_PROD] = make_unique<ReducerPrinter>();
_op_map[tflite::BuiltinOperator_RESHAPE] = make_unique<ReshapePrinter>();
+ _op_map[tflite::BuiltinOperator_RESIZE_BILINEAR] = make_unique<ResizeBilinearPrinter>();
+ _op_map[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] =
+ make_unique<ResizeNearestNeighborPrinter>();
+ _op_map[tflite::BuiltinOperator_REVERSE_SEQUENCE] = make_unique<ReverseSequencePrinter>();
+ // There is no Option for ROUND
+ // There is no Option for SELECT
+ // There is no Option for SELECT_V2
+ _op_map[tflite::BuiltinOperator_SHAPE] = make_unique<ShapePrinter>();
+ // There is no Option for SIN
+ // There is no Option for SLICE
_op_map[tflite::BuiltinOperator_SOFTMAX] = make_unique<SoftmaxPrinter>();
+ _op_map[tflite::BuiltinOperator_SPACE_TO_DEPTH] = make_unique<SpaceToDepthPrinter>();
+ // There is no Option for SPACE_TO_BATCH_ND
+ _op_map[tflite::BuiltinOperator_SPARSE_TO_DENSE] = make_unique<SparseToDensePrinter>();
+ _op_map[tflite::BuiltinOperator_SPLIT] = make_unique<SplitPrinter>();
+ _op_map[tflite::BuiltinOperator_SPLIT_V] = make_unique<SplitVPrinter>();
+ _op_map[tflite::BuiltinOperator_SQUEEZE] = make_unique<SqueezePrinter>();
+ _op_map[tflite::BuiltinOperator_STRIDED_SLICE] = make_unique<StridedSlicePrinter>();
_op_map[tflite::BuiltinOperator_SUB] = make_unique<SubPrinter>();
+ _op_map[tflite::BuiltinOperator_SUM] = make_unique<ReducerPrinter>();
+ _op_map[tflite::BuiltinOperator_TRANSPOSE_CONV] = make_unique<TransposeConvPrinter>();
+ // There is no Option for TOPK_V2
+ _op_map[tflite::BuiltinOperator_UNIQUE] = make_unique<UniquePrinter>();
+ _op_map[tflite::BuiltinOperator_WHILE] = make_unique<WhilePrinter>();
_op_map[tflite::BuiltinOperator_CUSTOM] = make_unique<CustomOpPrinter>();
}
diff --git a/compiler/tflite2circle-conversion-test/CMakeLists.txt b/compiler/tflite2circle-conversion-test/CMakeLists.txt
index d7b644242..83fe23a8f 100644
--- a/compiler/tflite2circle-conversion-test/CMakeLists.txt
+++ b/compiler/tflite2circle-conversion-test/CMakeLists.txt
@@ -2,7 +2,6 @@ nnas_include(TargetRequire)
unset(REQUIRED_TARGETS)
list(APPEND REQUIRED_TARGETS tflite2circle)
-list(APPEND REQUIRED_TARGETS tflchef)
TargetRequire_Return(${REQUIRED_TARGETS})
nncc_find_resource(TensorFlowLiteRecipes)
@@ -10,46 +9,19 @@ nncc_find_resource(TensorFlowLiteRecipes)
set(TEST_REPO "${TensorFlowLiteRecipes_DIR}")
set(TEST_RECIPE_FILENAME "test.recipe")
-unset(TESTCASES)
-macro(add NAME)
- list(APPEND TESTCASES ${NAME})
-endmacro(add)
-
-# Read "test.lst"
-include("test.lst")
+file(GLOB RECIPES RELATIVE ${TEST_REPO} "${TEST_REPO}/*/${TEST_RECIPE_FILENAME}")
unset(TEST_DEPS)
unset(TEST_NAMES)
-foreach(PREFIX IN ITEMS ${TESTCASES})
- if(NOT IS_DIRECTORY "${TEST_REPO}/${PREFIX}")
- message(FATAL_ERROR "Missing '${PREFIX}' test")
+foreach(RECIPE IN ITEMS ${RECIPES})
+ get_filename_component(PREFIX ${RECIPE} DIRECTORY)
+ if(NOT IS_DIRECTORY "${TEST_REPO}/${RECIPE_PREFIX}")
+ message(FATAL_ERROR "Missing '${RECIPE_PREFIX}' test")
endif()
- set(RECIPE_SOURCE_PATH "${TEST_REPO}/${PREFIX}/${TEST_RECIPE_FILENAME}")
- set(RECIPE_FILE "${PREFIX}.recipe")
- set(RECIPE_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/${RECIPE_FILE}")
-
- set(TFLITE_FILE "${PREFIX}.tflite")
- set(TFLITE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${TFLITE_FILE}")
-
- # Copy .recipe
- add_custom_command(OUTPUT ${RECIPE_BINARY_PATH}
- COMMAND ${CMAKE_COMMAND} -E copy "${RECIPE_SOURCE_PATH}" "${RECIPE_BINARY_PATH}"
- DEPENDS ${RECIPE_SOURCE_PATH}
- COMMENT "Generate ${RECIPE_FILE}"
- )
-
- # Generate .tflite
- add_custom_command(OUTPUT ${TFLITE_OUTPUT_PATH}
- COMMAND $<TARGET_FILE:tflchef-file> ${RECIPE_BINARY_PATH} ${TFLITE_OUTPUT_PATH}
- DEPENDS ${RECIPE_BINARY_PATH}
- COMMENT "Generate ${PREFIX}.tflite"
- )
-
- list(APPEND TEST_DEPS ${RECIPE_BINARY_PATH} ${TFLITE_OUTPUT_PATH})
list(APPEND TEST_NAMES ${PREFIX})
-endforeach(PREFIX IN ITEMS ${TESTCASES})
+endforeach(RECIPE IN ITEMS ${RECIPES})
##
## Copy testall
@@ -66,6 +38,8 @@ add_custom_command(
list(APPEND TEST_DEPS "${TEST_RUNNER}")
+get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR)
+
###
### Generate test.config
###
@@ -89,6 +63,6 @@ add_test(
NAME tflite2circle_conversion_test
COMMAND "${TEST_RUNNER}"
"${TEST_CONFIG}"
- "${CMAKE_CURRENT_BINARY_DIR}"
+ "${ARTIFACTS_BIN_PATH}"
${TEST_NAMES}
)
diff --git a/compiler/tflite2circle-conversion-test/requires.cmake b/compiler/tflite2circle-conversion-test/requires.cmake
index 730711aca..87b00993f 100644
--- a/compiler/tflite2circle-conversion-test/requires.cmake
+++ b/compiler/tflite2circle-conversion-test/requires.cmake
@@ -1,2 +1,2 @@
+require("common-artifacts")
require("tflite2circle")
-require("tflchef")
diff --git a/compiler/tflite2circle-conversion-test/test.lst b/compiler/tflite2circle-conversion-test/test.lst
deleted file mode 100644
index 8b8f0aaf3..000000000
--- a/compiler/tflite2circle-conversion-test/test.lst
+++ /dev/null
@@ -1,20 +0,0 @@
-add(Add_000)
-add(AveragePool2D_000)
-add(Concatenation_000)
-add(Conv2D_000)
-add(Conv2D_001)
-add(Conv2D_U8_000)
-add(DepthwiseConv2D_000)
-add(Div_000)
-add(FullyConnected_000)
-add(FullyConnected_001)
-add(MaxPool2D_000)
-add(Quantization_000)
-add(ReLU_000)
-add(ReLU6_000)
-add(Reshape_000)
-add(Reshape_001)
-add(Reshape_U8_000)
-add(Sqrt_000)
-add(Sub_000)
-add(Sub_001)
diff --git a/compiler/tflite2circle-conversion-test/testall.sh b/compiler/tflite2circle-conversion-test/testall.sh
index 664543736..2290ee6db 100755
--- a/compiler/tflite2circle-conversion-test/testall.sh
+++ b/compiler/tflite2circle-conversion-test/testall.sh
@@ -13,6 +13,7 @@ if [[ $# -lt 2 ]]; then
exit 255
fi
+BINDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CONFIG_PATH="$1"; shift
WORKDIR="$1"; shift
@@ -31,11 +32,11 @@ while [[ $# -ne 0 ]]; do
TESTED+=("${PREFIX}")
- PASSED_TAG="${PREFIX}.passed"
+ PASSED_TAG="${BINDIR}/${PREFIX}.passed"
rm -f "${PASSED_TAG}"
- cat > "${PREFIX}.log" <(
+ cat > "${BINDIR}/${PREFIX}.log" <(
exec 2>&1
echo "-- Found tflite: ${PREFIX}.tflite"
@@ -48,7 +49,7 @@ while [[ $# -ne 0 ]]; do
# Generate circle
"${TFLITE2CIRCLE_PATH}" \
"${WORKDIR}/${PREFIX}.tflite" \
- "${WORKDIR}/${PREFIX}.circle"
+ "${BINDIR}/${PREFIX}.circle"
if [[ $? -eq 0 ]]; then
touch "${PASSED_TAG}"
diff --git a/compiler/tflite2circle/CMakeLists.txt b/compiler/tflite2circle/CMakeLists.txt
index f846a2bc5..a0a2e026b 100644
--- a/compiler/tflite2circle/CMakeLists.txt
+++ b/compiler/tflite2circle/CMakeLists.txt
@@ -10,7 +10,9 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(tflite2circle ${DRIVER} ${SOURCES})
target_include_directories(tflite2circle PRIVATE include)
target_include_directories(tflite2circle PRIVATE src)
+target_link_libraries(tflite2circle arser)
target_link_libraries(tflite2circle safemain)
-target_link_libraries(tflite2circle stdex)
target_link_libraries(tflite2circle mio_tflite)
target_link_libraries(tflite2circle mio_circle)
+
+install(TARGETS tflite2circle DESTINATION bin)
diff --git a/compiler/tflite2circle/driver/Driver.cpp b/compiler/tflite2circle/driver/Driver.cpp
index 826f9dee7..67b8e33bc 100644
--- a/compiler/tflite2circle/driver/Driver.cpp
+++ b/compiler/tflite2circle/driver/Driver.cpp
@@ -15,43 +15,60 @@
*/
#include <iostream>
+#include <memory>
+#include <string>
#include <vector>
+#include <arser/arser.h>
+
#include "CircleModel.h"
#include "TFLModel.h"
int entry(int argc, char **argv)
{
- if (argc != 3)
+ arser::Arser arser{"tflite2circle is a Tensorflow lite to circle model converter"};
+
+ arser.add_argument("tflite")
+ .nargs(1)
+ .type(arser::DataType::STR)
+ .help("Source tflite file path to convert");
+ arser.add_argument("circle").nargs(1).type(arser::DataType::STR).help("Target circle file path");
+
+ try
{
- std::cerr << "ERROR: Failed to parse arguments" << std::endl;
- std::cerr << std::endl;
- std::cerr << "USAGE: " << argv[0] << " [tflite] [circle]" << std::endl;
- return 255;
+ arser.parse(argc, argv);
+ }
+ catch (const std::runtime_error &err)
+ {
+ std::cout << err.what() << std::endl;
+ std::cout << arser;
+ return 0;
}
+ std::string tfl_path = arser.get<std::string>("tflite");
+ std::string circle_path = arser.get<std::string>("circle");
// read tflite file
- tflite2circle::TFLModel tfl_model(argv[1]);
+ tflite2circle::TFLModel tfl_model(tfl_path);
if (!tfl_model.is_valid())
{
- std::cerr << "ERROR: Failed to load tflite '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to load tflite '" << tfl_path << "'" << std::endl;
return 255;
}
// create flatbuffer builder
- auto flatbuffer_builder = stdex::make_unique<flatbuffers::FlatBufferBuilder>(1024);
+ auto flatbuffer_builder = std::make_unique<flatbuffers::FlatBufferBuilder>(1024);
// convert tflite to circle
tflite2circle::CircleModel circle_model{flatbuffer_builder, tfl_model};
- std::ofstream outfile{argv[2], std::ios::binary};
+ std::ofstream outfile{circle_path, std::ios::binary};
outfile.write(circle_model.base(), circle_model.size());
outfile.close();
// TODO find a better way of error handling
if (outfile.fail())
{
- std::cerr << "ERROR: Failed to write circle '" << argv[1] << "'" << std::endl;
+ std::cerr << "ERROR: Failed to write circle '" << circle_path << "'" << std::endl;
return 255;
}
diff --git a/compiler/tflite2circle/include/CircleModel.h b/compiler/tflite2circle/include/CircleModel.h
index ee1a8fe75..e1e35d8ff 100644
--- a/compiler/tflite2circle/include/CircleModel.h
+++ b/compiler/tflite2circle/include/CircleModel.h
@@ -23,7 +23,6 @@
#include <iostream>
#include <string>
#include <vector>
-#include <stdex/Memory.h>
#include "TFLModel.h"
diff --git a/compiler/tflite2circle/requires.cmake b/compiler/tflite2circle/requires.cmake
index dd67319b8..ff19b7491 100644
--- a/compiler/tflite2circle/requires.cmake
+++ b/compiler/tflite2circle/requires.cmake
@@ -1,4 +1,4 @@
+require("arser")
require("mio-tflite")
require("mio-circle")
require("safemain")
-require("stdex")
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions.h
index 0167da284..159a8af97 100644
--- a/compiler/tflite2circle/src/BuildBuiltinOptions.h
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions.h
@@ -21,34 +21,90 @@
#include "BuildBuiltinOptions/AbsOptions.h"
#include "BuildBuiltinOptions/AddOptions.h"
+#include "BuildBuiltinOptions/AddNOptions.h"
#include "BuildBuiltinOptions/ArgMaxOptions.h"
+#include "BuildBuiltinOptions/ArgMinOptions.h"
+#include "BuildBuiltinOptions/BatchMatMulOptions.h"
#include "BuildBuiltinOptions/BatchToSpaceNDOptions.h"
#include "BuildBuiltinOptions/CastOptions.h"
#include "BuildBuiltinOptions/ConcatenationOptions.h"
#include "BuildBuiltinOptions/Conv2DOptions.h"
#include "BuildBuiltinOptions/CosOptions.h"
+#include "BuildBuiltinOptions/DepthToSpaceOptions.h"
#include "BuildBuiltinOptions/DepthwiseConv2DOptions.h"
#include "BuildBuiltinOptions/DivOptions.h"
#include "BuildBuiltinOptions/EqualOptions.h"
#include "BuildBuiltinOptions/ExpandDimsOptions.h"
#include "BuildBuiltinOptions/ExpOptions.h"
#include "BuildBuiltinOptions/FillOptions.h"
+#include "BuildBuiltinOptions/FloorDivOptions.h"
+#include "BuildBuiltinOptions/FloorModOptions.h"
#include "BuildBuiltinOptions/FullyConnectedOptions.h"
+#include "BuildBuiltinOptions/GatherOptions.h"
+#include "BuildBuiltinOptions/GatherNdOptions.h"
+#include "BuildBuiltinOptions/GreaterOptions.h"
#include "BuildBuiltinOptions/GreaterEqualOptions.h"
+#include "BuildBuiltinOptions/IfOptions.h"
+#include "BuildBuiltinOptions/L2NormalizeOptions.h"
+// L2Pool2D uses Pool2DOptions
+#include "BuildBuiltinOptions/LeakyReluOptions.h"
+#include "BuildBuiltinOptions/LessOptions.h"
+#include "BuildBuiltinOptions/LessEqualOptions.h"
+#include "BuildBuiltinOptions/LocalResponseNormalizationOptions.h"
+#include "BuildBuiltinOptions/LogicalAndOptions.h"
#include "BuildBuiltinOptions/LogicalNotOptions.h"
#include "BuildBuiltinOptions/LogicalOrOptions.h"
+// There is no LogisticOptions
+#include "BuildBuiltinOptions/LogSoftmaxOptions.h"
+#include "BuildBuiltinOptions/MatrixDiagOptions.h"
+#include "BuildBuiltinOptions/MatrixSetDiagOptions.h"
+#include "BuildBuiltinOptions/MaximumMinimumOptions.h"
+#include "BuildBuiltinOptions/MirrorPadOptions.h"
#include "BuildBuiltinOptions/MulOptions.h"
+#include "BuildBuiltinOptions/NegOptions.h"
#include "BuildBuiltinOptions/NotEqualOptions.h"
+#include "BuildBuiltinOptions/OneHotOptions.h"
#include "BuildBuiltinOptions/PackOptions.h"
#include "BuildBuiltinOptions/PadOptions.h"
+#include "BuildBuiltinOptions/RangeOptions.h"
#include "BuildBuiltinOptions/Pool2DOptions.h"
+#include "BuildBuiltinOptions/PowOptions.h"
+#include "BuildBuiltinOptions/RankOptions.h"
+// There is no PReluOptions
#include "BuildBuiltinOptions/ReducerOptions.h"
#include "BuildBuiltinOptions/ReshapeOptions.h"
+#include "BuildBuiltinOptions/ResizeBilinearOptions.h"
+#include "BuildBuiltinOptions/ResizeNearestNeighborOptions.h"
+#include "BuildBuiltinOptions/ReverseSequenceOptions.h"
+#include "BuildBuiltinOptions/ReverseV2Options.h"
+// There is no RoundOptions
+// There is no RsqrtOptions
+#include "BuildBuiltinOptions/ScatterNdOptions.h"
+#include "BuildBuiltinOptions/SegmentSumOptions.h"
+#include "BuildBuiltinOptions/SelectOptions.h"
+#include "BuildBuiltinOptions/SelectV2Options.h"
#include "BuildBuiltinOptions/ShapeOptions.h"
+// There is no SinOptions
+#include "BuildBuiltinOptions/SliceOptions.h"
#include "BuildBuiltinOptions/SoftmaxOptions.h"
+#include "BuildBuiltinOptions/SpaceToBatchNDOptions.h"
+#include "BuildBuiltinOptions/SpaceToDepthOptions.h"
+#include "BuildBuiltinOptions/SparseToDenseOptions.h"
#include "BuildBuiltinOptions/SplitOptions.h"
+#include "BuildBuiltinOptions/SplitVOptions.h"
+#include "BuildBuiltinOptions/SquaredDifferenceOptions.h"
+#include "BuildBuiltinOptions/SquareOptions.h"
#include "BuildBuiltinOptions/SqueezeOptions.h"
+#include "BuildBuiltinOptions/StridedSliceOptions.h"
#include "BuildBuiltinOptions/SubOptions.h"
+#include "BuildBuiltinOptions/TileOptions.h"
+#include "BuildBuiltinOptions/TopKV2Options.h"
#include "BuildBuiltinOptions/TransposeOptions.h"
+#include "BuildBuiltinOptions/TransposeConvOptions.h"
+#include "BuildBuiltinOptions/UniqueOptions.h"
+#include "BuildBuiltinOptions/UnpackOptions.h"
+#include "BuildBuiltinOptions/WhereOptions.h"
+#include "BuildBuiltinOptions/WhileOptions.h"
+#include "BuildBuiltinOptions/ZerosLikeOptions.h"
#endif // __BUILD_BUITIN_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.cpp
new file mode 100644
index 000000000..df8b083d6
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AddNOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::AddNOptions>
+build_circle_AddNOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::AddNOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.h
new file mode 100644
index 000000000..9e18e8aaf
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/AddNOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_ADD_N_OPTIONS_H__
+#define __BBO_ADD_N_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::AddNOptions>
+build_circle_AddNOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_ADD_N_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.cpp
new file mode 100644
index 000000000..204558df8
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ArgMinOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ArgMinOptions>
+build_circle_ArgMinOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_ArgMinOptions();
+ assert(tflite_builtin_options);
+ circle::ArgMinOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_output_type(
+ get_circle_tensortype(tflite_builtin_options->output_type()));
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.h
new file mode 100644
index 000000000..76cbc39a4
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ArgMinOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_ARGMIN_OPTIONS_H__
+#define __BBO_ARGMIN_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ArgMinOptions>
+build_circle_ArgMinOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_ARGMIN_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.cpp
new file mode 100644
index 000000000..ac0a094d2
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchMatMulOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::BatchMatMulOptions>
+build_circle_BatchMatMulOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_BatchMatMulOptions();
+ assert(tflite_builtin_options);
+ circle::BatchMatMulOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_adjoint_lhs(tflite_builtin_options->adj_x());
+ builtin_options_builder.add_adjoint_rhs(tflite_builtin_options->adj_y());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.h
new file mode 100644
index 000000000..b1e90aa5a
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/BatchMatMulOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_BATCH_MATMUL_OPTIONS_H__
+#define __BBO_BATCH_MATMUL_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::BatchMatMulOptions>
+build_circle_BatchMatMulOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_BATCH_MATMUL_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/CastOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/CastOptions.cpp
index f07fb3b1f..bc1445248 100644
--- a/compiler/tflite2circle/src/BuildBuiltinOptions/CastOptions.cpp
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/CastOptions.cpp
@@ -26,7 +26,9 @@ flatbuffers::Offset<circle::CastOptions>
build_circle_CastOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
{
auto tflite_builtin_options = op->builtin_options_as_CastOptions();
- assert(tflite_builtin_options);
+ if (tflite_builtin_options == nullptr)
+ return 0;
+
circle::CastOptionsBuilder builtin_options_builder{fb};
builtin_options_builder.add_in_data_type(
get_circle_tensortype(tflite_builtin_options->in_data_type()));
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.cpp
new file mode 100644
index 000000000..669aadb57
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DepthToSpaceOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::DepthToSpaceOptions>
+build_circle_DepthToSpaceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_DepthToSpaceOptions();
+ assert(tflite_builtin_options);
+ circle::DepthToSpaceOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_block_size(tflite_builtin_options->block_size());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.h
new file mode 100644
index 000000000..47c09a8aa
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/DepthToSpaceOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_DEPTH_TO_SPACE_OPTIONS_H__
+#define __BBO_DEPTH_TO_SPACE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::DepthToSpaceOptions>
+build_circle_DepthToSpaceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_DEPTH_TO_SPACE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.cpp
new file mode 100644
index 000000000..aa08cfdca
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FloorDivOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::FloorDivOptions>
+build_circle_FloorDivOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::FloorDivOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.h
new file mode 100644
index 000000000..0f65591f4
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorDivOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_FLOOR_DIV_OPTIONS_H__
+#define __BBO_FLOOR_DIV_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::FloorDivOptions>
+build_circle_FloorDivOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_FLOOR_DIV_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.cpp
new file mode 100644
index 000000000..770351e8a
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FloorModOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::FloorModOptions>
+build_circle_FloorModOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::FloorModOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.h
new file mode 100644
index 000000000..e53d5f7e7
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/FloorModOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_FLOOR_MOD_OPTIONS_H__
+#define __BBO_FLOOR_MOD_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::FloorModOptions>
+build_circle_FloorModOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_FLOOR_MOD_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.cpp
new file mode 100644
index 000000000..487cce1f1
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GatherNdOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::GatherNdOptions>
+build_circle_GatherNdOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::GatherNdOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.h
new file mode 100644
index 000000000..55e4f7b34
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherNdOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_GATHER_ND_OPTIONS_H__
+#define __BBO_GATHER_ND_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::GatherNdOptions>
+build_circle_GatherNdOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_GATHER_ND_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.cpp
new file mode 100644
index 000000000..ecd5dc1e4
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GatherOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::GatherOptions>
+build_circle_GatherOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_GatherOptions();
+ assert(tflite_builtin_options);
+ circle::GatherOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_axis(tflite_builtin_options->axis());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.h
new file mode 100644
index 000000000..300a7f72e
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/GatherOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_GATHER_OPTIONS_H__
+#define __BBO_GATHER_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::GatherOptions>
+build_circle_GatherOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_GATHER_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.cpp
new file mode 100644
index 000000000..14ca0cdcd
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GreaterOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::GreaterOptions>
+build_circle_GreaterOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::GreaterOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.h
new file mode 100644
index 000000000..46e4d9957
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/GreaterOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_GREATER_OPTIONS_H__
+#define __BBO_GREATER_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::GreaterOptions>
+build_circle_GreaterOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_GREATER_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.cpp
new file mode 100644
index 000000000..cc5be4901
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IfOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::IfOptions> build_circle_IfOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_IfOptions();
+ assert(tflite_builtin_options);
+ circle::IfOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_then_subgraph_index(tflite_builtin_options->then_subgraph_index());
+ builtin_options_builder.add_else_subgraph_index(tflite_builtin_options->else_subgraph_index());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.h
new file mode 100644
index 000000000..6483d7bd6
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/IfOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_IF_OPTIONS_H__
+#define __BBO_IF_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::IfOptions> build_circle_IfOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_IF_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.cpp
new file mode 100644
index 000000000..d58aed83d
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "L2NormalizeOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::L2NormOptions>
+build_circle_L2NormOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_L2NormOptions();
+ assert(tflite_builtin_options);
+ circle::L2NormOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_fused_activation_function(
+ get_circle_activation_function_type(tflite_builtin_options->fused_activation_function()));
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.h
new file mode 100644
index 000000000..5568c4e08
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/L2NormalizeOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_L2NORMALIZE_OPTIONS_H__
+#define __BBO_L2NORMALIZE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::L2NormOptions>
+build_circle_L2NormOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_L2NORMALIZE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.cpp
new file mode 100644
index 000000000..793b1d7df
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LeakyReluOptions.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LeakyReluOptions>
+build_circle_LeakyReluOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+  // LeakyRelu carries a single 'alpha' attribute; copy it through unchanged.
+  auto *tflite_builtin_options = op->builtin_options_as_LeakyReluOptions();
+  assert(tflite_builtin_options);
+  circle::LeakyReluOptionsBuilder builtin_options_builder{fb};
+  builtin_options_builder.add_alpha(tflite_builtin_options->alpha());
+  return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.h
new file mode 100644
index 000000000..a2168f29f
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LeakyReluOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_LEAKY_RELU_OPTIONS_H__
+#define __BBO_LEAKY_RELU_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LeakyReluOptions>
+build_circle_LeakyReluOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_LEAKY_RELU_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.cpp
new file mode 100644
index 000000000..09a77535e
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LessEqualOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LessEqualOptions>
+build_circle_LessEqualOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::LessEqualOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.h
new file mode 100644
index 000000000..3477c026d
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LessEqualOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_LESSEQUAL_OPTIONS_H__
+#define __BBO_LESSEQUAL_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LessEqualOptions>
+build_circle_LessEqualOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_LESSEQUAL_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.cpp
new file mode 100644
index 000000000..0009dd6f4
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LessOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LessOptions>
+build_circle_LessOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::LessOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.h
new file mode 100644
index 000000000..932809abd
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LessOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_LESS_OPTIONS_H__
+#define __BBO_LESS_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LessOptions>
+build_circle_LessOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_LESS_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.cpp
new file mode 100644
index 000000000..342f6a891
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LocalResponseNormalizationOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LocalResponseNormalizationOptions>
+build_circle_LocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_LocalResponseNormalizationOptions();
+ assert(tflite_builtin_options);
+ circle::LocalResponseNormalizationOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_radius(tflite_builtin_options->radius());
+ builtin_options_builder.add_bias(tflite_builtin_options->bias());
+ builtin_options_builder.add_alpha(tflite_builtin_options->alpha());
+ builtin_options_builder.add_beta(tflite_builtin_options->beta());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.h
new file mode 100644
index 000000000..0b43fee94
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LocalResponseNormalizationOptions.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_LOCAL_RESPONSE_NORMALIZATION_OPTIONS_H__
+#define __BBO_LOCAL_RESPONSE_NORMALIZATION_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LocalResponseNormalizationOptions>
+build_circle_LocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_LOCAL_RESPONSE_NORMALIZATION_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.cpp
new file mode 100644
index 000000000..271028838
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LogSoftmaxOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LogSoftmaxOptions>
+build_circle_LogSoftmaxOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::LogSoftmaxOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.h
new file mode 100644
index 000000000..920d1bd60
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LogSoftmaxOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_LOG_SOFTMAX_OPTIONS_H__
+#define __BBO_LOG_SOFTMAX_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LogSoftmaxOptions>
+build_circle_LogSoftmaxOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_LOG_SOFTMAX_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.cpp
new file mode 100644
index 000000000..52bfe0be9
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LogicalAndOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LogicalAndOptions>
+build_circle_LogicalAndOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::LogicalAndOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.h
new file mode 100644
index 000000000..90243d8b8
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/LogicalAndOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_LOGICALAND_OPTIONS_H__
+#define __BBO_LOGICALAND_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::LogicalAndOptions>
+build_circle_LogicalAndOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_LOGICALAND_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.cpp
new file mode 100644
index 000000000..87a550451
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatrixDiagOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MatrixDiagOptions>
+build_circle_MatrixDiagOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::MatrixDiagOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.h
new file mode 100644
index 000000000..0fddaee40
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixDiagOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_MATRIX_DIAG_OPTIONS_H__
+#define __BBO_MATRIX_DIAG_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MatrixDiagOptions>
+build_circle_MatrixDiagOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_MATRIX_DIAG_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.cpp
new file mode 100644
index 000000000..c2e6890e6
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatrixSetDiagOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MatrixSetDiagOptions>
+build_circle_MatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::MatrixSetDiagOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.h
new file mode 100644
index 000000000..2da7426e4
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MatrixSetDiagOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_MATRIX_SET_DIAG_OPTIONS_H__
+#define __BBO_MATRIX_SET_DIAG_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MatrixSetDiagOptions>
+build_circle_MatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_MATRIX_SET_DIAG_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.cpp
new file mode 100644
index 000000000..d2d2888f2
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MaximumMinimumOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MaximumMinimumOptions>
+build_circle_MaximumMinimumOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_MaximumMinimumOptions();
+ assert(tflite_builtin_options);
+ circle::MaximumMinimumOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.h
new file mode 100644
index 000000000..fdaa7b8af
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MaximumMinimumOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_MAXIMUMMINIMUM_OPTIONS_H__
+#define __BBO_MAXIMUMMINIMUM_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MaximumMinimumOptions>
+build_circle_MaximumMinimumOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_MAXIMUMMINIMUM_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.cpp
new file mode 100644
index 000000000..5a0f7aa3e
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MirrorPadOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MirrorPadOptions>
+build_circle_MirrorPadOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+  auto tflite_builtin_options = op->builtin_options_as_MirrorPadOptions();
+  assert(tflite_builtin_options);
+  circle::MirrorPadOptionsBuilder builtin_options_builder{fb};
+  builtin_options_builder.add_mode(get_circle_mirrorpad_mode(tflite_builtin_options->mode()));
+  return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.h
new file mode 100644
index 000000000..6459dc3eb
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/MirrorPadOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_MIRROR_PAD_OPTIONS_H__
+#define __BBO_MIRROR_PAD_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::MirrorPadOptions>
+build_circle_MirrorPadOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_MIRROR_PAD_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.cpp
new file mode 100644
index 000000000..9bc62bba0
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NegOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::NegOptions> build_circle_NegOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *)
+{
+ circle::NegOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.h
new file mode 100644
index 000000000..8cdba0dea
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/NegOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_NEG_OPTIONS_H__
+#define __BBO_NEG_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::NegOptions> build_circle_NegOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_NEG_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.cpp
new file mode 100644
index 000000000..d4ca0b898
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OneHotOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::OneHotOptions>
+build_circle_OneHotOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_OneHotOptions();
+ assert(tflite_builtin_options);
+ circle::OneHotOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_axis(tflite_builtin_options->axis());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.h
new file mode 100644
index 000000000..fa24be981
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/OneHotOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_ONEHOT_OPTIONS_H__
+#define __BBO_ONEHOT_OPTIONS_H__
+
+#include <mio/circle/schema_generated.h>
+#include <mio/tflite/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::OneHotOptions>
+build_circle_OneHotOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_ONEHOT_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.cpp
new file mode 100644
index 000000000..1be39d709
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PowOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::PowOptions> build_circle_PowOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *)
+{
+ circle::PowOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.h
new file mode 100644
index 000000000..9bacd46ec
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/PowOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_POW_OPTIONS_H__
+#define __BBO_POW_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::PowOptions> build_circle_PowOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_POW_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.cpp
new file mode 100644
index 000000000..493761322
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RangeOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::RangeOptions>
+build_circle_RangeOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::RangeOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.h
new file mode 100644
index 000000000..3ee40043f
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/RangeOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_RANGE_OPTIONS_H__
+#define __BBO_RANGE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::RangeOptions>
+build_circle_RangeOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_RANGE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.cpp
new file mode 100644
index 000000000..a5cbed572
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RankOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::RankOptions>
+build_circle_RankOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::RankOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.h
new file mode 100644
index 000000000..4f70bb374
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/RankOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_RANK_OPTIONS_H__
+#define __BBO_RANK_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::RankOptions>
+build_circle_RankOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_RANK_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ReshapeOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ReshapeOptions.cpp
index dc8e73633..265f20d58 100644
--- a/compiler/tflite2circle/src/BuildBuiltinOptions/ReshapeOptions.cpp
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ReshapeOptions.cpp
@@ -25,7 +25,9 @@ flatbuffers::Offset<circle::ReshapeOptions>
build_circle_ReshapeOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
{
auto tflite_builtin_options = op->builtin_options_as_ReshapeOptions();
- assert(tflite_builtin_options);
+ if (tflite_builtin_options == nullptr)
+ return 0;
+
std::vector<int32_t> new_shape_vec{tflite_builtin_options->new_shape()->begin(),
tflite_builtin_options->new_shape()->end()};
auto new_shape = fb.CreateVector(new_shape_vec);
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.cpp
new file mode 100644
index 000000000..e02ca836c
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeBilinearOptions.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ResizeBilinearOptions>
+build_circle_ResizeBilinearOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_ResizeBilinearOptions();
+ assert(tflite_builtin_options);
+
+ circle::ResizeBilinearOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_align_corners(tflite_builtin_options->align_corners());
+ builtin_options_builder.add_half_pixel_centers(tflite_builtin_options->half_pixel_centers());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.h
new file mode 100644
index 000000000..d645eff7e
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeBilinearOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_RESIZE_BILINEAR_OPTIONS_H__
+#define __BBO_RESIZE_BILINEAR_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ResizeBilinearOptions>
+build_circle_ResizeBilinearOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_RESIZE_BILINEAR_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.cpp
new file mode 100644
index 000000000..572bd3d7c
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeNearestNeighborOptions.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ResizeNearestNeighborOptions>
+build_circle_ResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_ResizeNearestNeighborOptions();
+ assert(tflite_builtin_options);
+
+ circle::ResizeNearestNeighborOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_align_corners(tflite_builtin_options->align_corners());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.h
new file mode 100644
index 000000000..f87a43325
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ResizeNearestNeighborOptions.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_RESIZE_NEAREST_NEIGHBOR_OPTIONS_H__
+#define __BBO_RESIZE_NEAREST_NEIGHBOR_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ResizeNearestNeighborOptions>
+build_circle_ResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_RESIZE_NEAREST_NEIGHBOR_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.cpp
new file mode 100644
index 000000000..57f7d6893
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReverseSequenceOptions.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ReverseSequenceOptions>
+build_circle_ReverseSequenceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_ReverseSequenceOptions();
+ assert(tflite_builtin_options);
+ circle::ReverseSequenceOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_seq_dim(tflite_builtin_options->seq_dim());
+ builtin_options_builder.add_batch_dim(tflite_builtin_options->batch_dim());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.h
new file mode 100644
index 000000000..b0567903e
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseSequenceOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_REVERSE_SEQUENCE_OPTIONS_H__
+#define __BBO_REVERSE_SEQUENCE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ReverseSequenceOptions>
+build_circle_ReverseSequenceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_REVERSE_SEQUENCE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.cpp
new file mode 100644
index 000000000..b6771d3dc
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReverseV2Options.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ReverseV2Options>
+build_circle_ReverseV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::ReverseV2OptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.h b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.h
new file mode 100644
index 000000000..a92dd9e75
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ReverseV2Options.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_REVERSE_V2_OPTIONS_H__
+#define __BBO_REVERSE_V2_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ReverseV2Options>
+build_circle_ReverseV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_REVERSE_V2_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.cpp
new file mode 100644
index 000000000..e79480529
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScatterNdOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ScatterNdOptions>
+build_circle_ScatterNdOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::ScatterNdOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.h
new file mode 100644
index 000000000..de17ebcad
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ScatterNdOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SCATTER_ND_OPTIONS_H__
+#define __BBO_SCATTER_ND_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ScatterNdOptions>
+build_circle_ScatterNdOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SCATTER_ND_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.cpp
new file mode 100644
index 000000000..efd80c15d
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SegmentSumOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SegmentSumOptions>
+build_circle_SegmentSumOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::SegmentSumOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.h
new file mode 100644
index 000000000..e6163849a
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SegmentSumOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SEGMENT_SUM_OPTIONS_H__
+#define __BBO_SEGMENT_SUM_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SegmentSumOptions>
+build_circle_SegmentSumOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SEGMENT_SUM_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.cpp
new file mode 100644
index 000000000..c584fbfe6
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SelectOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SelectOptions>
+build_circle_SelectOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::SelectOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.h
new file mode 100644
index 000000000..4d4e6fe3c
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SELECT_OPTIONS_H__
+#define __BBO_SELECT_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SelectOptions>
+build_circle_SelectOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SELECT_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.cpp
new file mode 100644
index 000000000..9032d5e82
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SelectV2Options.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SelectV2Options>
+build_circle_SelectV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::SelectV2OptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.h
new file mode 100644
index 000000000..14e91d2a4
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SelectV2Options.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SELECT_V2_OPTIONS_H__
+#define __BBO_SELECT_V2_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SelectV2Options>
+build_circle_SelectV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SELECT_V2_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.cpp
new file mode 100644
index 000000000..d1dba1b43
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SliceOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SliceOptions>
+build_circle_SliceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::SliceOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.h
new file mode 100644
index 000000000..0d7ca606c
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SliceOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SLICE_OPTIONS_H__
+#define __BBO_SLICE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SliceOptions>
+build_circle_SliceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SLICE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.cpp
new file mode 100644
index 000000000..0cb4dba9e
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SpaceToBatchNDOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SpaceToBatchNDOptions>
+build_circle_SpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::SpaceToBatchNDOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.h
new file mode 100644
index 000000000..dd3d98305
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToBatchNDOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SPACE_TO_BATCH_ND_OPTIONS_H__
+#define __BBO_SPACE_TO_BATCH_ND_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SpaceToBatchNDOptions>
+build_circle_SpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SPACE_TO_BATCH_ND_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.cpp
new file mode 100644
index 000000000..d9d6a8fc0
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SpaceToDepthOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SpaceToDepthOptions>
+build_circle_SpaceToDepthOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_SpaceToDepthOptions();
+ assert(tflite_builtin_options);
+ circle::SpaceToDepthOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_block_size(tflite_builtin_options->block_size());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.h
new file mode 100644
index 000000000..e53875b13
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SpaceToDepthOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SPACE_TO_DEPTH_OPTIONS_H__
+#define __BBO_SPACE_TO_DEPTH_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SpaceToDepthOptions>
+build_circle_SpaceToDepthOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SPACE_TO_DEPTH_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.cpp
new file mode 100644
index 000000000..48abaac95
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SparseToDenseOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SparseToDenseOptions>
+build_circle_SparseToDenseOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_SparseToDenseOptions();
+ assert(tflite_builtin_options);
+ circle::SparseToDenseOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_validate_indices(tflite_builtin_options->validate_indices());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.h
new file mode 100644
index 000000000..c2058f072
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SparseToDenseOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SPARSETODENSE_OPTIONS_H__
+#define __BBO_SPARSETODENSE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SparseToDenseOptions>
+build_circle_SparseToDenseOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SPARSETODENSE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.cpp
new file mode 100644
index 000000000..da309f383
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SplitVOptions.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SplitVOptions>
+build_circle_SplitVOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_SplitVOptions();
+ assert(tflite_builtin_options);
+ circle::SplitVOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_num_splits(tflite_builtin_options->num_splits());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.h
new file mode 100644
index 000000000..0550c00f8
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SplitVOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SPLIT_V_OPTIONS_H__
+#define __BBO_SPLIT_V_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SplitVOptions>
+build_circle_SplitVOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SPLIT_V_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.cpp
new file mode 100644
index 000000000..4656f9167
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SquareOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SquareOptions>
+build_circle_SquareOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::SquareOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.h
new file mode 100644
index 000000000..647686e92
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SquareOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SQUARE_OPTIONS_H__
+#define __BBO_SQUARE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SquareOptions>
+build_circle_SquareOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SQUARE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.cpp
new file mode 100644
index 000000000..0c2dbde83
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SquaredDifferenceOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SquaredDifferenceOptions>
+build_circle_SquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op)
+{
+ circle::SquaredDifferenceOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.h
new file mode 100644
index 000000000..76b45c0b9
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/SquaredDifferenceOptions.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_SQUAREDDIFFERENCE_OPTIONS_H__
+#define __BBO_SQUAREDDIFFERENCE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::SquaredDifferenceOptions>
+build_circle_SquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &fb,
+ const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_SQUAREDDIFFERENCE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.cpp
new file mode 100644
index 000000000..68cf89795
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StridedSliceOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::StridedSliceOptions>
+build_circle_StridedSliceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_StridedSliceOptions();
+ assert(tflite_builtin_options);
+ circle::StridedSliceOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_begin_mask(tflite_builtin_options->begin_mask());
+ builtin_options_builder.add_end_mask(tflite_builtin_options->end_mask());
+ builtin_options_builder.add_ellipsis_mask(tflite_builtin_options->ellipsis_mask());
+ builtin_options_builder.add_new_axis_mask(tflite_builtin_options->new_axis_mask());
+ builtin_options_builder.add_shrink_axis_mask(tflite_builtin_options->shrink_axis_mask());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.h
new file mode 100644
index 000000000..e6782a053
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/StridedSliceOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_STRIDEDSLICE_OPTIONS_H__
+#define __BBO_STRIDEDSLICE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::StridedSliceOptions>
+build_circle_StridedSliceOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_STRIDEDSLICE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.cpp
new file mode 100644
index 000000000..47316bf67
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TileOptions.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::TileOptions>
+build_circle_TileOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::TileOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.h
new file mode 100644
index 000000000..e3e9a7187
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/TileOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_TILE_OPTIONS_H__
+#define __BBO_TILE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::TileOptions>
+build_circle_TileOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_TILE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.cpp
new file mode 100644
index 000000000..a2bc76f9f
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TopKV2Options.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::TopKV2Options>
+build_circle_TopKV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ circle::TopKV2OptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.h b/compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.h
new file mode 100644
index 000000000..89b5b995c
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/TopKV2Options.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_TOPK_V2_OPTIONS_H__
+#define __BBO_TOPK_V2_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::TopKV2Options>
+build_circle_TopKV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_TOPK_V2_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.cpp
new file mode 100644
index 000000000..301f2c421
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TransposeConvOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::TransposeConvOptions>
+build_circle_TransposeConvOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_TransposeConvOptions();
+ assert(tflite_builtin_options);
+ circle::TransposeConvOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_padding(get_circle_padding(tflite_builtin_options->padding()));
+ builtin_options_builder.add_stride_w(tflite_builtin_options->stride_w());
+ builtin_options_builder.add_stride_h(tflite_builtin_options->stride_h());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.h
new file mode 100644
index 000000000..dd0bec296
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/TransposeConvOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_TRANSPOSE_CONV_OPTIONS_H__
+#define __BBO_TRANSPOSE_CONV_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::TransposeConvOptions>
+build_circle_TransposeConvOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_TRANSPOSE_CONV_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.cpp
new file mode 100644
index 000000000..96ddc15ad
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "UniqueOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::UniqueOptions>
+build_circle_UniqueOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_UniqueOptions();
+ assert(tflite_builtin_options);
+ circle::UniqueOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_idx_out_type(
+ get_circle_tensortype(tflite_builtin_options->idx_out_type()));
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.h
new file mode 100644
index 000000000..35736e91e
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/UniqueOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_UNIQUE_OPTIONS_H__
+#define __BBO_UNIQUE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::UniqueOptions>
+build_circle_UniqueOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_UNIQUE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.cpp
new file mode 100644
index 000000000..c9a332a4b
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "UnpackOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::UnpackOptions>
+build_circle_UnpackOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_UnpackOptions();
+ assert(tflite_builtin_options);
+ circle::UnpackOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_num(tflite_builtin_options->num());
+ builtin_options_builder.add_axis(tflite_builtin_options->axis());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.h
new file mode 100644
index 000000000..6dfed87f0
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/UnpackOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_UNPACK_OPTIONS_H__
+#define __BBO_UNPACK_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::UnpackOptions>
+build_circle_UnpackOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_UNPACK_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.cpp
new file mode 100644
index 000000000..d591a5419
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WhereOptions.h"
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::WhereOptions>
+build_circle_WhereOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::WhereOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.h
new file mode 100644
index 000000000..a113f6923
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/WhereOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_WHERE_OPTIONS_H__
+#define __BBO_WHERE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::WhereOptions>
+build_circle_WhereOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_WHERE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.cpp
new file mode 100644
index 000000000..1ad1af75d
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WhileOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::WhileOptions>
+build_circle_WhileOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op)
+{
+ auto tflite_builtin_options = op->builtin_options_as_WhileOptions();
+ assert(tflite_builtin_options);
+ circle::WhileOptionsBuilder builtin_options_builder{fb};
+ builtin_options_builder.add_cond_subgraph_index(tflite_builtin_options->cond_subgraph_index());
+ builtin_options_builder.add_body_subgraph_index(tflite_builtin_options->body_subgraph_index());
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.h
new file mode 100644
index 000000000..000e1a241
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/WhileOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_WHILE_OPTIONS_H__
+#define __BBO_WHILE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::WhileOptions>
+build_circle_WhileOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_WHILE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.cpp
new file mode 100644
index 000000000..e64e4213c
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ZerosLikeOptions.h"
+#include "DataLookup.h"
+
+#include <cassert>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ZerosLikeOptions>
+build_circle_ZerosLikeOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *)
+{
+ circle::ZerosLikeOptionsBuilder builtin_options_builder{fb};
+ return builtin_options_builder.Finish();
+}
+
+} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.h
new file mode 100644
index 000000000..5113eccb8
--- /dev/null
+++ b/compiler/tflite2circle/src/BuildBuiltinOptions/ZerosLikeOptions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BBO_ZEROS_LIKE_OPTIONS_H__
+#define __BBO_ZEROS_LIKE_OPTIONS_H__
+
+#include <mio/tflite/schema_generated.h>
+#include <mio/circle/schema_generated.h>
+
+namespace tflite2circle
+{
+
+flatbuffers::Offset<circle::ZerosLikeOptions>
+build_circle_ZerosLikeOptions(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op);
+
+} // namespace tflite2circle
+
+#endif // __BBO_ZEROS_LIKE_OPTIONS_H__
diff --git a/compiler/tflite2circle/src/CircleModel.cpp b/compiler/tflite2circle/src/CircleModel.cpp
index 3a569323c..cb4437a49 100644
--- a/compiler/tflite2circle/src/CircleModel.cpp
+++ b/compiler/tflite2circle/src/CircleModel.cpp
@@ -15,6 +15,7 @@
*/
#include <iostream>
+#include <memory>
#include "CircleModel.h"
#include "DataLookup.h"
@@ -67,8 +68,12 @@ Offset<SubGraphLink>::Offset(FlatBufBuilder &fb, const TFLFlatBufVec *tflite_fla
for (auto it : *tflite_tensors)
{
// shape
- std::vector<int32_t> shape_vec{it->shape()->begin(), it->shape()->end()};
- auto shape = fb->CreateVector(shape_vec);
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape;
+ if (it->shape())
+ {
+ auto shape_vec = std::vector<int32_t>({it->shape()->begin(), it->shape()->end()});
+ shape = fb->CreateVector(shape_vec);
+ }
// name
flatbuffers::Offset<flatbuffers::String> name;
if (it->name())
@@ -85,6 +90,7 @@ Offset<SubGraphLink>::Offset(FlatBufBuilder &fb, const TFLFlatBufVec *tflite_fla
flatbuffers::Offset<flatbuffers::Vector<float>> max;
flatbuffers::Offset<flatbuffers::Vector<float>> scale;
flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point;
+ int32_t quantized_dimension = it->quantization()->quantized_dimension();
if (it->quantization()->min() && it->quantization()->max())
{
@@ -106,7 +112,9 @@ Offset<SubGraphLink>::Offset(FlatBufBuilder &fb, const TFLFlatBufVec *tflite_fla
zero_point = fb->CreateVector(tfzerop);
}
- quantization = circle::CreateQuantizationParameters(*fb, min, max, scale, zero_point);
+ quantization = circle::CreateQuantizationParameters(*fb, min, max, scale, zero_point,
+ circle::QuantizationDetails_NONE, 0,
+ quantized_dimension);
}
// is_variable
bool is_variable = it->is_variable();
@@ -150,6 +158,18 @@ Offset<SubGraphLink>::Offset(FlatBufBuilder &fb, const TFLFlatBufVec *tflite_fla
// builtin options
auto circle_builtin_options = get_circle_builtin_options(*fb, it);
auto circle_builtin_options_type = get_circle_builtin_options_type(it);
+ // custom options
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> circle_custom_options;
+ if (it->custom_options())
+ {
+ std::vector<uint8_t> custom_options_vec{it->custom_options()->begin(),
+ it->custom_options()->end()};
+ circle_custom_options = fb->CreateVector(custom_options_vec);
+ }
+ // custom options format
+ // TODO Make get_circle_custom_options_format
+ assert(it->custom_options_format() == tflite::CustomOptionsFormat_FLEXBUFFERS);
+ auto circle_custom_options_format = circle::CustomOptionsFormat_FLEXBUFFERS;
circle::OperatorBuilder operator_builder{*fb};
operator_builder.add_opcode_index(it->opcode_index());
@@ -157,7 +177,9 @@ Offset<SubGraphLink>::Offset(FlatBufBuilder &fb, const TFLFlatBufVec *tflite_fla
operator_builder.add_outputs(circle_outputs);
operator_builder.add_builtin_options(circle_builtin_options);
operator_builder.add_builtin_options_type(circle_builtin_options_type);
- // TODO custom_options, mutating_variable_inputs
+ operator_builder.add_custom_options(circle_custom_options);
+ operator_builder.add_custom_options_format(circle_custom_options_format);
+ // TODO mutating_variable_inputs
auto opeartor = operator_builder.Finish();
operator_vec.emplace_back(opeartor);
}
@@ -205,11 +227,11 @@ CircleModel::CircleModel(FlatBufBuilder &fb, TFLModel &model)
{
const tflite::Model *tfl_model = model.load_model();
_operator_codes_offset =
- stdex::make_unique<Offset<OperatorCodeLink>>(fb, tfl_model->operator_codes());
- _subGraphs_offset = stdex::make_unique<Offset<SubGraphLink>>(fb, tfl_model->subgraphs());
- _buffers_offset = stdex::make_unique<Offset<BufferLink>>(fb, tfl_model->buffers());
+ std::make_unique<Offset<OperatorCodeLink>>(fb, tfl_model->operator_codes());
+ _subGraphs_offset = std::make_unique<Offset<SubGraphLink>>(fb, tfl_model->subgraphs());
+ _buffers_offset = std::make_unique<Offset<BufferLink>>(fb, tfl_model->buffers());
_metadata_buffer_offset =
- stdex::make_unique<Offset<MetaDataBufferLink>>(fb, tfl_model->metadata_buffer());
+ std::make_unique<Offset<MetaDataBufferLink>>(fb, tfl_model->metadata_buffer());
model_build();
}
diff --git a/compiler/tflite2circle/src/DataLookup.cpp b/compiler/tflite2circle/src/DataLookup.cpp
index a1b780650..b0d35d1a5 100644
--- a/compiler/tflite2circle/src/DataLookup.cpp
+++ b/compiler/tflite2circle/src/DataLookup.cpp
@@ -110,4 +110,17 @@ circle::BuiltinOptions get_circle_builtin_options_type(const tflite::Operator *o
}
}
+circle::MirrorPadMode get_circle_mirrorpad_mode(tflite::MirrorPadMode tfl_mode)
+{
+ switch (tfl_mode)
+ {
+ case tflite::MirrorPadMode_REFLECT:
+ return circle::MirrorPadMode_REFLECT;
+ case tflite::MirrorPadMode_SYMMETRIC:
+ return circle::MirrorPadMode_SYMMETRIC;
+ default:
+ throw std::runtime_error("tflite2circle: wrong mirrorpad mode.");
+ }
+}
+
} // namespace tflite2circle
diff --git a/compiler/tflite2circle/src/DataLookup.h b/compiler/tflite2circle/src/DataLookup.h
index 3f141ec08..7ea01b9c8 100644
--- a/compiler/tflite2circle/src/DataLookup.h
+++ b/compiler/tflite2circle/src/DataLookup.h
@@ -23,15 +23,59 @@
namespace tflite2circle
{
+/**
+ * @brief Returns circle builtin_code according to tflite.
+ *
+ * @note You can see a list of currently supported BuiltinOperator in TFLOperator.lst file.
+*/
circle::BuiltinOperator get_circle_builtin_code(tflite::BuiltinOperator tfl_bop);
+
+/**
+ * @brief Returns circle TensorType according to tflite.
+ *
+ * @note You can see a list of currently supported TensorType in TFLTensorType.lst file.
+*/
circle::TensorType get_circle_tensortype(tflite::TensorType tfl_tt);
+
+/**
+ * @brief Returns circle Padding enum according to tflite.
+*/
circle::Padding get_circle_padding(tflite::Padding tfl_p);
+
+/**
+ * @brief Returns circle ActivationFunctionType according to tflite.
+ *
+ * @note You can see a list of currently supported ActivationFunctionType in
+ * TFLActivationFunctionType.lst file.
+*/
circle::ActivationFunctionType
get_circle_activation_function_type(tflite::ActivationFunctionType tfl_aft);
+
+/**
+ * @brief Returns circle builtin_options according to tflite.
+ *
+ * @note You can see a list of currently supported BuiltinOptions in
+ * TFLBuiltinOptions.lst file.
+ *
+ * This function calls the build_circle_##BuiltinOptions internally(e.g.
+ * build_circle_AbsOptions, build_circle_AddOptions, etc.), so refer to it for a more
+ * detailed implementation.
+*/
flatbuffers::Offset<void> get_circle_builtin_options(flatbuffers::FlatBufferBuilder &fb,
const tflite::Operator *op);
+
+/**
+ * @brief Returns circle builtin_options_type according to tflite.
+ *
+ * @note You can see a list of currently supported BuiltinOptions in TFLBuiltinOptions.lst file.
+*/
circle::BuiltinOptions get_circle_builtin_options_type(const tflite::Operator *op);
+/**
+ * @brief Returns circle MirrorPadMode according to tflite.
+*/
+circle::MirrorPadMode get_circle_mirrorpad_mode(tflite::MirrorPadMode tfl_mode);
+
} // namespace tflite2circle
#endif // __DATA_LOOKUP_H__
diff --git a/compiler/tflite2circle/src/TFLBuiltinOptions.lst b/compiler/tflite2circle/src/TFLBuiltinOptions.lst
index 65c60b8ec..3ef9f1575 100644
--- a/compiler/tflite2circle/src/TFLBuiltinOptions.lst
+++ b/compiler/tflite2circle/src/TFLBuiltinOptions.lst
@@ -15,89 +15,93 @@ TFL_BUILTIN_OPTIONS(FullyConnectedOptions)
TFL_BUILTIN_OPTIONS(SoftmaxOptions)
TFL_BUILTIN_OPTIONS(ConcatenationOptions)
TFL_BUILTIN_OPTIONS(AddOptions)
-//TFL_BUILTIN_OPTIONS(L2NormOptions)
-//TFL_BUILTIN_OPTIONS(LocalResponseNormalizationOptions)
+TFL_BUILTIN_OPTIONS(L2NormOptions)
+TFL_BUILTIN_OPTIONS(LocalResponseNormalizationOptions)
//TFL_BUILTIN_OPTIONS(LSTMOptions)
-//TFL_BUILTIN_OPTIONS(ResizeBilinearOptions)
+TFL_BUILTIN_OPTIONS(ResizeBilinearOptions)
//TFL_BUILTIN_OPTIONS(CallOptions)
TFL_BUILTIN_OPTIONS(ReshapeOptions)
//TFL_BUILTIN_OPTIONS(SkipGramOptions)
-//TFL_BUILTIN_OPTIONS(SpaceToDepthOptions)
+TFL_BUILTIN_OPTIONS(SpaceToDepthOptions)
//TFL_BUILTIN_OPTIONS(EmbeddingLookupSparseOptions)
TFL_BUILTIN_OPTIONS(MulOptions)
TFL_BUILTIN_OPTIONS(PadOptions)
-//TFL_BUILTIN_OPTIONS(GatherOptions)
+TFL_BUILTIN_OPTIONS(GatherOptions)
TFL_BUILTIN_OPTIONS(BatchToSpaceNDOptions)
-//TFL_BUILTIN_OPTIONS(SpaceToBatchNDOptions)
+TFL_BUILTIN_OPTIONS(SpaceToBatchNDOptions)
TFL_BUILTIN_OPTIONS(TransposeOptions)
TFL_BUILTIN_OPTIONS(ReducerOptions)
TFL_BUILTIN_OPTIONS(SubOptions)
TFL_BUILTIN_OPTIONS(DivOptions)
TFL_BUILTIN_OPTIONS(SqueezeOptions)
//TFL_BUILTIN_OPTIONS(SequenceRNNOptions)
-//TFL_BUILTIN_OPTIONS(StridedSliceOptions)
+TFL_BUILTIN_OPTIONS(StridedSliceOptions)
TFL_BUILTIN_OPTIONS(ExpOptions)
-//TFL_BUILTIN_OPTIONS(TopKV2Options)
+TFL_BUILTIN_OPTIONS(TopKV2Options)
TFL_BUILTIN_OPTIONS(SplitOptions)
-//TFL_BUILTIN_OPTIONS(LogSoftmaxOptions)
+TFL_BUILTIN_OPTIONS(LogSoftmaxOptions)
TFL_BUILTIN_OPTIONS(CastOptions)
//TFL_BUILTIN_OPTIONS(DequantizeOptions)
-//TFL_BUILTIN_OPTIONS(MaximumMinimumOptions)
+TFL_BUILTIN_OPTIONS(MaximumMinimumOptions)
TFL_BUILTIN_OPTIONS(ArgMaxOptions)
-//TFL_BUILTIN_OPTIONS(LessOptions)
-//TFL_BUILTIN_OPTIONS(NegOptions)
+TFL_BUILTIN_OPTIONS(LessOptions)
+TFL_BUILTIN_OPTIONS(NegOptions)
//TFL_BUILTIN_OPTIONS(PadV2Options)
-//TFL_BUILTIN_OPTIONS(GreaterOptions)
+TFL_BUILTIN_OPTIONS(GreaterOptions)
TFL_BUILTIN_OPTIONS(GreaterEqualOptions)
-//TFL_BUILTIN_OPTIONS(LessEqualOptions)
-//TFL_BUILTIN_OPTIONS(SelectOptions)
-//TFL_BUILTIN_OPTIONS(SliceOptions)
-//TFL_BUILTIN_OPTIONS(TransposeConvOptions)
-//TFL_BUILTIN_OPTIONS(SparseToDenseOptions)
-//TFL_BUILTIN_OPTIONS(TileOptions)
+TFL_BUILTIN_OPTIONS(LessEqualOptions)
+TFL_BUILTIN_OPTIONS(SelectOptions)
+TFL_BUILTIN_OPTIONS(SelectV2Options)
+TFL_BUILTIN_OPTIONS(SliceOptions)
+TFL_BUILTIN_OPTIONS(TransposeConvOptions)
+TFL_BUILTIN_OPTIONS(SparseToDenseOptions)
+TFL_BUILTIN_OPTIONS(TileOptions)
TFL_BUILTIN_OPTIONS(ExpandDimsOptions)
TFL_BUILTIN_OPTIONS(EqualOptions)
TFL_BUILTIN_OPTIONS(NotEqualOptions)
TFL_BUILTIN_OPTIONS(ShapeOptions)
-//TFL_BUILTIN_OPTIONS(PowOptions)
-//TFL_BUILTIN_OPTIONS(ArgMinOptions)
+TFL_BUILTIN_OPTIONS(PowOptions)
+TFL_BUILTIN_OPTIONS(ArgMinOptions)
//TFL_BUILTIN_OPTIONS(FakeQuantOptions)
TFL_BUILTIN_OPTIONS(PackOptions)
TFL_BUILTIN_OPTIONS(LogicalOrOptions)
-//TFL_BUILTIN_OPTIONS(OneHotOptions)
-//TFL_BUILTIN_OPTIONS(LogicalAndOptions)
+TFL_BUILTIN_OPTIONS(OneHotOptions)
+TFL_BUILTIN_OPTIONS(LogicalAndOptions)
TFL_BUILTIN_OPTIONS(LogicalNotOptions)
-//TFL_BUILTIN_OPTIONS(UnpackOptions)
-//TFL_BUILTIN_OPTIONS(FloorDivOptions)
-//TFL_BUILTIN_OPTIONS(SquareOptions)
-//TFL_BUILTIN_OPTIONS(ZerosLikeOptions)
+TFL_BUILTIN_OPTIONS(UnpackOptions)
+TFL_BUILTIN_OPTIONS(FloorDivOptions)
+TFL_BUILTIN_OPTIONS(SquareOptions)
+TFL_BUILTIN_OPTIONS(ZerosLikeOptions)
TFL_BUILTIN_OPTIONS(FillOptions)
//TFL_BUILTIN_OPTIONS(BidirectionalSequenceLSTMOptions)
//TFL_BUILTIN_OPTIONS(BidirectionalSequenceRNNOptions)
//TFL_BUILTIN_OPTIONS(UnidirectionalSequenceLSTMOptions)
-//TFL_BUILTIN_OPTIONS(FloorModOptions)
-//TFL_BUILTIN_OPTIONS(RangeOptions)
-//TFL_BUILTIN_OPTIONS(ResizeNearestNeighborOptions)
-//TFL_BUILTIN_OPTIONS(LeakyReluOptions)
-//TFL_BUILTIN_OPTIONS(SquaredDifferenceOptions)
-//TFL_BUILTIN_OPTIONS(MirrorPadOptions)
+TFL_BUILTIN_OPTIONS(FloorModOptions)
+TFL_BUILTIN_OPTIONS(RangeOptions)
+TFL_BUILTIN_OPTIONS(ResizeNearestNeighborOptions)
+TFL_BUILTIN_OPTIONS(LeakyReluOptions)
+TFL_BUILTIN_OPTIONS(SquaredDifferenceOptions)
+TFL_BUILTIN_OPTIONS(MirrorPadOptions)
TFL_BUILTIN_OPTIONS(AbsOptions)
-//TFL_BUILTIN_OPTIONS(SplitVOptions)
-//TFL_BUILTIN_OPTIONS(UniqueOptions)
-//TFL_BUILTIN_OPTIONS(ReverseV2Options)
-//TFL_BUILTIN_OPTIONS(AddNOptions)
-//TFL_BUILTIN_OPTIONS(GatherNdOptions)
+TFL_BUILTIN_OPTIONS(SplitVOptions)
+TFL_BUILTIN_OPTIONS(UniqueOptions)
+TFL_BUILTIN_OPTIONS(ReverseV2Options)
+TFL_BUILTIN_OPTIONS(AddNOptions)
+TFL_BUILTIN_OPTIONS(GatherNdOptions)
TFL_BUILTIN_OPTIONS(CosOptions)
-//TFL_BUILTIN_OPTIONS(WhereOptions)
+TFL_BUILTIN_OPTIONS(WhereOptions)
//TFL_BUILTIN_OPTIONS(RankOptions)
-//TFL_BUILTIN_OPTIONS(ReverseSequenceOptions)
-//TFL_BUILTIN_OPTIONS(MatrixDiagOptions)
+TFL_BUILTIN_OPTIONS(ReverseSequenceOptions)
+TFL_BUILTIN_OPTIONS(MatrixDiagOptions)
//TFL_BUILTIN_OPTIONS(QuantizeOptions)
-//TFL_BUILTIN_OPTIONS(MatrixSetDiagOptions)
+TFL_BUILTIN_OPTIONS(MatrixSetDiagOptions)
//TFL_BUILTIN_OPTIONS(HardSwishOptions)
-//TFL_BUILTIN_OPTIONS(IfOptions)
-//TFL_BUILTIN_OPTIONS(WhileOptions)
-//TFL_BUILTIN_OPTIONS(DepthToSpaceOptions)
+TFL_BUILTIN_OPTIONS(IfOptions)
+TFL_BUILTIN_OPTIONS(WhileOptions)
+TFL_BUILTIN_OPTIONS(DepthToSpaceOptions)
//TFL_BUILTIN_OPTIONS(NonMaxSuppressionV4Options)
//TFL_BUILTIN_OPTIONS(NonMaxSuppressionV5Options)
-//TFL_BUILTIN_OPTIONS(ScatterNdOptions)
+TFL_BUILTIN_OPTIONS(RankOptions)
+TFL_BUILTIN_OPTIONS(ScatterNdOptions)
+TFL_BUILTIN_OPTIONS(SegmentSumOptions)
+TFL_BUILTIN_OPTIONS(BatchMatMulOptions)
diff --git a/compiler/tflite2circle/src/TFLOperator.lst b/compiler/tflite2circle/src/TFLOperator.lst
index ac2f9daec..942c846c7 100644
--- a/compiler/tflite2circle/src/TFLOperator.lst
+++ b/compiler/tflite2circle/src/TFLOperator.lst
@@ -8,6 +8,7 @@ TFL_OPERATOR(ADD)
TFL_OPERATOR(AVERAGE_POOL_2D)
TFL_OPERATOR(CONCATENATION)
TFL_OPERATOR(CONV_2D)
+TFL_OPERATOR(DEPTH_TO_SPACE)
TFL_OPERATOR(DEPTHWISE_CONV_2D)
TFL_OPERATOR(DEQUANTIZE)
TFL_OPERATOR(EMBEDDING_LOOKUP)
@@ -126,3 +127,7 @@ TFL_OPERATOR(WHILE)
TFL_OPERATOR(NON_MAX_SUPPRESSION_V4)
TFL_OPERATOR(NON_MAX_SUPPRESSION_V5)
TFL_OPERATOR(SCATTER_ND)
+TFL_OPERATOR(SELECT_V2)
+TFL_OPERATOR(DENSIFY)
+TFL_OPERATOR(SEGMENT_SUM)
+TFL_OPERATOR(BATCH_MATMUL)