summaryrefslogtreecommitdiff
path: root/compiler/enco
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
commite2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (patch)
tree44a1a7951d168dd4370e13593ed03f4bc6d920c5 /compiler/enco
parent302e6564a7a76109e1178207e44e45a58631c477 (diff)
downloadnnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.gz
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.bz2
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.zip
Imported Upstream version 1.4.0upstream/1.4.0submit/tizen/20200423.054851
Diffstat (limited to 'compiler/enco')
-rw-r--r--compiler/enco/CMakeLists.txt4
-rw-r--r--compiler/enco/README.md25
-rw-r--r--compiler/enco/cli/CMakeLists.txt11
-rw-r--r--compiler/enco/cli/src/Driver.cpp221
-rw-r--r--compiler/enco/core/CMakeLists.txt35
-rw-r--r--compiler/enco/core/include/enco/Backend.h41
-rw-r--r--compiler/enco/core/src/ANN/Binder.h219
-rw-r--r--compiler/enco/core/src/ANN/Context.cpp31
-rw-r--r--compiler/enco/core/src/ANN/Context.h57
-rw-r--r--compiler/enco/core/src/ANN/Context.test.cpp73
-rw-r--r--compiler/enco/core/src/ANN/IR/DType.cpp25
-rw-r--r--compiler/enco/core/src/ANN/IR/DType.h36
-rw-r--r--compiler/enco/core/src/ANN/IR/DType.test.cpp25
-rw-r--r--compiler/enco/core/src/ANN/IR/InputList.h31
-rw-r--r--compiler/enco/core/src/ANN/IR/Module.h60
-rw-r--r--compiler/enco/core/src/ANN/IR/Module.test.cpp36
-rw-r--r--compiler/enco/core/src/ANN/IR/Operand.h82
-rw-r--r--compiler/enco/core/src/ANN/IR/Operand.test.cpp37
-rw-r--r--compiler/enco/core/src/ANN/IR/OperandID.h48
-rw-r--r--compiler/enco/core/src/ANN/IR/OperandID.test.cpp33
-rw-r--r--compiler/enco/core/src/ANN/IR/OperandInventory.cpp57
-rw-r--r--compiler/enco/core/src/ANN/IR/OperandInventory.h56
-rw-r--r--compiler/enco/core/src/ANN/IR/OperandInventory.test.cpp30
-rw-r--r--compiler/enco/core/src/ANN/IR/Operation.def17
-rw-r--r--compiler/enco/core/src/ANN/IR/Operation.h59
-rw-r--r--compiler/enco/core/src/ANN/IR/Operation.test.cpp28
-rw-r--r--compiler/enco/core/src/ANN/IR/OperationInventory.cpp32
-rw-r--r--compiler/enco/core/src/ANN/IR/OperationInventory.h48
-rw-r--r--compiler/enco/core/src/ANN/IR/OperationInventory.test.cpp40
-rw-r--r--compiler/enco/core/src/ANN/IR/OutputList.h31
-rw-r--r--compiler/enco/core/src/ANN/IR/Weight.h70
-rw-r--r--compiler/enco/core/src/ANN/IR/Weight.test.cpp53
-rw-r--r--compiler/enco/core/src/ANN/IR/WeightInventory.cpp34
-rw-r--r--compiler/enco/core/src/ANN/IR/WeightInventory.h38
-rw-r--r--compiler/enco/core/src/ANN/IR/WeightInventory.test.cpp29
-rw-r--r--compiler/enco/core/src/AsmCode.cpp33
-rw-r--r--compiler/enco/core/src/AsmCode.h51
-rw-r--r--compiler/enco/core/src/Backend.cpp178
-rw-r--r--compiler/enco/core/src/Code.h47
-rw-r--r--compiler/enco/core/src/Code.test.cpp30
-rw-r--r--compiler/enco/core/src/CodeIndex.h76
-rw-r--r--compiler/enco/core/src/CppCode.cpp553
-rw-r--r--compiler/enco/core/src/CppCode.h51
-rw-r--r--compiler/enco/core/src/CppGen/Host.cpp306
-rw-r--r--compiler/enco/core/src/CppGen/Host.h48
-rw-r--r--compiler/enco/core/src/CppGen/MemoryContext.cpp40
-rw-r--r--compiler/enco/core/src/CppGen/MemoryContext.h55
-rw-r--r--compiler/enco/core/src/CppGen/Subnet.cpp422
-rw-r--r--compiler/enco/core/src/CppGen/Subnet.h91
-rw-r--r--compiler/enco/core/src/Dims.h34
-rw-r--r--compiler/enco/core/src/IRUtils.cpp65
-rw-r--r--compiler/enco/core/src/IRUtils.h41
-rw-r--r--compiler/enco/core/src/IRValidator.cpp85
-rw-r--r--compiler/enco/core/src/IRValidator.h29
-rw-r--r--compiler/enco/core/src/IRValidator.test.cpp130
-rw-r--r--compiler/enco/core/src/Pass.h78
-rw-r--r--compiler/enco/core/src/Pass.test.cpp41
-rw-r--r--compiler/enco/core/src/Pipeline.h46
-rw-r--r--compiler/enco/core/src/Pipeline.test.cpp26
-rw-r--r--compiler/enco/core/src/Session.cpp58
-rw-r--r--compiler/enco/core/src/Session.h45
-rw-r--r--compiler/enco/core/src/String.h57
-rw-r--r--compiler/enco/core/src/Support/Debugging.cpp533
-rw-r--r--compiler/enco/core/src/Support/Debugging.h110
-rw-r--r--compiler/enco/core/src/Support/Debugging.test.cpp26
-rw-r--r--compiler/enco/core/src/Transforms/AvgPoolLowering.cpp229
-rw-r--r--compiler/enco/core/src/Transforms/AvgPoolLowering.h43
-rw-r--r--compiler/enco/core/src/Transforms/ConcatLowering.cpp196
-rw-r--r--compiler/enco/core/src/Transforms/ConcatLowering.h43
-rw-r--r--compiler/enco/core/src/Transforms/ConstantFolding.cpp442
-rw-r--r--compiler/enco/core/src/Transforms/ConstantFolding.h43
-rw-r--r--compiler/enco/core/src/Transforms/ConstantFolding.test.cpp327
-rw-r--r--compiler/enco/core/src/Transforms/CopyLowering.cpp105
-rw-r--r--compiler/enco/core/src/Transforms/CopyLowering.h43
-rw-r--r--compiler/enco/core/src/Transforms/DataLayoutConversion.cpp383
-rw-r--r--compiler/enco/core/src/Transforms/DataLayoutConversion.h43
-rw-r--r--compiler/enco/core/src/Transforms/DataLayoutConversion.test.cpp33
-rw-r--r--compiler/enco/core/src/Transforms/DeadBagElimination.cpp72
-rw-r--r--compiler/enco/core/src/Transforms/DeadBagElimination.h48
-rw-r--r--compiler/enco/core/src/Transforms/DeadObjectElimination.cpp77
-rw-r--r--compiler/enco/core/src/Transforms/DeadObjectElimination.h47
-rw-r--r--compiler/enco/core/src/Transforms/Duplicate.cpp135
-rw-r--r--compiler/enco/core/src/Transforms/Duplicate.h43
-rw-r--r--compiler/enco/core/src/Transforms/DuplicatedObjectReduction.cpp119
-rw-r--r--compiler/enco/core/src/Transforms/DuplicatedObjectReduction.h73
-rw-r--r--compiler/enco/core/src/Transforms/FeatureUnification.cpp216
-rw-r--r--compiler/enco/core/src/Transforms/FeatureUnification.h68
-rw-r--r--compiler/enco/core/src/Transforms/FreeInstrElimination.cpp65
-rw-r--r--compiler/enco/core/src/Transforms/FreeInstrElimination.h54
-rw-r--r--compiler/enco/core/src/Transforms/FreeInstrElimination.test.cpp34
-rw-r--r--compiler/enco/core/src/Transforms/FreeOpElimination.cpp59
-rw-r--r--compiler/enco/core/src/Transforms/FreeOpElimination.h54
-rw-r--r--compiler/enco/core/src/Transforms/FreeOpElimination.test.cpp34
-rw-r--r--compiler/enco/core/src/Transforms/GlobalDataGeneration.cpp181
-rw-r--r--compiler/enco/core/src/Transforms/GlobalDataGeneration.h54
-rw-r--r--compiler/enco/core/src/Transforms/IdenticalObjectReduction.cpp139
-rw-r--r--compiler/enco/core/src/Transforms/IdenticalObjectReduction.h69
-rw-r--r--compiler/enco/core/src/Transforms/IdenticalObjectReduction.test.cpp32
-rw-r--r--compiler/enco/core/src/Transforms/IndirectCopyElimination.cpp84
-rw-r--r--compiler/enco/core/src/Transforms/IndirectCopyElimination.h60
-rw-r--r--compiler/enco/core/src/Transforms/IntrinsicSelection.cpp100
-rw-r--r--compiler/enco/core/src/Transforms/IntrinsicSelection.h47
-rw-r--r--compiler/enco/core/src/Transforms/Optimizations.cpp257
-rw-r--r--compiler/enco/core/src/Transforms/Optimizations.h123
-rw-r--r--compiler/enco/core/src/Transforms/Split.cpp1233
-rw-r--r--compiler/enco/core/src/Transforms/Split.h48
-rw-r--r--compiler/enco/core/src/Usage.cpp58
-rw-r--r--compiler/enco/core/src/Usage.h34
-rw-r--r--compiler/enco/core/src/coex/IR.h109
-rw-r--r--compiler/enco/core/src/coex/IR.test.cpp38
-rw-r--r--compiler/enco/frontend/CMakeLists.txt1
-rw-r--r--compiler/enco/frontend/caffe/CMakeLists.txt39
-rw-r--r--compiler/enco/frontend/caffe/src/ConcatSpec.cpp40
-rw-r--r--compiler/enco/frontend/caffe/src/ConcatSpec.h47
-rw-r--r--compiler/enco/frontend/caffe/src/ConcatSpec.test.cpp42
-rw-r--r--compiler/enco/frontend/caffe/src/Context.cpp21
-rw-r--r--compiler/enco/frontend/caffe/src/Context.h112
-rw-r--r--compiler/enco/frontend/caffe/src/Convert.cpp40
-rw-r--r--compiler/enco/frontend/caffe/src/Convert.h36
-rw-r--r--compiler/enco/frontend/caffe/src/ConvolutionSpec.cpp147
-rw-r--r--compiler/enco/frontend/caffe/src/ConvolutionSpec.h59
-rw-r--r--compiler/enco/frontend/caffe/src/ConvolutionSpec.test.cpp405
-rw-r--r--compiler/enco/frontend/caffe/src/Entry.cpp62
-rw-r--r--compiler/enco/frontend/caffe/src/Frontend.cpp135
-rw-r--r--compiler/enco/frontend/caffe/src/Frontend.h43
-rw-r--r--compiler/enco/frontend/caffe/src/GraphBuilder.cpp21
-rw-r--r--compiler/enco/frontend/caffe/src/GraphBuilder.h36
-rw-r--r--compiler/enco/frontend/caffe/src/GraphBuilderRegistry.cpp47
-rw-r--r--compiler/enco/frontend/caffe/src/GraphBuilderRegistry.h54
-rw-r--r--compiler/enco/frontend/caffe/src/IRBuilder.h180
-rw-r--r--compiler/enco/frontend/caffe/src/Importer.cpp52
-rw-r--r--compiler/enco/frontend/caffe/src/Importer.h29
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/BatchNorm.cpp254
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/BatchNorm.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Concatenation.cpp138
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Concatenation.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Convolution.cpp197
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Convolution.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Eltwise.cpp134
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Eltwise.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Input.cpp60
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Input.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Pooling.cpp138
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Pooling.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/ReLU.cpp83
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/ReLU.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Scale.cpp160
-rw-r--r--compiler/enco/frontend/caffe/src/Layer/Scale.h35
-rw-r--r--compiler/enco/frontend/caffe/src/Padding.h69
-rw-r--r--compiler/enco/frontend/caffe/src/Padding.test.cpp48
-rw-r--r--compiler/enco/frontend/caffe/src/PaddingUtils.cpp131
-rw-r--r--compiler/enco/frontend/caffe/src/PaddingUtils.h81
-rw-r--r--compiler/enco/frontend/caffe/src/PoolingSpec.cpp148
-rw-r--r--compiler/enco/frontend/caffe/src/PoolingSpec.h62
-rw-r--r--compiler/enco/frontend/caffe/src/PoolingSpec.test.cpp294
-rw-r--r--compiler/enco/frontend/caffe/src/ShapeQuery.cpp40
-rw-r--r--compiler/enco/frontend/caffe/src/ShapeQuery.h75
-rw-r--r--compiler/enco/frontend/tflite/CMakeLists.txt36
-rw-r--r--compiler/enco/frontend/tflite/schema/schema.fbs734
-rw-r--r--compiler/enco/frontend/tflite/schema/schema.meta2
-rw-r--r--compiler/enco/frontend/tflite/src/Context.cpp116
-rw-r--r--compiler/enco/frontend/tflite/src/Context.h169
-rw-r--r--compiler/enco/frontend/tflite/src/Convert.cpp57
-rw-r--r--compiler/enco/frontend/tflite/src/Convert.h43
-rw-r--r--compiler/enco/frontend/tflite/src/Entry.cpp36
-rw-r--r--compiler/enco/frontend/tflite/src/Frontend.cpp198
-rw-r--r--compiler/enco/frontend/tflite/src/Frontend.h40
-rw-r--r--compiler/enco/frontend/tflite/src/Frontend.test.cpp41
-rw-r--r--compiler/enco/frontend/tflite/src/GraphBuilder.h46
-rw-r--r--compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h88
-rw-r--r--compiler/enco/frontend/tflite/src/IRBuilder.h178
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Activation.cpp96
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Activation.h37
-rw-r--r--compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp126
-rw-r--r--compiler/enco/frontend/tflite/src/Op/AveragePool2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Concatenation.cpp252
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Concatenation.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Conv2D.cpp181
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Conv2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp230
-rw-r--r--compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Div.cpp116
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Div.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp123
-rw-r--r--compiler/enco/frontend/tflite/src/Op/MaxPool2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Padding.cpp105
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Padding.h42
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU6.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU6.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Reshape.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Reshape.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Sub.cpp112
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Sub.h38
-rw-r--r--compiler/enco/frontend/tflite/src/RawModel.h29
-rw-r--r--compiler/enco/frontend/tflite/src/RawModelLoader.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/RawModelLoader.h29
-rw-r--r--compiler/enco/frontend/tflite/src/TensorBags.h65
-rw-r--r--compiler/enco/requires.cmake8
-rw-r--r--compiler/enco/test/CMakeLists.txt1
-rw-r--r--compiler/enco/test/basic/000/CMakeLists.txt26
-rw-r--r--compiler/enco/test/basic/000/enco.test.cpp81
-rw-r--r--compiler/enco/test/basic/CMakeLists.txt1
-rw-r--r--compiler/enco/test/binder.cpp188
-rw-r--r--compiler/enco/test/caffe/CMakeLists.txt141
-rwxr-xr-xcompiler/enco/test/caffe/runall.sh85
-rw-r--r--compiler/enco/test/tflite/AveragePool2D_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/AveragePool2D_000/test.recipe24
-rw-r--r--compiler/enco/test/tflite/AveragePool2D_001/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/AveragePool2D_001/test.recipe24
-rw-r--r--compiler/enco/test/tflite/CMakeLists.txt108
-rw-r--r--compiler/enco/test/tflite/Concat_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Concat_000/test.recipe28
-rw-r--r--compiler/enco/test/tflite/Concat_001/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Concat_001/test.recipe29
-rw-r--r--compiler/enco/test/tflite/Concat_002/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Concat_002/test.recipe29
-rw-r--r--compiler/enco/test/tflite/Concat_003/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Concat_003/test.recipe29
-rw-r--r--compiler/enco/test/tflite/Conv2D_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Conv2D_000/test.recipe45
-rw-r--r--compiler/enco/test/tflite/Conv2D_001/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Conv2D_001/test.recipe45
-rw-r--r--compiler/enco/test/tflite/Conv2D_002/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Conv2D_002/test.recipe46
-rw-r--r--compiler/enco/test/tflite/Conv2D_003/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Conv2D_003/test.recipe45
-rw-r--r--compiler/enco/test/tflite/Conv2D_004/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Conv2D_004/test.recipe45
-rw-r--r--compiler/enco/test/tflite/DepthwiseConv2D_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/DepthwiseConv2D_000/test.recipe48
-rw-r--r--compiler/enco/test/tflite/DepthwiseConv2D_001/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/DepthwiseConv2D_001/test.recipe46
-rw-r--r--compiler/enco/test/tflite/Div_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Div_000/test.recipe27
-rw-r--r--compiler/enco/test/tflite/MaxPool2D_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/MaxPool2D_000/test.recipe24
-rw-r--r--compiler/enco/test/tflite/ReLU6_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/ReLU6_000/test.recipe17
-rw-r--r--compiler/enco/test/tflite/ReLU_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/ReLU_000/test.recipe17
-rw-r--r--compiler/enco/test/tflite/Regression_0000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Regression_0000/test.recipe84
-rw-r--r--compiler/enco/test/tflite/Regression_0001/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Regression_0001/test.recipe50
-rw-r--r--compiler/enco/test/tflite/Regression_0002/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Regression_0002/test.recipe45
-rw-r--r--compiler/enco/test/tflite/Regression_0003/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Regression_0003/test.recipe33
-rw-r--r--compiler/enco/test/tflite/Regression_0004/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Regression_0004/test.recipe27
-rw-r--r--compiler/enco/test/tflite/Reshape_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Reshape_000/test.recipe21
-rw-r--r--compiler/enco/test/tflite/Sub_000/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/Sub_000/test.recipe27
-rw-r--r--compiler/enco/test/tflite/empty/INFERENCE0
-rw-r--r--compiler/enco/test/tflite/empty/test.recipe0
-rwxr-xr-xcompiler/enco/test/tflite/runall.sh83
259 files changed, 20615 insertions, 0 deletions
diff --git a/compiler/enco/CMakeLists.txt b/compiler/enco/CMakeLists.txt
new file mode 100644
index 000000000..17300e25e
--- /dev/null
+++ b/compiler/enco/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_subdirectory(core)
+add_subdirectory(frontend)
+add_subdirectory(cli)
+add_subdirectory(test)
diff --git a/compiler/enco/README.md b/compiler/enco/README.md
new file mode 100644
index 000000000..d995a1e55
--- /dev/null
+++ b/compiler/enco/README.md
@@ -0,0 +1,25 @@
+# enco
+
+_enco_ is a tool which translates an NN model into a C++ source code that implements the following functions:
+```
+struct Network;
+
+Network *Network_construct();
+void Network_destruct(Network *net);
+
+unsigned Network_input_count(const Network *);
+const char *Network_input_name(const Network *, unsigned n);
+unsigned Network_input_rank(const Network *, unsigned n);
+unsigned Network_input_dim(const Network *, unsigned n, unsigned axis);
+void Network_input_bind(Network *net, unsigned n, const void *ptr, unsigned len);
+
+unsigned Network_output_count(const Network *net);
+const char *Network_output_name(const Network *, unsigned n);
+unsigned Network_output_rank(const Network *, unsigned n);
+unsigned Network_output_dim(const Network *, unsigned n, unsigned axis);
+void Network_output_bind(Network *net, unsigned n, void *ptr, unsigned len);
+
+void Network_invoke(Network *net);
+```
+
+Generated C++ code internally uses Android NN API for acceleration.
diff --git a/compiler/enco/cli/CMakeLists.txt b/compiler/enco/cli/CMakeLists.txt
new file mode 100644
index 000000000..5a43ab655
--- /dev/null
+++ b/compiler/enco/cli/CMakeLists.txt
@@ -0,0 +1,11 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_executable(enco-cli ${SOURCES})
+target_include_directories(enco-cli PRIVATE src)
+target_link_libraries(enco-cli enco_intf_cmdline)
+target_link_libraries(enco-cli enco_intf_frontend)
+target_link_libraries(enco-cli enco_core)
+target_link_libraries(enco-cli stdex)
+target_link_libraries(enco-cli dl)
+# Let's use project-wide compile options
+target_link_libraries(enco-cli nncc_common)
diff --git a/compiler/enco/cli/src/Driver.cpp b/compiler/enco/cli/src/Driver.cpp
new file mode 100644
index 000000000..185bb13b9
--- /dev/null
+++ b/compiler/enco/cli/src/Driver.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <enco/Frontend.h>
+#include <enco/Backend.h>
+
+#include <cmdline/View.h>
+
+#include <string>
+#include <vector>
+
+#include <functional>
+
+namespace cmdline
+{
+
+// TODO Extract this helper class
+class Vector : public cmdline::View
+{
+public:
+ uint32_t size(void) const { return _args.size(); }
+
+public:
+ const char *at(uint32_t nth) const { return _args.at(nth).c_str(); }
+
+public:
+ Vector &append(const std::string &arg)
+ {
+ _args.emplace_back(arg);
+ return (*this);
+ }
+
+private:
+ std::vector<std::string> _args;
+};
+
+} // namespace cmdline
+
+namespace
+{
+
+class Zone
+{
+public:
+ Zone() = default;
+
+public:
+ const cmdline::View *args(void) const { return &_args; }
+
+public:
+ void append(const std::string &arg) { _args.append(arg); }
+
+private:
+ cmdline::Vector _args;
+};
+
+} // namespace
+
+#include <dlfcn.h>
+
+namespace
+{
+
+class FrontendFactory
+{
+public:
+ FrontendFactory(const std::string &path)
+ {
+ _handle = dlopen(path.c_str(), RTLD_LAZY);
+ assert(_handle != nullptr);
+ }
+
+public:
+ // Copy is not allowed to avoid double close
+ FrontendFactory(const FrontendFactory &) = delete;
+ FrontendFactory(FrontendFactory &&) = delete;
+
+public:
+ ~FrontendFactory() { dlclose(_handle); }
+
+private:
+ using Entry = std::unique_ptr<enco::Frontend> (*)(const cmdline::View &);
+
+private:
+ Entry entry(void) const
+ {
+ auto entry = reinterpret_cast<Entry>(dlsym(_handle, "make_frontend"));
+ assert(entry != nullptr);
+ return entry;
+ }
+
+public:
+ std::unique_ptr<enco::Frontend> make(const cmdline::View *args) const
+ {
+ auto fn = entry();
+ return fn(*args);
+ }
+
+private:
+ void *_handle;
+};
+
+} // namespace
+
+namespace
+{
+
+class FrontendZone : public Zone
+{
+public:
+ FrontendZone(const std::string &path) : _factory{path}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const FrontendFactory *factory(void) const { return &_factory; }
+
+private:
+ FrontendFactory _factory;
+};
+
+} // namespace
+
+#include <stdex/Memory.h>
+
+#include <map>
+
+#include <iostream>
+#include <stdexcept>
+
+static int entry(int argc, char **argv)
+{
+ // Usage:
+ // [Command] --frontend [Frontend .so path] --frontend-arg ...
+ std::unique_ptr<FrontendZone> frontend_zone;
+ cmdline::Vector backend_args;
+
+ // Simple argument parser (based on map)
+ std::map<std::string, std::function<void(const std::string &arg)>> argparse;
+
+ argparse["--frontend"] = [&](const std::string &path) {
+ frontend_zone = stdex::make_unique<FrontendZone>(path);
+ };
+
+ argparse["--frontend-arg"] = [&](const std::string &arg) { frontend_zone->append(arg); };
+ argparse["--backend-arg"] = [&](const std::string &arg) { backend_args.append(arg); };
+
+ if (argc < 2)
+ {
+ std::cerr << "Usage:" << std::endl;
+ std::cerr << "[Command] --frontend [.so path]" << std::endl;
+ std::cerr << " --frontend-arg [argument] ..." << std::endl;
+ std::cerr << " --backend-arg [argument] ..." << std::endl;
+ return 255;
+ }
+
+ for (int n = 1; n < argc; n += 2)
+ {
+ const std::string tag{argv[n]};
+ const std::string arg{argv[n + 1]};
+
+ auto it = argparse.find(tag);
+
+ if (it == argparse.end())
+ {
+ std::cerr << "Option '" << tag << "' is not supported" << std::endl;
+ return 255;
+ }
+
+ it->second(arg);
+ }
+
+ assert(frontend_zone != nullptr);
+
+ auto frontend = frontend_zone->factory()->make(frontend_zone->args());
+
+ auto bundle = frontend->load();
+
+ auto backend = make_backend(backend_args);
+
+ backend->compile(bundle.module(), bundle.data());
+
+ return 0;
+}
+
+#ifdef NDEBUG
+int main(int argc, char **argv)
+{
+ try
+ {
+ return entry(argc, argv);
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << "ERROR: " << e.what() << std::endl;
+ }
+
+ return 255;
+}
+#else // NDEBUG
+int main(int argc, char **argv)
+{
+ // NOTE main does not catch internal exceptions for debug build to make it easy to
+ // check the stacktrace with a debugger
+ return entry(argc, argv);
+}
+#endif // !NDEBUG
diff --git a/compiler/enco/core/CMakeLists.txt b/compiler/enco/core/CMakeLists.txt
new file mode 100644
index 000000000..f437e687a
--- /dev/null
+++ b/compiler/enco/core/CMakeLists.txt
@@ -0,0 +1,35 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
+
+###
+### enco_core is built as a shared library to support "interactive debugging".
+###
+### interactive debugging helpers are stripped during linking when enco_core is
+### built as a static library
+###
+add_library(enco_core SHARED ${SOURCES})
+target_include_directories(enco_core PRIVATE src)
+target_include_directories(enco_core PUBLIC include)
+target_link_libraries(enco_core PUBLIC enco_intf_cmdline)
+target_link_libraries(enco_core PUBLIC coco_core)
+target_link_libraries(enco_core PUBLIC coco_generic)
+# These libraries are linked for internal use, and thus do not appear in public headers.
+target_link_libraries(enco_core PRIVATE pp)
+target_link_libraries(enco_core PRIVATE morph)
+target_link_libraries(enco_core PRIVATE stdex)
+# Let's use nncc project-wide build options
+target_link_libraries(enco_core PRIVATE nncc_common)
+
+nnas_find_package(GTest QUIET)
+
+if(NOT GTest_FOUND)
+ return()
+endif(NOT GTest_FOUND)
+
+add_executable(enco_core_test ${TESTS})
+target_include_directories(enco_core_test PRIVATE src)
+target_link_libraries(enco_core_test gtest_main)
+target_link_libraries(enco_core_test enco_core)
+target_link_libraries(enco_core_test morph)
+add_test(enco_core_test enco_core_test)
diff --git a/compiler/enco/core/include/enco/Backend.h b/compiler/enco/core/include/enco/Backend.h
new file mode 100644
index 000000000..5da903ed2
--- /dev/null
+++ b/compiler/enco/core/include/enco/Backend.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_BACKEND_H__
+#define __ENCO_BACKEND_H__
+
+#include "cmdline/View.h"
+
+#include "coco/IR/Module.h"
+#include "coco/IR/Data.h"
+
+#include <memory>
+
+namespace enco
+{
+
+struct Backend
+{
+ virtual ~Backend() = default;
+
+ virtual void compile(coco::Module *m, coco::Data *d) = 0;
+};
+
+} // namespace enco
+
+std::unique_ptr<enco::Backend> make_backend(const cmdline::View &);
+
+#endif // __ENCO_BACKEND_H__
diff --git a/compiler/enco/core/src/ANN/Binder.h b/compiler/enco/core/src/ANN/Binder.h
new file mode 100644
index 000000000..71b95676b
--- /dev/null
+++ b/compiler/enco/core/src/ANN/Binder.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_BINDER_H__
+#define __ANN_BINDER_H__
+
+#include "ANN/IR/Module.h"
+
+#include <coco/IR.h>
+
+#include <morph/nnapi.h>
+
+#include <type_traits>
+
+/**
+ * @brief A bridge between ann::Module and coco::Block
+ */
+class ANNBinder
+{
+public:
+ ANNBinder(coco::Block *block, std::unique_ptr<ann::Module> &&module)
+ : _block{block}, _module{std::move(module)}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const coco::Block *block(void) const { return _block; }
+ coco::Block *block(void) { return _block; }
+
+public:
+ const ann::Module *module(void) const { return _module.get(); }
+
+public:
+ /**
+ * @brief Return the set of bags that the current ANN subnet accesses
+ */
+ std::set<coco::Bag *> bags(void) const
+ {
+ std::set<coco::Bag *> res;
+
+ for (auto it = _operands.begin(); it != _operands.end(); ++it)
+ {
+ res.insert(it->first);
+ }
+
+ return res;
+ }
+
+public:
+ template <typename T> ann::OperandID addOperand(void)
+ {
+ return _module->operand()->create(ann::dtype<T>());
+ };
+
+ template <typename T> ann::OperandID addOperand(const nncc::core::ADT::tensor::Shape &shape)
+ {
+ return _module->operand()->create(ann::dtype<T>(), shape);
+ }
+
+public:
+ template <typename T> ann::OperandID addOperand(const coco::FeatureObject *obj)
+ {
+ auto bag = obj->bag();
+ assert(bag != nullptr);
+
+ auto it = _operands.find(bag);
+
+ if (it != _operands.end())
+ {
+ return it->second;
+ }
+
+ auto operand = addOperand<T>(morph::nnapi::as_tensor_shape(obj->shape()));
+ _operands[obj->bag()] = operand;
+ return operand;
+ };
+
+ template <typename T> ann::OperandID addOperand(const coco::KernelObject *obj)
+ {
+ auto bag = obj->bag();
+ assert(bag != nullptr);
+
+ auto it = _operands.find(bag);
+
+ if (it != _operands.end())
+ {
+ return it->second;
+ }
+
+ auto operand = addOperand<T>(morph::nnapi::as_tensor_shape(obj->shape()));
+ _operands[obj->bag()] = operand;
+ return operand;
+ };
+
+public:
+ /// @brief Set scalar weight
+ template <typename T> void setOperand(const ann::OperandID &id, const T &value)
+ {
+ static_assert(std::is_arithmetic<T>::value, "T should be arithmetic");
+ auto weight = _module->weight()->create();
+ weight->fill(value);
+ _module->operand()->at(id)->weight(weight);
+ }
+
+ /// @brief Set non-scalar weight
+ template <typename It> void setOperand(const ann::OperandID &id, It beg, It end)
+ {
+ auto weight = _module->weight()->create();
+ weight->fill(beg, end);
+ _module->operand()->at(id)->weight(weight);
+ }
+
+public:
+ void addOperation(ann::Operation::Code code, std::initializer_list<ann::OperandID> inputs,
+ std::initializer_list<ann::OperandID> outputs)
+ {
+ _module->operation()->create(code, inputs, outputs);
+ }
+
+public:
+ /**
+ * @brief Identify a sequence of coco::Bag * as subnet's inputs
+ *
+ * NOTE 1. This method takes input iterator over coco::Bag * values
+ * NOTE 2. All the identifyInputs calls except the last one will be ignored if there are
+ * multiple identifyInputs calls
+ */
+ template <typename It> void identifyInputs(It beg, It end)
+ {
+ _inputs.clear();
+ _module->input()->clear();
+
+ for (auto it = beg; it != end; ++it)
+ {
+ auto const bag = *it;
+ _inputs.emplace_back(*it);
+ _module->input()->emplace_back(_operands.at(bag));
+ }
+ }
+
+ template <typename T> void identifyInputs(T &&values)
+ {
+ identifyInputs(std::begin(values), std::end(values));
+ }
+
+public:
+ /**
+ * @brief Identify a sequence of coco::Bag * as subnet's outputs
+ *
+ * NOTE 1. This method takes input iterator over coco::Bag * values
+ * NOTE 2. All the identifyOutputs calls except the last one will be ignored if there are
+ * multiple identifyOutputs calls
+ */
+ template <typename It> void identifyOutputs(It beg, It end)
+ {
+ _outputs.clear();
+ _module->output()->clear();
+
+ for (auto it = beg; it != end; ++it)
+ {
+ auto const bag = *it;
+ _outputs.emplace_back(bag);
+ _module->output()->emplace_back(_operands.at(bag));
+ }
+ }
+
+ template <typename T> void identifyOutputs(T &&values)
+ {
+ identifyOutputs(std::begin(values), std::end(values));
+ }
+
+public:
+ coco::Bag *input(uint32_t n) const { return _inputs.at(n); }
+ coco::Bag *output(uint32_t n) const { return _outputs.at(n); }
+
+public:
+ /**
+ * @brief Return true if a given bag has an associated operand in ANN IR
+ */
+ bool associated(coco::Bag *b) const { return _operands.find(b) != _operands.end(); }
+
+ /**
+ * @brief Return operand ID associated with a given bag
+ * @note The behavior of operand(b) is defined only when associated(b) holds.
+ */
+ ann::OperandID operand(coco::Bag *b) const
+ {
+ assert(associated(b));
+ return _operands.at(b);
+ }
+
+private:
+ coco::Block *const _block;
+ std::unique_ptr<ann::Module> _module;
+
+private:
+ std::vector<coco::Bag *> _inputs;
+ std::vector<coco::Bag *> _outputs;
+
+private:
+ /// @brief Operand ID assigned for each coco::Bag
+ std::map<coco::Bag *, ann::OperandID> _operands;
+};
+
+#endif // __ANN_BINDER_H__
diff --git a/compiler/enco/core/src/ANN/Context.cpp b/compiler/enco/core/src/ANN/Context.cpp
new file mode 100644
index 000000000..d4d1882fa
--- /dev/null
+++ b/compiler/enco/core/src/ANN/Context.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ANN/Context.h"
+
+#include <stdex/Memory.h>
+
+ANNBinder *ANNContext::create(coco::Block *blk)
+{
+ auto mod = stdex::make_unique<ann::Module>();
+ auto obj = stdex::make_unique<ANNBinder>(blk, std::move(mod));
+ auto ptr = obj.get();
+
+ _binders.emplace_back(std::move(obj));
+ _map[blk] = ptr;
+
+ return ptr;
+}
diff --git a/compiler/enco/core/src/ANN/Context.h b/compiler/enco/core/src/ANN/Context.h
new file mode 100644
index 000000000..915651eb5
--- /dev/null
+++ b/compiler/enco/core/src/ANN/Context.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_CONTEXT_H__
+#define __ANN_CONTEXT_H__
+
+#include "ANN/Binder.h"
+
+#include <map>
+#include <vector>
+
+#include <memory>
+
+struct ANNContext
+{
+public:
+ ANNBinder *create(coco::Block *blk);
+
+public:
+ uint32_t count(void) const { return _binders.size(); }
+
+public:
+ ANNBinder *nth(uint32_t n) { return _binders.at(n).get(); }
+ const ANNBinder *nth(uint32_t n) const { return _binders.at(n).get(); }
+
+public:
+ ANNBinder *find(const coco::Block *blk) const
+ {
+ auto it = _map.find(blk);
+
+ if (it == _map.end())
+ {
+ return nullptr;
+ }
+
+ return it->second;
+ }
+
+private:
+ std::vector<std::unique_ptr<ANNBinder>> _binders;
+ std::map<const coco::Block *, ANNBinder *> _map;
+};
+
+#endif // __ANN_CONTEXT_H__
diff --git a/compiler/enco/core/src/ANN/Context.test.cpp b/compiler/enco/core/src/ANN/Context.test.cpp
new file mode 100644
index 000000000..7fd26f30c
--- /dev/null
+++ b/compiler/enco/core/src/ANN/Context.test.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Context.h"
+
+#include <set>
+
+#include <gtest/gtest.h>
+
+namespace
+{
+class ANNContextTest : public ::testing::Test
+{
+public:
+ ANNContextTest() { m = coco::Module::create(); }
+
+public:
+ virtual ~ANNContextTest() = default;
+
+protected:
+ std::unique_ptr<coco::Module> m;
+};
+}
+
+TEST_F(ANNContextTest, constructor)
+{
+ ANNContext ann_ctx;
+
+ ASSERT_EQ(ann_ctx.count(), 0);
+}
+
+TEST_F(ANNContextTest, create)
+{
+ ANNContext ann_ctx;
+
+ auto blk = m->entity()->block()->create();
+ auto binder = ann_ctx.create(blk);
+
+ ASSERT_NE(binder, nullptr);
+}
+
+TEST_F(ANNContextTest, find)
+{
+ ANNContext ann_ctx;
+
+ // CASE: Corresponding binder does not exist
+ {
+ auto blk = m->entity()->block()->create();
+ ASSERT_EQ(ann_ctx.find(blk), nullptr);
+ }
+
+ // CASE: Corresponding binder does exist
+ {
+ auto blk = m->entity()->block()->create();
+ auto binder_created = ann_ctx.create(blk);
+ auto binder_found = ann_ctx.find(blk);
+
+ ASSERT_EQ(binder_created, binder_found);
+ }
+}
diff --git a/compiler/enco/core/src/ANN/IR/DType.cpp b/compiler/enco/core/src/ANN/IR/DType.cpp
new file mode 100644
index 000000000..7d4585a49
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/DType.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DType.h"
+
+namespace ann
+{
+
+template <> DType dtype<int32_t>(void) { return DType::S32; }
+template <> DType dtype<float>(void) { return DType::F32; }
+
+} // namespace ann
diff --git a/compiler/enco/core/src/ANN/IR/DType.h b/compiler/enco/core/src/ANN/IR/DType.h
new file mode 100644
index 000000000..b7583b09a
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/DType.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_DTYPE_H__
+#define __ANN_IR_DTYPE_H__
+
+#include <cstdint>
+
+namespace ann
+{
+
+enum class DType
+{
+ UNK,
+ S32,
+ F32
+};
+
+template <typename T> DType dtype(void);
+
+} // namespace ann
+
+#endif // __ANN_IR_DTYPE_H__
diff --git a/compiler/enco/core/src/ANN/IR/DType.test.cpp b/compiler/enco/core/src/ANN/IR/DType.test.cpp
new file mode 100644
index 000000000..8184ece9b
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/DType.test.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DType.h"
+
+#include <gtest/gtest.h>
+
+TEST(ANN_IR_DTYPE, dtype)
+{
+ ASSERT_EQ(ann::dtype<int>(), ann::DType::S32);
+ ASSERT_EQ(ann::dtype<float>(), ann::DType::F32);
+}
diff --git a/compiler/enco/core/src/ANN/IR/InputList.h b/compiler/enco/core/src/ANN/IR/InputList.h
new file mode 100644
index 000000000..51f0fd95a
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/InputList.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_INPUT_LIST_H__
+#define __ANN_IR_INPUT_LIST_H__
+
+#include "ANN/IR/OperandID.h"
+
+#include <vector>
+
+namespace ann
+{
+
+using InputList = std::vector<OperandID>;
+
+} // namespace ann
+
+#endif // __ANN_IR_INPUT_LIST_H__
diff --git a/compiler/enco/core/src/ANN/IR/Module.h b/compiler/enco/core/src/ANN/IR/Module.h
new file mode 100644
index 000000000..b443b4235
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Module.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_MODULE_H__
+#define __ANN_IR_MODULE_H__
+
+#include "ANN/IR/WeightInventory.h"
+#include "ANN/IR/OperandInventory.h"
+#include "ANN/IR/OperationInventory.h"
+#include "ANN/IR/InputList.h"
+#include "ANN/IR/OutputList.h"
+
+namespace ann
+{
+
+class Module
+{
+public:
+ Module() = default;
+
+public:
+ WeightInventory *weight(void) { return &_weight; }
+ const WeightInventory *weight(void) const { return &_weight; }
+
+ OperandInventory *operand(void) { return &_operand; }
+ const OperandInventory *operand(void) const { return &_operand; }
+
+ OperationInventory *operation(void) { return &_operation; }
+ const OperationInventory *operation(void) const { return &_operation; }
+
+ InputList *input(void) { return &_input; }
+ const InputList *input(void) const { return &_input; }
+
+ OutputList *output(void) { return &_output; }
+ const OutputList *output(void) const { return &_output; }
+
+private:
+ WeightInventory _weight;
+ OperandInventory _operand;
+ OperationInventory _operation;
+ InputList _input;
+ OutputList _output;
+};
+
+} // namespace ann
+
+#endif // __ANN_IR_MODULE_H__
diff --git a/compiler/enco/core/src/ANN/IR/Module.test.cpp b/compiler/enco/core/src/ANN/IR/Module.test.cpp
new file mode 100644
index 000000000..4b946c875
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Module.test.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Module.h"
+
+#include <gtest/gtest.h>
+
+TEST(ANN_IR_MODULE, constructor)
+{
+ ann::Module m;
+
+ ann::Module *mutable_ptr = &m;
+ const ann::Module *immutable_ptr = &m;
+
+ ASSERT_NE(mutable_ptr->weight(), nullptr);
+ ASSERT_EQ(mutable_ptr->weight(), immutable_ptr->weight());
+
+ ASSERT_NE(mutable_ptr->operand(), nullptr);
+ ASSERT_EQ(mutable_ptr->operand(), immutable_ptr->operand());
+
+ ASSERT_NE(mutable_ptr->operation(), nullptr);
+ ASSERT_EQ(mutable_ptr->operation(), immutable_ptr->operation());
+}
diff --git a/compiler/enco/core/src/ANN/IR/Operand.h b/compiler/enco/core/src/ANN/IR/Operand.h
new file mode 100644
index 000000000..3b15ed739
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Operand.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_OPERAND_H__
+#define __ANN_IR_OPERAND_H__
+
+#include "ANN/IR/DType.h"
+#include "ANN/IR/Weight.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+namespace ann
+{
+
+class Operand
+{
+public:
+ virtual ~Operand() = default;
+
+public:
+ DType dtype(void) const { return _dtype; }
+ void dtype(const DType &dtype) { _dtype = dtype; }
+
+ const Weight *weight(void) const { return _weight; }
+ void weight(const Weight *weight) { _weight = weight; }
+
+private:
+ DType _dtype = DType::UNK;
+ const Weight *_weight = nullptr;
+};
+
+} // namespace ann
+
+namespace ann
+{
+
+/**
+ * @brief Plain (non-qunatized) Scalar Operand
+ */
+struct ScalarOperand final : public Operand
+{
+};
+
+} // namespace ann
+
+namespace ann
+{
+
+/**
+ * @brief Plain (non-qunatized) Tensor Operand
+ */
+struct TensorOperand final : public Operand
+{
+public:
+ TensorOperand(const nncc::core::ADT::tensor::Shape &shape) : _shape{shape}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const nncc::core::ADT::tensor::Shape &shape(void) const { return _shape; }
+
+private:
+ nncc::core::ADT::tensor::Shape _shape;
+};
+
+} // namespace ann
+
+#endif // __ANN_IR_OPERAND_H__
diff --git a/compiler/enco/core/src/ANN/IR/Operand.test.cpp b/compiler/enco/core/src/ANN/IR/Operand.test.cpp
new file mode 100644
index 000000000..98ac4ebd0
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Operand.test.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Operand.h"
+
+#include <gtest/gtest.h>
+
+TEST(ANN_IR_SCALAR_OPERAND, constructor)
+{
+ const ann::ScalarOperand operand;
+
+ ASSERT_EQ(operand.dtype(), ann::DType::UNK);
+ ASSERT_EQ(operand.weight(), nullptr);
+}
+
+TEST(ANN_IR_TENSOR_OPERAND, constructor)
+{
+ const nncc::core::ADT::tensor::Shape shape{1, 2};
+ const ann::TensorOperand operand{shape};
+
+ ASSERT_EQ(operand.dtype(), ann::DType::UNK);
+ ASSERT_EQ(operand.weight(), nullptr);
+ ASSERT_EQ(operand.shape(), shape);
+}
diff --git a/compiler/enco/core/src/ANN/IR/OperandID.h b/compiler/enco/core/src/ANN/IR/OperandID.h
new file mode 100644
index 000000000..f1617aacb
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperandID.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_OPERAND_ID_H__
+#define __ANN_IR_OPERAND_ID_H__
+
+#include <cstdint>
+
+namespace ann
+{
+
+class OperandID
+{
+public:
+ OperandID() : _value{0}
+ {
+ // DO NOTHING
+ }
+
+public:
+ explicit OperandID(uint32_t value) : _value{value}
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint32_t value(void) const { return _value; }
+
+private:
+ uint32_t _value;
+};
+
+} // namespace ann
+
+#endif // __ANN_IR_OPERAND_ID_H__
diff --git a/compiler/enco/core/src/ANN/IR/OperandID.test.cpp b/compiler/enco/core/src/ANN/IR/OperandID.test.cpp
new file mode 100644
index 000000000..04c23b9c8
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperandID.test.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperandID.h"
+
+#include <gtest/gtest.h>
+
+TEST(ANN_IR_OPERAND_ID, default_constructor)
+{
+ ann::OperandID id;
+
+ ASSERT_EQ(id.value(), 0);
+}
+
+TEST(ANN_IR_OPERAND_ID, explicit_constructor)
+{
+ ann::OperandID id{4};
+
+ ASSERT_EQ(id.value(), 4);
+}
diff --git a/compiler/enco/core/src/ANN/IR/OperandInventory.cpp b/compiler/enco/core/src/ANN/IR/OperandInventory.cpp
new file mode 100644
index 000000000..c7ad38811
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperandInventory.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ANN/IR/OperandInventory.h"
+
+#include <stdex/Memory.h>
+
+using stdex::make_unique;
+
+namespace ann
+{
+
+OperandID OperandInventory::create(const DType &dtype)
+{
+ uint32_t id = _operands.size();
+
+ auto operand = make_unique<ScalarOperand>();
+ operand->dtype(dtype);
+
+ _operands.emplace_back(std::move(operand));
+
+ return OperandID{id};
+}
+
+OperandID OperandInventory::create(const DType &dtype, const nncc::core::ADT::tensor::Shape &shape)
+{
+ uint32_t id = _operands.size();
+
+ auto operand = make_unique<TensorOperand>(shape);
+ operand->dtype(dtype);
+
+ _operands.emplace_back(std::move(operand));
+
+ return OperandID{id};
+}
+
+Operand *OperandInventory::at(const OperandID &id) { return _operands.at(id.value()).get(); }
+
+const Operand *OperandInventory::at(const OperandID &id) const
+{
+ return _operands.at(id.value()).get();
+}
+
+} // namespace ann
diff --git a/compiler/enco/core/src/ANN/IR/OperandInventory.h b/compiler/enco/core/src/ANN/IR/OperandInventory.h
new file mode 100644
index 000000000..23eb08119
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperandInventory.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_OPERAND_INVENTORY_H__
+#define __ANN_IR_OPERAND_INVENTORY_H__
+
+#include "ANN/IR/OperandID.h"
+#include "ANN/IR/Operand.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <memory>
+#include <vector>
+
+namespace ann
+{
+
+class OperandInventory
+{
+public:
+ OperandID create(const DType &);
+ OperandID create(const DType &, const nncc::core::ADT::tensor::Shape &);
+
+public:
+ template <typename Callable> void each(Callable &&cb) const
+ {
+ for (uint32_t n = 0; n < _operands.size(); ++n)
+ {
+ cb(OperandID{n}, _operands.at(n).get());
+ }
+ }
+
+public:
+ Operand *at(const OperandID &id);
+ const Operand *at(const OperandID &id) const;
+
+private:
+ std::vector<std::unique_ptr<Operand>> _operands;
+};
+
+} // namespace ann
+
+#endif // __ANN_IR_OPERAND_INVENTORY_H__
diff --git a/compiler/enco/core/src/ANN/IR/OperandInventory.test.cpp b/compiler/enco/core/src/ANN/IR/OperandInventory.test.cpp
new file mode 100644
index 000000000..e576752bc
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperandInventory.test.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperandInventory.h"
+
+#include <gtest/gtest.h>
+
+TEST(ANN_IR_OPERAND_INVENTORY, constructor)
+{
+ ann::OperandInventory inven;
+
+ uint32_t count = 0;
+
+ inven.each([&](const ann::OperandID &, const ann::Operand *) { ++count; });
+
+ ASSERT_EQ(count, 0);
+}
diff --git a/compiler/enco/core/src/ANN/IR/Operation.def b/compiler/enco/core/src/ANN/IR/Operation.def
new file mode 100644
index 000000000..68fd394cf
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Operation.def
@@ -0,0 +1,17 @@
+#ifndef ANN_OPERATION
+#error Define ANN_OPERATION first
+#endif // ANN_OPERATION
+
+// ANN_OPERATION(TAG, ENUM_VALUE)
+ANN_OPERATION(ADD, ANEURALNETWORKS_ADD)
+ANN_OPERATION(MUL, ANEURALNETWORKS_MUL)
+ANN_OPERATION(CONV_2D, ANEURALNETWORKS_CONV_2D)
+ANN_OPERATION(DEPTHWISE_CONV_2D, ANEURALNETWORKS_DEPTHWISE_CONV_2D)
+ANN_OPERATION(MAX_POOL_2D, ANEURALNETWORKS_MAX_POOL_2D)
+ANN_OPERATION(AVG_POOL_2D, ANEURALNETWORKS_AVERAGE_POOL_2D)
+ANN_OPERATION(RELU, ANEURALNETWORKS_RELU)
+ANN_OPERATION(RELU6, ANEURALNETWORKS_RELU6)
+ANN_OPERATION(PAD, ANEURALNETWORKS_PAD)
+ANN_OPERATION(CONCAT, ANEURALNETWORKS_CONCATENATION)
+ANN_OPERATION(SUB, ANEURALNETWORKS_SUB)
+ANN_OPERATION(DIV, ANEURALNETWORKS_DIV)
diff --git a/compiler/enco/core/src/ANN/IR/Operation.h b/compiler/enco/core/src/ANN/IR/Operation.h
new file mode 100644
index 000000000..cacc2b794
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Operation.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_OPERATION_H__
+#define __ANN_IR_OPERATION_H__
+
+#include "ANN/IR/OperandID.h"
+
+#include <initializer_list>
+#include <vector>
+
+namespace ann
+{
+
+class Operation
+{
+public:
+ enum class Code
+ {
+#define ANN_OPERATION(TAG, VALUE) TAG,
+#include "Operation.def"
+#undef ANN_OPERATION
+ };
+
+public:
+ Operation(const Code &code, std::initializer_list<OperandID> inputs,
+ std::initializer_list<OperandID> outputs)
+ : _code{code}, _inputs{inputs}, _outputs{outputs}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const Code &code(void) const { return _code; }
+ const std::vector<OperandID> &inputs(void) const { return _inputs; }
+ const std::vector<OperandID> &outputs(void) const { return _outputs; }
+
+private:
+ Code _code;
+ std::vector<OperandID> _inputs;
+ std::vector<OperandID> _outputs;
+};
+
+} // namespace ann
+
+#endif // __ANN_IR_OPERATION_H__
diff --git a/compiler/enco/core/src/ANN/IR/Operation.test.cpp b/compiler/enco/core/src/ANN/IR/Operation.test.cpp
new file mode 100644
index 000000000..d1b716733
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Operation.test.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Operation.h"
+
+#include <gtest/gtest.h>
+
+TEST(ANN_IR_OPERATION, constructor)
+{
+ ann::Operation op{ann::Operation::Code::CONV_2D, {}, {}};
+
+ ASSERT_EQ(op.code(), ann::Operation::Code::CONV_2D);
+ ASSERT_EQ(op.inputs().size(), 0);
+ ASSERT_EQ(op.outputs().size(), 0);
+}
diff --git a/compiler/enco/core/src/ANN/IR/OperationInventory.cpp b/compiler/enco/core/src/ANN/IR/OperationInventory.cpp
new file mode 100644
index 000000000..37d48c170
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperationInventory.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationInventory.h"
+
+#include <stdex/Memory.h>
+
+using stdex::make_unique;
+
+namespace ann
+{
+
+void OperationInventory::create(Operation::Code code, std::initializer_list<OperandID> inputs,
+ std::initializer_list<OperandID> outputs)
+{
+ _operations.emplace_back(make_unique<Operation>(code, inputs, outputs));
+}
+
+} // namespace ann
diff --git a/compiler/enco/core/src/ANN/IR/OperationInventory.h b/compiler/enco/core/src/ANN/IR/OperationInventory.h
new file mode 100644
index 000000000..11c6be98a
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperationInventory.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_OPERATION_INVENTORY_H__
+#define __ANN_IR_OPERATION_INVENTORY_H__
+
+#include "ANN/IR/Operation.h"
+#include "ANN/IR/OperandID.h"
+
+#include <initializer_list>
+
+#include <memory>
+
+namespace ann
+{
+
+class OperationInventory
+{
+public:
+ void create(Operation::Code code, std::initializer_list<OperandID> inputs,
+ std::initializer_list<OperandID> outputs);
+
+public:
+ uint32_t count(void) const { return _operations.size(); }
+
+public:
+ const Operation *at(uint32_t n) const { return _operations.at(n).get(); }
+
+private:
+ std::vector<std::unique_ptr<Operation>> _operations;
+};
+
+} // namespace ann
+
+#endif // __ANN_IR_OPERATION_INVENTORY_H__
diff --git a/compiler/enco/core/src/ANN/IR/OperationInventory.test.cpp b/compiler/enco/core/src/ANN/IR/OperationInventory.test.cpp
new file mode 100644
index 000000000..0e91a4f53
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OperationInventory.test.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationInventory.h"
+
+#include <gtest/gtest.h>
+
+TEST(ANN_IR_OPERATION_INVENTORY, constructor)
+{
+ ann::OperationInventory inven;
+
+ ASSERT_EQ(inven.count(), 0);
+}
+
+TEST(ANN_IR_OPERATION_INVENTORY, create)
+{
+ ann::OperationInventory inven;
+
+ inven.create(ann::Operation::Code::CONV_2D, {ann::OperandID{0}}, {ann::OperandID{3}});
+
+ ASSERT_EQ(inven.count(), 1);
+ ASSERT_NE(inven.at(0), nullptr);
+
+ ASSERT_EQ(inven.at(0)->code(), ann::Operation::Code::CONV_2D);
+ ASSERT_EQ(inven.at(0)->inputs().size(), 1);
+ ASSERT_EQ(inven.at(0)->outputs().size(), 1);
+}
diff --git a/compiler/enco/core/src/ANN/IR/OutputList.h b/compiler/enco/core/src/ANN/IR/OutputList.h
new file mode 100644
index 000000000..2dd891138
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/OutputList.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_OUTPUT_LIST_H__
+#define __ANN_IR_OUTPUT_LIST_H__
+
+#include "ANN/IR/OperandID.h"
+
+#include <vector>
+
+namespace ann
+{
+
+using OutputList = std::vector<OperandID>;
+
+} // namespace ann
+
+#endif // __ANN_IR_OUTPUT_LIST_H__
diff --git a/compiler/enco/core/src/ANN/IR/Weight.h b/compiler/enco/core/src/ANN/IR/Weight.h
new file mode 100644
index 000000000..062aa6d19
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Weight.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANN_IR_WEIGHT_H__
+#define __ANN_IR_WEIGHT_H__
+
+#include <vector>
+
+#include <cstdint>
+#include <type_traits>
+
+namespace ann
+{
+
+/**
+ * @brief A byte buffer holding the serialized value(s) of a weight operand
+ */
+class Weight
+{
+public:
+  /// @brief Pointer to the first byte, or nullptr when the weight is empty
+  //
+  // NOTE std::vector::data() is NOT guaranteed to return nullptr for an empty
+  //      vector, but Weight.test.cpp asserts base() == nullptr on a fresh
+  //      Weight - make that behavior explicit instead of implementation-defined
+  const uint8_t *base(void) const { return _buffer.empty() ? nullptr : _buffer.data(); }
+  /// @brief Size of the buffer in bytes
+  uint32_t size(void) const { return _buffer.size(); }
+
+public:
+  /// @brief Replace the buffer contents with the raw bytes of a single arithmetic value
+  template <typename T> void fill(const T &value)
+  {
+    static_assert(std::is_arithmetic<T>::value, "T should be arithmetic");
+    _buffer.clear();
+
+    auto arr = reinterpret_cast<const uint8_t *>(&value);
+
+    for (uint32_t b = 0; b < sizeof(T); ++b)
+    {
+      _buffer.emplace_back(arr[b]);
+    }
+  }
+
+  /// @brief Replace the buffer contents with the raw bytes of every value in [beg, end)
+  //
+  // NOTE(review): unlike the scalar overload, this one does not verify that the
+  //               iterator's value type is arithmetic - confirm callers only
+  //               pass iterators over arithmetic types
+  template <typename It> void fill(It beg, It end)
+  {
+    _buffer.clear();
+
+    for (auto it = beg; it != end; ++it)
+    {
+      const auto value = *it;
+      auto arr = reinterpret_cast<const uint8_t *>(&value);
+
+      for (uint32_t b = 0; b < sizeof(value); ++b)
+      {
+        _buffer.emplace_back(arr[b]);
+      }
+    }
+  }
+
+private:
+  std::vector<uint8_t> _buffer;
+};
+
+} // namespace ann
+
+#endif // __ANN_IR_WEIGHT_H__
diff --git a/compiler/enco/core/src/ANN/IR/Weight.test.cpp b/compiler/enco/core/src/ANN/IR/Weight.test.cpp
new file mode 100644
index 000000000..53532114c
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/Weight.test.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Weight.h"
+
+#include <gtest/gtest.h>
+
+// A fresh Weight exposes a null base pointer and zero size
+TEST(ANN_IR_WEIGHT, constructor)
+{
+  ann::Weight weight;
+
+  ASSERT_EQ(weight.base(), nullptr);
+  ASSERT_EQ(weight.size(), 0);
+}
+
+// Scalar fill stores the raw bytes of the value; reading them back as int
+// recovers the original value
+TEST(ANN_IR_WEIGHT, fill_scalar_int)
+{
+  ann::Weight weight;
+
+  weight.fill(3);
+
+  ASSERT_NE(weight.base(), nullptr);
+  ASSERT_EQ(*reinterpret_cast<const int *>(weight.base()), 3);
+}
+
+// Iterator-range fill serializes each element in order
+TEST(ANN_IR_WEIGHT, fill_vector_float)
+{
+  std::vector<float> values{1.0f, 2.0f};
+
+  ann::Weight weight;
+
+  weight.fill(values.begin(), values.end());
+
+  ASSERT_NE(weight.base(), nullptr);
+
+  auto arr = reinterpret_cast<const float *>(weight.base());
+
+  ASSERT_FLOAT_EQ(arr[0], 1.0f);
+  ASSERT_FLOAT_EQ(arr[1], 2.0f);
+}
diff --git a/compiler/enco/core/src/ANN/IR/WeightInventory.cpp b/compiler/enco/core/src/ANN/IR/WeightInventory.cpp
new file mode 100644
index 000000000..d8809ac08
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/WeightInventory.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WeightInventory.h"
+
+#include <stdex/Memory.h>
+
+using stdex::make_unique;
+
+namespace ann
+{
+
+/**
+ * @brief Create a new (empty) Weight and return a non-owning pointer to it
+ *
+ * The inventory retains ownership; since each Weight lives on the heap behind
+ * a unique_ptr, the returned pointer stays valid for the inventory's lifetime.
+ */
+Weight *WeightInventory::create(void)
+{
+  auto hnd = make_unique<Weight>();
+  auto ptr = hnd.get();
+  _weights.push_back(std::move(hnd));
+  return ptr;
+}
+
+} // namespace ann
diff --git a/compiler/enco/core/src/ANN/IR/WeightInventory.h b/compiler/enco/core/src/ANN/IR/WeightInventory.h
new file mode 100644
index 000000000..fd166837f
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/WeightInventory.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __WEIGHT_INVENTORY_H__
+#define __WEIGHT_INVENTORY_H__
+
+#include "ANN/IR/Weight.h"
+
+#include <memory>
+
+namespace ann
+{
+
+/**
+ * @brief Owns every Weight created through create()
+ */
+// NOTE(review): this header uses std::vector but includes only <memory>;
+//               it currently compiles through the transitive <vector> include
+//               in ANN/IR/Weight.h - consider including <vector> directly
+class WeightInventory
+{
+public:
+  Weight *create(void);
+
+private:
+  std::vector<std::unique_ptr<Weight>> _weights;
+};
+
+} // namespace ann
+
+#endif // __WEIGHT_INVENTORY_H__
diff --git a/compiler/enco/core/src/ANN/IR/WeightInventory.test.cpp b/compiler/enco/core/src/ANN/IR/WeightInventory.test.cpp
new file mode 100644
index 000000000..143bdfddf
--- /dev/null
+++ b/compiler/enco/core/src/ANN/IR/WeightInventory.test.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WeightInventory.h"
+
+#include <gtest/gtest.h>
+
+// create() returns a freshly-constructed (empty) Weight
+TEST(ANN_IR_WEIGHT_INVENTORY, create)
+{
+  ann::WeightInventory inven;
+
+  auto weight = inven.create();
+
+  ASSERT_EQ(weight->base(), nullptr);
+  ASSERT_EQ(weight->size(), 0);
+}
diff --git a/compiler/enco/core/src/AsmCode.cpp b/compiler/enco/core/src/AsmCode.cpp
new file mode 100644
index 000000000..70d6f30b3
--- /dev/null
+++ b/compiler/enco/core/src/AsmCode.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AsmCode.h"
+
+namespace enco
+{
+
+/**
+ * @brief Emit GNU assembler code that embeds the file '_filename' into the
+ *        read-only data section as a global object named '_varname'
+ *        (via the .incbin directive)
+ */
+void AsmCode::dump(std::ostream &os) const
+{
+  os << ".section .rodata" << std::endl;
+  os << ".global " << _varname << std::endl;
+  // Please refer to https://www.sourceware.org/binutils/docs/as/Type.html#Type for details
+  os << ".type " << _varname << ", STT_OBJECT" << std::endl;
+  os << ".align " << 4 << std::endl;
+  os << _varname << ":" << std::endl;
+  os << ".incbin " << '"' << _filename << '"' << std::endl;
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/AsmCode.h b/compiler/enco/core/src/AsmCode.h
new file mode 100644
index 000000000..c43892888
--- /dev/null
+++ b/compiler/enco/core/src/AsmCode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_ASM_CODE_H__
+#define __ENCO_ASM_CODE_H__
+
+#include <ostream>
+#include <string>
+
+namespace enco
+{
+
+/**
+ * @brief Assembly snippet that embeds a binary file as a global symbol
+ *
+ * dump() emits GNU assembler code placing the contents of 'filename' into
+ * .rodata under the symbol 'varname' (see AsmCode.cpp).
+ */
+class AsmCode
+{
+public:
+  // BUG FIX: the initializer previously read '_filename(unknown)' - 'unknown'
+  // is not a declared identifier, so the constructor parameter 'filename' was
+  // never stored (and the file would not compile). Initialize from 'filename'.
+  AsmCode(const std::string &filename, const std::string &varname)
+    : _filename{filename}, _varname{varname}
+  {
+    // DO NOTHING
+  }
+
+public:
+  /// @brief Write the assembler code to the given stream
+  void dump(std::ostream &) const;
+
+private:
+  std::string _filename; // path of the binary file to embed
+  std::string _varname;  // symbol name for the embedded data
+};
+
+} // namespace enco
+
+/// @brief Stream insertion support: 'os << AsmCode{...}' delegates to dump()
+static inline std::ostream &operator<<(std::ostream &os, const enco::AsmCode &code)
+{
+  code.dump(os);
+  return os;
+}
+
+#endif // __ENCO_ASM_CODE_H__
diff --git a/compiler/enco/core/src/Backend.cpp b/compiler/enco/core/src/Backend.cpp
new file mode 100644
index 000000000..d4bec7447
--- /dev/null
+++ b/compiler/enco/core/src/Backend.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "enco/Backend.h"
+
+#include "IRValidator.h"
+
+#include "Session.h"
+#include "Pipeline.h"
+
+#include "Code.h"
+#include "AsmCode.h"
+#include "CppCode.h"
+
+#include "Transforms/Duplicate.h"
+#include "Transforms/FeatureUnification.h"
+#include "Transforms/AvgPoolLowering.h"
+#include "Transforms/IntrinsicSelection.h"
+#include "Transforms/DataLayoutConversion.h"
+#include "Transforms/IndirectCopyElimination.h"
+#include "Transforms/IdenticalObjectReduction.h"
+#include "Transforms/DuplicatedObjectReduction.h"
+#include "Transforms/DeadObjectElimination.h"
+#include "Transforms/ConstantFolding.h"
+#include "Transforms/CopyLowering.h"
+#include "Transforms/ConcatLowering.h"
+#include "Transforms/FreeInstrElimination.h"
+#include "Transforms/FreeOpElimination.h"
+#include "Transforms/DeadBagElimination.h"
+#include "Transforms/Optimizations.h"
+#include "Transforms/Split.h"
+#include "Transforms/GlobalDataGeneration.h"
+
+#include <stdex/Memory.h>
+
+#include <stdexcept>
+#include <iostream>
+#include <fstream>
+
+using stdex::make_unique;
+using namespace enco;
+
+namespace
+{
+
+// has_inout_bag(m) returns true if there is a pair of coco::Input and coco::Output that share
+// the same bag as their backing storage
+inline bool has_inout_bag(const coco::Module *m)
+{
+  for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+  {
+    auto bag = m->entity()->bag()->at(n);
+
+    if (bag->isInput() && bag->isOutput())
+    {
+      return true;
+    }
+  }
+  return false;
+}
+
+/**
+ * @brief enco::Backend implementation that emits C++/assembly artifacts
+ *
+ * 'prefix' is the output path prefix: compile() writes <prefix>.bin,
+ * <prefix>.embed.S, and <prefix>.cpp.
+ */
+class BackendImpl final : public enco::Backend
+{
+public:
+  BackendImpl(const std::string &prefix) : _prefix{prefix}
+  {
+    // DO NOTHING
+  }
+
+public:
+  void compile(coco::Module *m, coco::Data *d) override;
+
+private:
+  std::string _prefix;
+};
+
+void BackendImpl::compile(coco::Module *m, coco::Data *d)
+{
+  auto sess = make_session(m, d);
+
+  // validate if IR from frontend is correct
+  // NOTE(review): this check is compiled out entirely in NDEBUG builds -
+  //               assumed validate() has no side effects; confirm
+  assert(validate(code(sess)));
+
+  enco::Pipeline pipeline;
+
+  // Configure pipeline
+
+  // As explained below, the current implementation does not work if there is a pair of input/output
+  // that share the same bag as their underlying bag.
+  //
+  // BagDuplicationPass creates a copy of such bags in order to eliminate such a pair.
+  pipeline.append(make_unique<BagDuplicationPass>());
+  pipeline.append(make_unique<FeatureUnificationPass>());
+  pipeline.append(make_unique<AvgPoolLoweringPass>());
+  pipeline.append(make_unique<IntrinsicSelectionPass>());
+  // Insert data ordering if necessary
+  pipeline.append(make_unique<DataLayoutConversionPass>());
+  pipeline.append(make_unique<IndirectCopyEliminationPass>());
+  pipeline.append(make_unique<IdenticalObjectReductionPass>());
+  pipeline.append(make_unique<DuplicatedObjectReductionPass>());
+  pipeline.append(make_unique<ConstantFoldingPass>());
+  // Eliminate dead object
+  //
+  // NOTE Dead Object Elimination (DOE) is performed before Copy lowering
+  //      in order to reduce compilation overhead.
+  pipeline.append(make_unique<DeadObjectEliminationPass>());
+  // Lower Copy as Shuffle
+  pipeline.append(make_unique<CopyLoweringPass>());
+  // Lower ConcatF as Shuffle if it is not delegated to NNAPI yet
+  pipeline.append(make_unique<ConcatLoweringPass>());
+  pipeline.append(make_unique<BypassGenerationPass>());
+  pipeline.append(make_unique<FreeInstrEliminationPass>());
+  // NOTE Free Op Elimination should be applied after Free Instr Elimination
+  //      - Free Instr Elimination may generate additional free Op(s)
+  pipeline.append(make_unique<FreeOpEliminationPass>());
+  pipeline.append(make_unique<DeadBagEliminationPass>());
+  // Split instructions into a set of phases (each block serves as a phase)
+  pipeline.append(make_unique<PhaseConstructionPass>());
+
+  // Apply transforms in the pipeline
+  for (uint32_t n = 0; n < pipeline.size(); ++n)
+  {
+    const auto &pass = pipeline.at(n);
+
+    pass.run(sess);
+  }
+
+  // The current implementation will assign memory region for each bag as follows:
+  //   Bind input bag to the region provided by Network_input_bind
+  //   Bind output bag to the region provided by Network_output_bind
+  //   Bind intermediate bag to the region allocated during execution
+  //
+  // Note that this scheme does not work if there is a pair of input/output
+  // that share the same bag as their underlying bag
+  assert(!has_inout_bag(code(sess)->module()));
+
+  const std::string data_var = "data";
+  const std::string data_filename = _prefix + ".bin";
+
+  // Generate 'bin' file
+  {
+    std::ofstream ofs{data_filename, std::ios::binary};
+    generate_global_data(ofs, code(sess));
+  }
+
+  // Generate 'embed.S' file
+  {
+    std::ofstream ofs{_prefix + ".embed.S"};
+    ofs << AsmCode{data_filename, data_var};
+  }
+
+  // TODO Run various transforms over enco::Code
+
+  std::ofstream ofs{_prefix + ".cpp"};
+  ofs << CppCode{data_var, code(sess)} << std::endl;
+}
+
+} // namespace
+
+#include <iostream>
+
+/// @brief Backend factory; the first command-line argument is the output file prefix
+std::unique_ptr<enco::Backend> make_backend(const cmdline::View &cmdline)
+{
+  return make_unique<::BackendImpl>(cmdline.at(0));
+}
diff --git a/compiler/enco/core/src/Code.h b/compiler/enco/core/src/Code.h
new file mode 100644
index 000000000..91756d5f8
--- /dev/null
+++ b/compiler/enco/core/src/Code.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_CODE_H__
+#define __ENCO_CODE_H__
+
+#include "ANN/Context.h"
+
+#include <coco/IR/Module.h>
+#include <coco/IR/Data.h>
+
+namespace enco
+{
+
+/**
+ * @brief A pair of a coco IR module and its associated constant data
+ *
+ * Stores raw pointers only - ownership stays with the caller, who must keep
+ * both objects alive for the lifetime of this Code.
+ */
+struct Code
+{
+public:
+  Code(coco::Module *module, coco::Data *data) : _module{module}, _data{data}
+  {
+    // DO NOTHING
+  }
+
+public:
+  coco::Module *module(void) const { return _module; }
+  coco::Data *data(void) const { return _data; }
+
+private:
+  coco::Module *const _module;
+  coco::Data *const _data;
+};
+
+} // namespace enco
+
+#endif // __ENCO_CODE_H__
diff --git a/compiler/enco/core/src/Code.test.cpp b/compiler/enco/core/src/Code.test.cpp
new file mode 100644
index 000000000..8e96e4751
--- /dev/null
+++ b/compiler/enco/core/src/Code.test.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Code.h"
+
+#include <gtest/gtest.h>
+
+// Code is a thin pair: the accessors return exactly the pointers passed in
+TEST(CODE, constructor)
+{
+  auto m = coco::Module::create();
+  auto d = coco::Data::create();
+
+  enco::Code code{m.get(), d.get()};
+
+  ASSERT_EQ(code.module(), m.get());
+  ASSERT_EQ(code.data(), d.get());
+}
diff --git a/compiler/enco/core/src/CodeIndex.h b/compiler/enco/core/src/CodeIndex.h
new file mode 100644
index 000000000..7f2da6463
--- /dev/null
+++ b/compiler/enco/core/src/CodeIndex.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CODE_INDEX_H__
+#define __CODE_INDEX_H__
+
+#include <coco/IR/Block.h>
+#include <coco/IR/Instr.h>
+
+/**
+ * @brief A CodeIndex denotes the index of instruction inside the whole module
+ */
+class CodeIndex
+{
+public:
+  // Default-constructed indices are "invalid" placeholders
+  CodeIndex() = default;
+
+public:
+  CodeIndex(const coco::BlockIndex &blk_ind, const coco::InstrIndex &ins_ind)
+      : _blk_ind{blk_ind}, _ins_ind{ins_ind}
+  {
+  }
+
+public:
+  /// @brief Index of the block that contains the instruction
+  const coco::BlockIndex &block(void) const { return _blk_ind; }
+  /// @brief Index of the instruction within its block
+  const coco::InstrIndex &instr(void) const { return _ins_ind; }
+
+private:
+  coco::BlockIndex _blk_ind;
+  coco::InstrIndex _ins_ind;
+};
+
+/// @brief Return the index of a block; a null block maps to a default
+///        (invalid) BlockIndex
+static inline coco::BlockIndex block_index(const coco::Block *blk)
+{
+  if (blk == nullptr)
+  {
+    return coco::BlockIndex{};
+  }
+
+  return blk->index();
+}
+
+/// @brief Build the module-wide CodeIndex of an instruction
+///        (a detached instruction - null parent() - gets an invalid block index)
+static inline CodeIndex code_index(const coco::Instr *ins)
+{
+  return CodeIndex{block_index(ins->parent()), ins->index()};
+}
+
+/// @brief Lexicographic order: compare block indices first, then instruction
+///        indices when the blocks tie
+static inline bool operator<(const CodeIndex &lhs, const CodeIndex &rhs)
+{
+  if (lhs.block() < rhs.block())
+  {
+    return true;
+  }
+
+  // NOTE(review): this arm compares raw .value()s while the arm above relies
+  //               on BlockIndex::operator< - assumed equivalent; confirm that
+  //               BlockIndex::operator< is a plain value comparison
+  if (lhs.block().value() > rhs.block().value())
+  {
+    return false;
+  }
+
+  return lhs.instr() < rhs.instr();
+}
+
+#endif // __CODE_INDEX_H__
diff --git a/compiler/enco/core/src/CppCode.cpp b/compiler/enco/core/src/CppCode.cpp
new file mode 100644
index 000000000..aa5ef3156
--- /dev/null
+++ b/compiler/enco/core/src/CppCode.cpp
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CppCode.h"
+
+#include "Transforms/GlobalDataGeneration.h"
+#include "Transforms/Split.h"
+
+#include "CppGen/MemoryContext.h"
+
+#include "CppGen/Host.h"
+#include "CppGen/Subnet.h"
+
+#include "Dims.h"
+
+#include <pp/LinearDocument.h>
+#include <pp/MultiLineTextUtils.h>
+
+#include <map>
+#include <set>
+#include <string>
+#include <stdexcept>
+
+namespace
+{
+
+/// @brief Per-subnet code-generation metadata collected while emitting structs
+struct SubnetInfo
+{
+  std::string struct_name;
+  /// @brief The field name (in this subnet struct) of ANeuralNetworksCompilation value
+  std::string compilation_field;
+
+  /// @brief The field name (in Network struct) for this subnet
+  std::string field_name;
+};
+
+/// @brief Accumulates the member declarations of the generated 'Network' struct
+struct NetworkStruct
+{
+  pp::LinearDocument def;
+};
+
+/// @brief Three-part body of the generated Network_invoke function
+///        (head = allocations, body = per-block code, tail = cleanup in
+///        reverse order of appends)
+struct InvokeFunction
+{
+  pp::LinearDocument head;
+  pp::LinearDocument body;
+  pp::LinearDocument tail{pp::LinearDocument::Direction::Reverse};
+
+public:
+  /** @brief Create a (fresh) local variable */
+  std::string local(void) { return pp::fmt("v_", ++_var_count); }
+
+private:
+  uint32_t _var_count = 0;
+};
+
+/**
+ * @brief Enumerate a set of Bag accessed by a given instruction
+ *
+ * Supported instruction:
+ *   "Shuffle"
+ */
+class AccessedBagAccumulator : public coco::Instr::Visitor<void>
+{
+public:
+  AccessedBagAccumulator(std::set<coco::Bag *> *out) : _out{out}
+  {
+    // Validate "out"
+    assert(_out != nullptr);
+  }
+
+public:
+  // Records both the source and destination bags of a Shuffle
+  void visit(const coco::Shuffle *shuffle) override
+  {
+    assert(shuffle->from() != nullptr);
+    assert(shuffle->into() != nullptr);
+
+    _out->insert(shuffle->from());
+    _out->insert(shuffle->into());
+  }
+
+private:
+  std::set<coco::Bag *> *_out; // accumulation target (not owned)
+};
+
+/**
+ * @brief Return a set of bags that SHOULD have a host allocation
+ */
+std::set<coco::Bag *> hosted(const enco::Code *code)
+{
+  std::set<coco::Bag *> res;
+
+  auto m = code->module();
+  auto ann_ctx = enco::SubnetManager::context(m);
+
+  for (auto blk = m->block()->head(); blk; blk = blk->next())
+  {
+    if (auto ann_binder = ann_ctx->find(blk))
+    {
+      // Case: The current block is ANN-compatible
+
+      // Each ANN input SHOULD have a corresponding host allocation
+      for (uint32_t n = 0; n < ann_binder->module()->input()->size(); ++n)
+      {
+        res.insert(ann_binder->input(n));
+      }
+
+      // Each ANN output SHOULD have a corresponding host allocation
+      for (uint32_t n = 0; n < ann_binder->module()->output()->size(); ++n)
+      {
+        res.insert(ann_binder->output(n));
+      }
+    }
+    else
+    {
+      // Every bag that ANN-incompatible block accesses SHOULD have a corresponding host allocation
+      AccessedBagAccumulator acc{&res};
+
+      for (auto ins = blk->instr()->head(); ins; ins = ins->next())
+      {
+        ins->accept(acc);
+      }
+    }
+  }
+
+  return res;
+}
+} // namespace
+
+namespace enco
+{
+
+/**
+ * @brief Emit the complete generated C++ source for the compiled network
+ *
+ * Output order: includes, per-subnet structs (anonymous namespace), the
+ * 'Network' struct definition, its constructor/destructor, and the C-style
+ * Network_* entry points (construct/destruct, input/output accessors, bind
+ * functions, and invoke).
+ */
+void CppCode::dump(std::ostream &os) const
+{
+  auto m = _code->module();
+  auto d = _code->data();
+  auto ann_ctx = enco::SubnetManager::context(m);
+
+  NetworkStruct network;
+  InvokeFunction invoke;
+  pp::LinearDocument internal;
+
+  // Renders "<data var> + <offset>" for addressing into the embedded blob
+  auto data_exp = [this](const GlobalOffset &off) { return pp::fmt(_varname, " + ", off); };
+
+  // Record the subnet information
+  std::map<const ANNBinder *, SubnetInfo> subnet_ctx;
+
+  /**
+   * Create a struct for each android NN network of the following form:
+   *
+   * struct [Name]
+   * {
+   *   ...
+   *
+   *   [Name]() // constructor
+   *   {
+   *     ...
+   *   }
+   *
+   *   ~[Name]() // destructor
+   *   {
+   *     ...
+   *   }
+   * };
+   *
+   */
+  for (uint32_t n = 0; n < ann_ctx->count(); ++n)
+  {
+    SubnetStructBuilder builder;
+
+    auto subnet_binder = ann_ctx->nth(n);
+    auto subnet_struct_name = pp::fmt("Subnet_", subnet_ctx.size());
+    auto subnet_field_name = pp::fmt("_subnet_", subnet_ctx.size());
+
+    // Create global data variable
+    auto emit_weight = [&](const ann::OperandID &, const ann::Operand *info) {
+      if (info->weight())
+      {
+        auto size = info->weight()->size();
+        auto off = enco::GlobalData::data_offset(info);
+        auto base_exp = pp::fmt("reinterpret_cast<const void *>(", data_exp(off), ")");
+        auto size_exp = pp::fmt(size);
+
+        builder.expr(info, base_exp, size_exp);
+      }
+    };
+    subnet_binder->module()->operand()->each(emit_weight);
+
+    auto subnet_struct_content = builder.build(subnet_binder);
+
+    // Emit C++ declaration
+    internal.append("struct ", subnet_struct_name);
+    internal.append("{");
+    internal.indent();
+
+    internal.append(subnet_struct_content->def());
+
+    internal.append(subnet_struct_name, "()");
+    internal.append("{");
+    internal.indent();
+    internal.append(subnet_struct_content->ctor());
+    internal.unindent();
+    internal.append("}");
+
+    internal.append("~", subnet_struct_name, "()");
+    internal.append("{");
+    internal.indent();
+    internal.append(subnet_struct_content->dtor());
+    internal.unindent();
+    internal.append("}");
+
+    internal.unindent();
+    internal.append("};");
+
+    // Declare subnet field
+    network.def.append(subnet_struct_name, " ", subnet_field_name, ";");
+
+    // Update subnet context
+    SubnetInfo subnet_info;
+
+    subnet_info.struct_name = subnet_struct_name;
+    subnet_info.compilation_field = subnet_struct_content->compilation();
+    subnet_info.field_name = subnet_field_name;
+
+    assert(subnet_ctx.find(subnet_binder) == subnet_ctx.end());
+    subnet_ctx[subnet_binder] = subnet_info;
+  }
+
+  MemoryContext mem;
+
+  // Set dedicated memory region for network inputs
+  for (uint32_t n = 0; n < m->input()->size(); ++n)
+  {
+    mem.base(m->input()->at(n)->bag(), pp::fmt("net->inputs[", n, "].ptr"));
+    mem.size(m->input()->at(n)->bag(), pp::fmt("net->inputs[", n, "].len"));
+  }
+
+  // Set dedicated memory region for network outputs
+  for (uint32_t n = 0; n < m->output()->size(); ++n)
+  {
+    mem.base(m->output()->at(n)->bag(), pp::fmt("net->outputs[", n, "].ptr"));
+    mem.size(m->output()->at(n)->bag(), pp::fmt("net->outputs[", n, "].len"));
+  }
+
+  // Set dedicated memory region for constant weight values
+  // TODO Support non-constant bags with initial values
+  for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+  {
+    auto bag = m->entity()->bag()->at(n);
+
+    if (!d->allocated(bag))
+    {
+      // Skip if no weight exists
+      continue;
+    }
+
+    // TODO Support non-float(fp32) weight
+    auto offset = enco::GlobalData::data_offset(bag);
+
+    auto base_expr = data_exp(offset);
+    auto size_expr = pp::fmt(bag->size() * sizeof(float));
+
+    mem.base(bag, base_expr);
+    mem.size(bag, size_expr);
+  }
+
+  // Set dedicated memory region for intermediate buffer(s)
+  for (const auto &bag : hosted(_code))
+  {
+    // Skip if a bag is already allocated
+    if (mem.member(bag))
+    {
+      continue;
+    }
+
+    auto name = invoke.local();
+
+    // Heap-allocate at the top of invoke, free (in reverse order) at the bottom
+    invoke.head.append("auto ", name, " = new uint8_t[", bag->size() * sizeof(float), "];");
+    invoke.tail.append("delete[] ", name, ";");
+
+    mem.base(bag, name);
+    mem.size(bag, pp::fmt(bag->size() * sizeof(float)));
+  }
+
+  // Create Code Block Builder
+  SubnetBlockCompiler subnet_compiler{mem};
+
+  for (auto it = subnet_ctx.begin(); it != subnet_ctx.end(); ++it)
+  {
+    // Specify how to access ANeuralNetworksCompilation
+    const auto &info = it->second;
+    subnet_compiler.bind(it->first, pp::fmt("net->", info.field_name, ".", info.compilation_field));
+  }
+
+  HostBlockCompiler host_compiler{mem};
+
+  for (auto blk = m->block()->head(); blk; blk = blk->next())
+  {
+    invoke.body.append("{");
+    invoke.body.indent();
+
+    if (auto binder = ann_ctx->find(blk))
+    {
+      // Generate code that invokes Android NN sub-network
+      auto lines = subnet_compiler.compile(binder);
+      invoke.body.append(*lines);
+    }
+    else
+    {
+      // Generate code on-the-fly for Android NN-incompatible blocks
+      auto lines = host_compiler.compile(blk);
+      invoke.body.append(*lines);
+    }
+
+    invoke.body.unindent();
+    invoke.body.append("}");
+  }
+
+  //
+  // Generate full C++ source code with code snippet
+  //
+  const std::string name{"Network"};
+
+  pp::LinearDocument includes;
+  {
+    // Include Android NN API header
+    includes.append("#include <NeuralNetworks.h>");
+    includes.append();
+
+    includes.append("#include <cstdint>");
+    includes.append("#include <cassert>");
+    includes.append("#include <array>");
+  }
+
+  pp::LinearDocument net_def;
+  {
+    net_def.append("struct ", name, " {");
+    net_def.indent();
+    net_def.append("struct Shape { uint32_t rank; const uint32_t *dims; };");
+    net_def.append("struct Input {");
+    net_def.indent();
+    net_def.append("const char *name;");
+    net_def.append("const uint8_t *ptr;");
+    net_def.append("unsigned len;");
+    net_def.append("Shape shape;");
+    net_def.unindent();
+    net_def.append("};");
+    net_def.append("struct Output {");
+    net_def.indent();
+    net_def.append("const char *name;");
+    net_def.append("uint8_t *ptr;");
+    net_def.append("unsigned len;");
+    net_def.append("Shape shape;");
+    net_def.unindent();
+    net_def.append("};");
+    net_def.append();
+    net_def.append(name, "();");
+    net_def.append("~", name, "();");
+
+    net_def.append();
+    net_def.append(network.def);
+    net_def.append();
+
+    net_def.append("std::array<Input, ", m->input()->size(), "> inputs;");
+    net_def.append("std::array<Output, ", m->output()->size(), "> outputs;");
+
+    net_def.unindent();
+    net_def.append("};");
+  }
+
+  pp::LinearDocument net_ctor;
+  {
+    net_ctor.append("Network::Network() {");
+    net_ctor.indent();
+
+    // Initialize input metadata
+    for (uint32_t n = 0; n < m->input()->size(); ++n)
+    {
+      auto input = m->input()->at(n);
+      auto dims = as_dims(input->shape());
+
+      auto name_off = enco::GlobalData::name_offset(input);
+      auto name_exp = pp::fmt("reinterpret_cast<const char *>(", data_exp(name_off), ")");
+      auto dims_off = enco::GlobalData::dims_offset(input);
+      auto dims_exp = pp::fmt("reinterpret_cast<const unsigned *>(", data_exp(dims_off), ")");
+
+      net_ctor.append("inputs.at(", n, ").name = ", name_exp, ";");
+      net_ctor.append("inputs.at(", n, ").shape.rank = ", dims.size(), ";");
+      net_ctor.append("inputs.at(", n, ").shape.dims = ", dims_exp, ";");
+    }
+
+    // Initialize output metadata
+    for (uint32_t n = 0; n < m->output()->size(); ++n)
+    {
+      auto output = m->output()->at(n);
+      auto dims = as_dims(output->shape());
+
+      auto name_off = enco::GlobalData::name_offset(output);
+      auto name_exp = pp::fmt("reinterpret_cast<const char *>(", data_exp(name_off), ")");
+      auto dims_off = enco::GlobalData::dims_offset(output);
+      auto dims_exp = pp::fmt("reinterpret_cast<const unsigned *>(", data_exp(dims_off), ")");
+
+      net_ctor.append("outputs.at(", n, ").name = ", name_exp, ";");
+      net_ctor.append("outputs.at(", n, ").shape.rank = ", dims.size(), ";");
+      net_ctor.append("outputs.at(", n, ").shape.dims = ", dims_exp, ";");
+    }
+
+    // TODO Implement this
+    net_ctor.unindent();
+    net_ctor.append("}");
+  }
+
+  pp::LinearDocument net_dtor;
+  {
+    net_dtor.append("Network::~Network() {");
+    net_dtor.indent();
+    // TODO Implement this
+    net_dtor.unindent();
+    net_dtor.append("}");
+  }
+
+  pp::LinearDocument source;
+
+  source.append(includes);
+  source.append();
+  source.append("extern uint8_t ", _varname, "[];");
+  source.append();
+
+  source.append("namespace");
+  source.append("{");
+  source.append(internal);
+  source.append("} // namespace");
+  source.append();
+  source.append(net_def);
+  source.append();
+  source.append(net_ctor);
+  source.append();
+  source.append(net_dtor);
+
+  source.append();
+  source.append(name, " *", name, "_construct() { return new ", name, "{}; }");
+  source.append("void ", name, "_destruct(", name, " *net) { delete net; }");
+
+  source.append();
+
+  // Emit Network_input_count function
+  source.append("unsigned ", name, "_input_count(const ", name, " *net) {");
+  source.indent();
+  source.append("return net->inputs.size();");
+  source.unindent();
+  source.append("}");
+
+  source.append();
+
+  // Emit Network_input_name function
+  source.append("const char *", name, "_input_name(const ", name, " *net, unsigned n) {");
+  source.indent();
+  source.append("return net->inputs.at(n).name;");
+  source.unindent();
+  source.append("}");
+
+  // Emit Network_input_rank function
+  source.append("unsigned ", name, "_input_rank(const ", name, " *net, unsigned n) {");
+  source.indent();
+  source.append("return net->inputs.at(n).shape.rank;");
+  source.unindent();
+  source.append("}");
+
+  // Emit Network_input_dim function
+  source.append("unsigned ", name, "_input_dim(const ", name, " *net, unsigned n, unsigned axe)");
+  source.append("{");
+  source.indent();
+  source.append("return net->inputs.at(n).shape.dims[axe];");
+  source.unindent();
+  source.append("}");
+
+  // Emit Network_input_bind function
+  source.append("void ", name, "_input_bind(", name,
+                " *net, unsigned n, const void *ptr, unsigned len) {");
+  source.indent();
+  source.append("net->inputs.at(n).ptr = reinterpret_cast<const uint8_t *>(ptr);");
+  source.append("net->inputs.at(n).len = len;");
+  source.unindent();
+  source.append("}");
+
+  source.append();
+
+  // Emit Network_output_count function
+  source.append("unsigned ", name, "_output_count(const ", name, " *net) {");
+  source.indent();
+  source.append("return net->outputs.size();");
+  source.unindent();
+  source.append("}");
+
+  source.append();
+
+  // Emit Network_output_name function
+  source.append("const char *", name, "_output_name(const ", name, " *net, unsigned n) {");
+  source.indent();
+  source.append("return net->outputs.at(n).name;");
+  source.unindent();
+  source.append("}");
+
+  // Emit Network_output_rank function
+  source.append("unsigned ", name, "_output_rank(const ", name, " *net, unsigned n) {");
+  source.indent();
+  source.append("return net->outputs.at(n).shape.rank;");
+  source.unindent();
+  source.append("}");
+
+  // Emit Network_output_dim function
+  source.append("unsigned ", name, "_output_dim(const ", name, " *net, unsigned n, unsigned axe)");
+  source.append("{");
+  source.indent();
+  source.append("return net->outputs.at(n).shape.dims[axe];");
+  source.unindent();
+  source.append("}");
+
+  // Emit Network_output_bind function
+  source.append("void ", name, "_output_bind(", name,
+                " *net, unsigned n, void *ptr, unsigned len) {");
+  source.indent();
+  source.append("net->outputs.at(n).ptr = reinterpret_cast<uint8_t *>(ptr);");
+  source.append("net->outputs.at(n).len = len;");
+  source.unindent();
+  source.append("}");
+
+  source.append();
+
+  source.append("void ", name, "_invoke(", name, " *net) {");
+  source.indent();
+  source.append(invoke.head);
+  source.append(invoke.body);
+  source.append(invoke.tail);
+  source.unindent();
+  source.append("}");
+
+  os << source;
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/CppCode.h b/compiler/enco/core/src/CppCode.h
new file mode 100644
index 000000000..c52ea1d5d
--- /dev/null
+++ b/compiler/enco/core/src/CppCode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_CPP_CODE_H__
+#define __ENCO_CPP_CODE_H__
+
+#include "Code.h"
+
+#include <ostream>
+
+namespace enco
+{
+
+class CppCode
+{
+public:
+ CppCode(const std::string &varname, const Code *code) : _varname{varname}, _code{code}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void dump(std::ostream &) const;
+
+private:
+ const std::string _varname;
+ const Code *_code;
+};
+
+} // namespace enco
+
+static inline std::ostream &operator<<(std::ostream &os, const enco::CppCode &code)
+{
+ code.dump(os);
+ return os;
+}
+
+#endif // __ENCO_CPP_CODE_H__
diff --git a/compiler/enco/core/src/CppGen/Host.cpp b/compiler/enco/core/src/CppGen/Host.cpp
new file mode 100644
index 000000000..37e0583d7
--- /dev/null
+++ b/compiler/enco/core/src/CppGen/Host.cpp
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Host.h"
+
+#include <pp/EnclosedDocument.h>
+
+#include <stdex/Memory.h>
+
+#include <map>
+#include <string>
+
+namespace
+{
+
+/**
+ * @brief Data transfer between flat arrays
+ *
+ * Transfer(from, into) denotes the following C code:
+ * dst[into] = src[from];
+ */
+class Transfer
+{
+public:
+ Transfer(uint32_t from, uint32_t into) : _from{from}, _into{into}
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint32_t from(void) const { return _from; }
+ uint32_t into(void) const { return _into; }
+
+private:
+ uint32_t _from;
+ uint32_t _into;
+};
+
+using TransferSequence = std::vector<Transfer>;
+
+/**
+ * @brief Convert Shuffle instruction as a sequence of data transfer
+ */
+TransferSequence as_transfer_sequence(const coco::Shuffle *shuffle)
+{
+ TransferSequence seq;
+
+ for (const auto &dst : shuffle->range())
+ {
+ const auto src = shuffle->at(dst);
+ seq.emplace_back(src.value(), dst.value());
+ }
+
+ return seq;
+}
+
+/**
+ * Given a sequence of N data transfers,
+ * find_loop tries to compute count, src_step, dst_step that satisfies
+ * the following properties:
+ *
+ * First, N should be a multiple of count.
+ * Below we refer to that multiplier as 'window' (= N / count)
+ *
+ * Second,
+ * for all n in [0, count),
+ * for all k in [0, window),
+ * from[n * window + k] == from[k] + src_step, and
+ * into[n * window + k] == into[k] + dst_step
+ */
+bool find_loop(TransferSequence::const_iterator beg, TransferSequence::const_iterator end,
+ uint32_t *p_count, uint32_t *p_src_step, uint32_t *p_dst_step)
+{
+ assert(p_count != nullptr);
+ assert(p_src_step != nullptr);
+ assert(p_dst_step != nullptr);
+
+ const uint32_t size = end - beg;
+
+ for (uint32_t window = 1; window <= size; ++window)
+ {
+ if (size % window != 0)
+ {
+ continue;
+ }
+
+ auto src_step_at = [&beg, window](uint32_t n) {
+ return (beg + n)->from() - (beg + n - window)->from();
+ };
+
+ auto dst_step_at = [&beg, window](uint32_t n) {
+ return (beg + n)->into() - (beg + n - window)->into();
+ };
+
+ const uint32_t count = size / window;
+ const uint32_t src_step = src_step_at(window);
+ const uint32_t dst_step = dst_step_at(window);
+
+ bool consistent = true;
+
+ for (uint32_t n = window + 1; n < size; ++n)
+ {
+ if ((src_step_at(n) != src_step) || (dst_step_at(n) != dst_step))
+ {
+ consistent = false;
+ break;
+ }
+ }
+
+ if (consistent)
+ {
+ *p_count = count;
+ *p_src_step = src_step;
+ *p_dst_step = dst_step;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * @brief Single transfer loop (a triple of count, source step, destination step)
+ */
+class TransferLoop
+{
+public:
+ class Step
+ {
+ public:
+ Step(uint32_t src, uint32_t dst) : _src{src}, _dst{dst}
+ {
+ // DO NOTHING
+ }
+
+ public:
+ uint32_t src(void) const { return _src; }
+ uint32_t dst(void) const { return _dst; }
+
+ private:
+ uint32_t _src;
+ uint32_t _dst;
+ };
+
+public:
+ TransferLoop(uint32_t count, uint32_t src_step, uint32_t dst_step)
+ : _count{count}, _step{src_step, dst_step}
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint32_t count(void) const { return _count; }
+ const Step &step(void) const { return _step; }
+
+private:
+ uint32_t _count;
+ Step _step;
+};
+
+/**
+ * @brief Nested transfer loops
+ */
+using TransferNest = std::vector<TransferLoop>;
+
+/**
+ * @brief Construct nested transfer loop-nest that corresponds to a given Shuffle instruction
+ */
+TransferNest as_nest(const TransferSequence &seq)
+{
+ TransferNest nest;
+
+ auto beg = seq.begin();
+ auto end = seq.end();
+
+ uint32_t window = end - beg;
+ uint32_t count = 0;
+ uint32_t src_step = 0;
+ uint32_t dst_step = 0;
+
+ while ((window > 1) && find_loop(beg, end, &count, &src_step, &dst_step))
+ {
+ assert(window % count == 0);
+
+ window /= count;
+ end = beg + window;
+
+ nest.emplace_back(count, src_step, dst_step);
+ }
+
+ return nest;
+};
+
+uint32_t loop_count(const TransferNest &nest)
+{
+ uint32_t count = 1;
+
+ for (const auto &loop : nest)
+ {
+ count *= loop.count();
+ }
+
+ return count;
+};
+
+class InstrPrinter : public coco::Instr::Visitor<pp::LinearDocument>
+{
+public:
+ InstrPrinter(const enco::MemoryContext &mem) : _mem(mem)
+ {
+ // DO NOTHING
+ }
+
+private:
+ pp::LinearDocument visit(const coco::Shuffle *shuffle) override
+ {
+ auto from = shuffle->from();
+ auto into = shuffle->into();
+
+ //
+ // Analyze 'Shuffle' pattern, and convert it as nested loops
+ //
+ auto tseq = as_transfer_sequence(shuffle);
+ auto nest = as_nest(tseq);
+ assert(tseq.size() % loop_count(nest) == 0);
+ uint32_t window = tseq.size() / loop_count(nest);
+
+ //
+ // Generate loop body
+ //
+ pp::EnclosedDocument loop_body;
+
+ auto var_at = [](uint32_t lv) { return pp::fmt("_", lv); };
+
+ for (uint32_t lv = 0; lv < nest.size(); ++lv)
+ {
+ auto var = var_at(lv);
+
+ loop_body.front().append("for (uint32_t ", var, " = 0; ", var, " < ", nest.at(lv).count(),
+ "; ++", var, ") {");
+ loop_body.front().indent();
+
+ loop_body.back().append("}");
+ loop_body.back().indent();
+ }
+
+ std::string src_index = "0";
+ std::string dst_index = "0";
+
+ for (uint32_t lv = 0; lv < nest.size(); ++lv)
+ {
+ src_index += pp::fmt(" + ", nest.at(lv).step().src(), " * ", var_at(lv));
+ dst_index += pp::fmt(" + ", nest.at(lv).step().dst(), " * ", var_at(lv));
+ }
+
+ for (uint32_t n = 0; n < window; ++n)
+ {
+ const auto src_base = pp::fmt("reinterpret_cast<const float *>(", _mem.base(from), ")");
+ const auto dst_base = pp::fmt("reinterpret_cast<float *>(", _mem.base(into), ")");
+
+ loop_body.front().append(dst_base, "[", dst_index, " + ", tseq.at(n).into(), "] = ", src_base,
+ "[", src_index, " + ", tseq.at(n).from(), "];");
+ }
+
+ pp::LinearDocument res;
+ res.append(loop_body);
+ return res;
+ }
+
+private:
+ const enco::MemoryContext &_mem;
+};
+
+} // namespace
+
+namespace enco
+{
+
+std::unique_ptr<pp::MultiLineText> HostBlockCompiler::compile(const coco::Block *blk) const
+{
+ InstrPrinter prn{_mem};
+
+ auto res = stdex::make_unique<pp::LinearDocument>();
+
+ for (auto ins = blk->instr()->head(); ins; ins = ins->next())
+ {
+ res->append(ins->accept(prn));
+ }
+
+ return std::move(res);
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/CppGen/Host.h b/compiler/enco/core/src/CppGen/Host.h
new file mode 100644
index 000000000..0adb7fe1f
--- /dev/null
+++ b/compiler/enco/core/src/CppGen/Host.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_CPP_GEN_HOST_H__
+#define __ENCO_CPP_GEN_HOST_H__
+
+#include "CppGen/MemoryContext.h"
+
+#include <coco/IR.h>
+#include <pp/MultiLineText.h>
+
+namespace enco
+{
+
+/***
+ * @brief Generate C++ code that does not depend on Android NN API
+ */
+class HostBlockCompiler
+{
+public:
+ HostBlockCompiler(const enco::MemoryContext &mem) : _mem(mem)
+ {
+ // DO NOTHING
+ }
+
+public:
+ std::unique_ptr<pp::MultiLineText> compile(const coco::Block *blk) const;
+
+private:
+ const enco::MemoryContext &_mem;
+};
+
+} // namespace enco
+
+#endif // __ENCO_CPP_GEN_HOST_H__
diff --git a/compiler/enco/core/src/CppGen/MemoryContext.cpp b/compiler/enco/core/src/CppGen/MemoryContext.cpp
new file mode 100644
index 000000000..e522968a8
--- /dev/null
+++ b/compiler/enco/core/src/CppGen/MemoryContext.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MemoryContext.h"
+
+#include <cassert>
+
+namespace enco
+{
+
+bool MemoryContext::member(const coco::Bag *bag) const
+{
+ // NOTE _base and _size SHOULD BE consistent
+ if (_base.find(bag) != _base.end())
+ {
+ assert(_size.find(bag) != _size.end());
+ return true;
+ }
+
+ assert(_size.find(bag) == _size.end());
+ return false;
+}
+
+void MemoryContext::base(const coco::Bag *bag, const std::string &exp) { _base[bag] = exp; }
+void MemoryContext::size(const coco::Bag *bag, const std::string &exp) { _size[bag] = exp; }
+
+} // namespace enco
diff --git a/compiler/enco/core/src/CppGen/MemoryContext.h b/compiler/enco/core/src/CppGen/MemoryContext.h
new file mode 100644
index 000000000..99c20f3e8
--- /dev/null
+++ b/compiler/enco/core/src/CppGen/MemoryContext.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_CPP_GEN_MEMORY_CONTEXT_H__
+#define __ENCO_CPP_GEN_MEMORY_CONTEXT_H__
+
+#include <coco/IR/Bag.h>
+
+#include <string>
+#include <map>
+
+namespace enco
+{
+
+/**
+ * @brief Record C/C++ expression that denotes the base and size of memory region
+ * dedicated to each bag
+ */
+class MemoryContext
+{
+public:
+ /**
+   * @brief Check whether a base/size expression is recorded for a given bag
+ */
+ bool member(const coco::Bag *bag) const;
+
+public:
+ void base(const coco::Bag *bag, const std::string &exp);
+ void size(const coco::Bag *bag, const std::string &exp);
+
+public:
+ const std::string &base(const coco::Bag *bag) const { return _base.at(bag); }
+ const std::string &size(const coco::Bag *bag) const { return _size.at(bag); }
+
+private:
+ std::map<const coco::Bag *, std::string> _base;
+ std::map<const coco::Bag *, std::string> _size;
+};
+
+} // namespace enco
+
+#endif // __ENCO_CPP_GEN_MEMORY_CONTEXT_H__
diff --git a/compiler/enco/core/src/CppGen/Subnet.cpp b/compiler/enco/core/src/CppGen/Subnet.cpp
new file mode 100644
index 000000000..9a636c6ae
--- /dev/null
+++ b/compiler/enco/core/src/CppGen/Subnet.cpp
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CppGen/Subnet.h"
+
+#include "Dims.h"
+#include "String.h"
+
+#include <pp/LinearDocument.h>
+
+#include <stdex/Memory.h>
+
+#include <sstream>
+
+using stdex::make_unique;
+using enco::concat;
+
+#define S(content) #content
+
+namespace ann
+{
+static std::ostream &operator<<(std::ostream &os, const ann::OperandID &id)
+{
+ os << id.value();
+ return os;
+}
+} // namespace ann
+
+namespace
+{
+
+class SubnetStructImpl final : public enco::SubnetStruct
+{
+public:
+ SubnetStructImpl() : _dtor{pp::LinearDocument::Direction::Reverse}
+ {
+ // DO NOTHING
+ }
+
+public:
+ std::string model(void) const override { return "_model"; }
+ std::string compilation(void) const override { return "_compilation"; }
+
+public:
+ const pp::MultiLineText &def(void) const override { return _def; }
+ pp::LinearDocument *def(void) { return &_def; }
+
+public:
+ const pp::MultiLineText &ctor(void) const override { return _ctor; }
+ pp::LinearDocument *ctor(void) { return &_ctor; }
+
+public:
+ const pp::MultiLineText &dtor(void) const override { return _dtor; }
+ pp::LinearDocument *dtor(void) { return &_dtor; }
+
+private:
+ pp::LinearDocument _def;
+ pp::LinearDocument _ctor;
+ pp::LinearDocument _dtor;
+};
+
+struct CodeFragment
+{
+ virtual ~CodeFragment() = default;
+
+ virtual void dump(pp::LinearDocument *) const = 0;
+};
+
+pp::LinearDocument *operator<<(pp::LinearDocument *doc, const CodeFragment &fragment)
+{
+ fragment.dump(doc);
+ return doc;
+}
+
+const char *scalar_operand_code(const ann::DType &dtype)
+{
+ switch (dtype)
+ {
+ case ann::DType::S32:
+ return "ANEURALNETWORKS_INT32";
+ default:
+ break;
+ };
+
+ throw std::invalid_argument("dtype");
+}
+
+const char *tensor_operand_code(const ann::DType &dtype)
+{
+ switch (dtype)
+ {
+ case ann::DType::S32:
+ return "ANEURALNETWORKS_TENSOR_INT32";
+ case ann::DType::F32:
+ return "ANEURALNETWORKS_TENSOR_FLOAT32";
+ default:
+ break;
+ };
+
+ throw std::invalid_argument("dtype");
+}
+
+class ScalarOperandDecl final : public CodeFragment
+{
+public:
+ ScalarOperandDecl(const std::string &model, const ann::DType &dtype)
+ : _model{model}, _dtype{dtype}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void dump(pp::LinearDocument *doc) const override
+ {
+ doc->append("{");
+ doc->indent();
+ doc->append("ANeuralNetworksOperandType t;");
+ doc->append();
+ doc->append("t.type = ", scalar_operand_code(_dtype), ";");
+ doc->append("t.dimensionCount = 0;");
+ doc->append("t.dimensions = nullptr;");
+ doc->append("t.scale = 1.0f;");
+ doc->append("t.zeroPoint = 0;");
+ doc->append();
+ doc->append("ANeuralNetworksModel_addOperand(", _model, ", &t);");
+ doc->unindent();
+ doc->append("}");
+ }
+
+private:
+ std::string _model;
+ ann::DType _dtype;
+};
+
+class TensorOperandDecl final : public CodeFragment
+{
+public:
+ TensorOperandDecl(const std::string &model, const ann::DType &dtype,
+ const nncc::core::ADT::tensor::Shape &shape)
+ : _model{model}, _dtype{dtype}, _shape{shape}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void dump(pp::LinearDocument *doc) const override
+ {
+ const auto rank = _shape.rank();
+ const auto dims = as_dims(_shape);
+
+ assert(rank == dims.size());
+
+ doc->append("{");
+ doc->indent();
+ doc->append("uint32_t d[", rank, "] = { ", concat(", ", dims.begin(), dims.end()), " };");
+ doc->append();
+ doc->append("ANeuralNetworksOperandType t;");
+ doc->append();
+ doc->append("t.type = ", tensor_operand_code(_dtype), ";");
+ doc->append("t.dimensionCount = ", rank, ";");
+ doc->append("t.dimensions = d;");
+ doc->append("t.scale = 1.0f;");
+ doc->append("t.zeroPoint = 0;");
+ doc->append();
+ doc->append("ANeuralNetworksModel_addOperand(", _model, ", &t);");
+ doc->unindent();
+ doc->append("}");
+ }
+
+private:
+ std::string _model;
+ ann::DType _dtype;
+ nncc::core::ADT::tensor::Shape _shape;
+};
+
+/**
+ * @brief Code fragment that calls ANeuralNetworksModel_setOperandValue
+ */
+class WeightDecl final : public CodeFragment
+{
+public:
+ WeightDecl(const std::string &model, const ann::OperandID &id, const std::string &base,
+ const std::string &size)
+ : _model{model}, _id{id}, _base{base}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void dump(pp::LinearDocument *doc) const override
+ {
+ doc->append("ANeuralNetworksModel_setOperandValue(", _model, ", ", _id.value(), ", ", _base,
+ ", ", _size, ");");
+ }
+
+private:
+ std::string _model;
+ ann::OperandID _id;
+ std::string _base;
+ std::string _size;
+};
+
+/**
+ * @brief Code fragment that calls ANeuralNetworksModel_addOperation
+ */
+class OperationDecl final : public CodeFragment
+{
+public:
+ OperationDecl(const std::string &model, const ann::Operation *op) : _model{model}, _op{op}
+ {
+ // DO NOTHING
+ }
+
+private:
+ static std::string opcode(const ann::Operation::Code &code)
+ {
+ switch (code)
+ {
+#define ANN_OPERATION(TAG, ENUM) \
+ case ann::Operation::Code::TAG: \
+ return #ENUM;
+#include "ANN/IR/Operation.def"
+#undef ANN_OPERATION
+ default:
+ throw std::invalid_argument{"code"};
+ };
+ }
+
+public:
+ void dump(pp::LinearDocument *doc) const override
+ {
+ const auto in_count = _op->inputs().size();
+ auto in_beg = _op->inputs().begin();
+ auto in_end = _op->inputs().end();
+
+ const auto out_count = _op->outputs().size();
+ auto out_beg = _op->outputs().begin();
+ auto out_end = _op->outputs().end();
+
+ auto op = opcode(_op->code());
+
+ doc->append("{");
+ doc->indent();
+ doc->append("uint32_t inputs[", in_count, "] = { ", concat(", ", in_beg, in_end), " };");
+ doc->append("uint32_t outputs[", out_count, "] = { ", concat(", ", out_beg, out_end), " };");
+ doc->append();
+ doc->append("ANeuralNetworksModel_addOperation(", _model, ", ", op, ", ", in_count,
+ ", inputs, ", out_count, ", outputs);");
+ doc->unindent();
+ doc->append("}");
+ }
+
+private:
+ std::string _model;
+ const ann::Operation *_op;
+};
+
+/**
+ * @brief Code fragment that calls ANeuralNetworksModel_identifyInputsAndOutputs
+ */
+class ArgumentDecl final : public CodeFragment
+{
+public:
+ ArgumentDecl(const std::string &mname, const ANNBinder *binder) : _mname{mname}, _binder{binder}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void dump(pp::LinearDocument *doc) const override
+ {
+ doc->append("{");
+ doc->indent();
+
+ auto module = _binder->module();
+ const uint32_t input_count = module->input()->size();
+
+ doc->append("uint32_t inputs[", input_count, "];");
+ for (uint32_t n = 0; n < input_count; ++n)
+ {
+ doc->append("inputs[", n, "] = ", module->input()->at(n), ";");
+ }
+
+ const uint32_t output_count = module->output()->size();
+
+ doc->append("uint32_t outputs[", output_count, "];");
+ for (uint32_t n = 0; n < output_count; ++n)
+ {
+ doc->append("outputs[", n, "] = ", module->output()->at(n), ";");
+ }
+
+ doc->append("ANeuralNetworksModel_identifyInputsAndOutputs(", _mname, ", ", input_count,
+ ", inputs, ", output_count, ", outputs);");
+ doc->unindent();
+ doc->append("}");
+ }
+
+private:
+ std::string _mname;
+ const ANNBinder *_binder;
+};
+
+} // namespace
+
+namespace enco
+{
+
+std::unique_ptr<SubnetStruct> SubnetStructBuilder::build(const ANNBinder *binder) const
+{
+ auto res = make_unique<SubnetStructImpl>();
+
+ auto mname = res->model();
+ auto cname = res->compilation();
+
+ res->def()->append("ANeuralNetworksModel *", mname, ";");
+ res->def()->append("ANeuralNetworksCompilation *", cname, ";");
+
+ res->ctor()->append("ANeuralNetworksModel_create(&", mname, ");");
+ res->dtor()->append("ANeuralNetworksModel_free(", mname, ");");
+
+ binder->module()->operand()->each([&](const ann::OperandID &id, const ann::Operand *info) {
+ // TODO Remove dynamic cast
+ if (auto scalar = dynamic_cast<const ann::ScalarOperand *>(info))
+ {
+ res->ctor() << ScalarOperandDecl{mname, scalar->dtype()};
+ }
+ else if (auto tensor = dynamic_cast<const ann::TensorOperand *>(info))
+ {
+ res->ctor() << TensorOperandDecl{mname, tensor->dtype(), tensor->shape()};
+ }
+ else
+ {
+ throw std::runtime_error{"Unsupported"};
+ }
+
+ if (_weighted.find(info) != _weighted.end())
+ {
+ const auto &base_exp = _base_exprs.at(info);
+ const auto &size_exp = _size_exprs.at(info);
+
+ res->ctor() << WeightDecl{mname, id, base_exp, size_exp};
+ }
+ });
+
+ for (unsigned n = 0; n < binder->module()->operation()->count(); ++n)
+ {
+ auto op = binder->module()->operation()->at(n);
+ res->ctor() << OperationDecl{mname, op};
+ }
+
+ // Emit ANeuralNetworksModel_identifyInputsAndOutputs call
+ res->ctor() << ArgumentDecl{mname, binder};
+
+ // Emit ANeuralNetworksModel_finish call
+ res->ctor()->append("ANeuralNetworksModel_finish(", mname, ");");
+
+ // Create compilation
+ res->ctor()->append("ANeuralNetworksCompilation_create(", mname, ", &", cname, ");");
+ res->dtor()->append("ANeuralNetworksCompilation_free(", cname, ");");
+
+ // Finalize compilation
+ res->ctor()->append("ANeuralNetworksCompilation_finish(", cname, ");");
+
+ return std::move(res);
+}
+
+std::unique_ptr<pp::MultiLineText> SubnetBlockCompiler::compile(const ANNBinder *binder) const
+{
+ auto res = make_unique<pp::LinearDocument>();
+
+ const auto compilation = _compilation_ctx.at(binder);
+
+ res->append("ANeuralNetworksExecution *execution;");
+ res->append("ANeuralNetworksEvent *event;");
+ res->append();
+ res->append("ANeuralNetworksExecution_create(", compilation, ", &execution);");
+
+ // Emit ANeuralNetworksExecution_setInput call(s)
+ for (uint32_t n = 0; n < binder->module()->input()->size(); ++n)
+ {
+ auto bag = binder->input(n);
+ auto base = _mem.base(bag);
+ auto size = _mem.size(bag);
+
+ res->append("ANeuralNetworksExecution_setInput(execution, ", n, ", nullptr, ", base, ", ", size,
+ ");");
+ }
+
+ // Emit ANeuralNetworksExecution_setOutput call(s)
+ for (uint32_t n = 0; n < binder->module()->output()->size(); ++n)
+ {
+ auto bag = binder->output(n);
+ auto base = _mem.base(bag);
+ auto size = _mem.size(bag);
+
+ res->append("ANeuralNetworksExecution_setOutput(execution, ", n, ", nullptr, ", base, ", ",
+ size, ");");
+ }
+
+ res->append("ANeuralNetworksExecution_startCompute(execution, &event);");
+ res->append("ANeuralNetworksEvent_wait(event);");
+ res->append("ANeuralNetworksEvent_free(event);");
+
+ res->append("ANeuralNetworksExecution_free(execution);");
+
+ return std::move(res);
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/CppGen/Subnet.h b/compiler/enco/core/src/CppGen/Subnet.h
new file mode 100644
index 000000000..4a5738876
--- /dev/null
+++ b/compiler/enco/core/src/CppGen/Subnet.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_CPP_GEN_SUBNET_H__
+#define __ENCO_CPP_GEN_SUBNET_H__
+
+#include "ANN/Binder.h"
+#include "CppGen/MemoryContext.h"
+
+#include <pp/MultiLineText.h>
+#include <map>
+#include <set>
+
+namespace enco
+{
+
+/**
+ * @brief A C++ struct that provides Android NN model & compilation
+ */
+struct SubnetStruct
+{
+ virtual ~SubnetStruct() = default;
+
+ /// @brief Return the field name of ANeuralNetworksModel value
+ virtual std::string model(void) const = 0;
+  /// @brief Return the field name of ANeuralNetworksCompilation value
+ virtual std::string compilation(void) const = 0;
+
+ virtual const pp::MultiLineText &def(void) const = 0;
+ virtual const pp::MultiLineText &ctor(void) const = 0;
+ virtual const pp::MultiLineText &dtor(void) const = 0;
+};
+
+class SubnetStructBuilder
+{
+public:
+ std::unique_ptr<SubnetStruct> build(const ANNBinder *binder) const;
+
+public:
+ void expr(const ann::Operand *oper, const std::string &base, const std::string &size)
+ {
+ _weighted.insert(oper);
+ _base_exprs[oper] = base;
+ _size_exprs[oper] = size;
+ }
+
+private:
+ std::set<const ann::Operand *> _weighted;
+ std::map<const ann::Operand *, std::string> _base_exprs;
+ std::map<const ann::Operand *, std::string> _size_exprs;
+};
+
+/**
+ * @brief Generate C++ code that invokes Android NN subnet
+ */
+class SubnetBlockCompiler
+{
+public:
+ SubnetBlockCompiler(const enco::MemoryContext &mem) : _mem(mem)
+ {
+ // DO NOTHING
+ }
+
+public:
+ /// @brief Specify how to access ANeuralNetworksCompilation value (C expression)
+ void bind(const ANNBinder *binder, const std::string &exp) { _compilation_ctx[binder] = exp; }
+
+public:
+ std::unique_ptr<pp::MultiLineText> compile(const ANNBinder *binder) const;
+
+private:
+ const enco::MemoryContext &_mem;
+ std::map<const ANNBinder *, std::string> _compilation_ctx;
+};
+
+} // namespace enco
+
+#endif // __ENCO_CPP_GEN_SUBNET_H__
diff --git a/compiler/enco/core/src/Dims.h b/compiler/enco/core/src/Dims.h
new file mode 100644
index 000000000..e0a4fd44d
--- /dev/null
+++ b/compiler/enco/core/src/Dims.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DIMS_H__
+#define __DIMS_H__
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+static inline std::vector<uint32_t> as_dims(const nncc::core::ADT::tensor::Shape &shape)
+{
+ std::vector<uint32_t> res;
+
+ for (uint32_t axis = 0; axis < shape.rank(); ++axis)
+ {
+ res.emplace_back(shape.dim(axis));
+ }
+
+ return res;
+}
+
+#endif // __DIMS_H__
diff --git a/compiler/enco/core/src/IRUtils.cpp b/compiler/enco/core/src/IRUtils.cpp
new file mode 100644
index 000000000..59f6b0dbe
--- /dev/null
+++ b/compiler/enco/core/src/IRUtils.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IRUtils.h"
+
+#include <cassert>
+
+namespace enco
+{
+
+/**
+ * @brief Substitute all the USE occurrences of an object with another object
+ * @param from Object to be replaced
+ * @param into Object to be used instead
+ * NOTE This may be used when something like -- 'from' will be removed so we need
+ * to replace object Consumers that use 'from' to 'into'
+ * EXAMPLE
+ * {
+ * subst(child, bigone);
+ * m->entity()->object()->destroy(child);
+ * }
+ * This code will change all the Consumers that use 'child' to 'bigone' and
+ * destroy the 'child' object.
+ */
+void subst(coco::Object *from, coco::Object *into)
+{
+ assert(from != into);
+
+ while (!from->uses()->empty())
+ {
+ auto use = *(from->uses()->begin());
+
+ use->value(into);
+ }
+}
+
+std::vector<coco::Instr *> instr_sequence(coco::Module *m)
+{
+ std::vector<coco::Instr *> res;
+
+ for (auto B = m->block()->head(); B; B = B->next())
+ {
+ for (auto I = B->instr()->head(); I; I = I->next())
+ {
+ res.emplace_back(I);
+ }
+ }
+
+ return res;
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/IRUtils.h b/compiler/enco/core/src/IRUtils.h
new file mode 100644
index 000000000..da0754303
--- /dev/null
+++ b/compiler/enco/core/src/IRUtils.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_IR_UTILS_H__
+#define __ENCO_IR_UTILS_H__
+
+#include <coco/IR.h>
+
+#include <vector>
+
+namespace enco
+{
+
+/**
+ * @brief Replace all the "USE" of 'from' with 'into'
+ *
+ * NOTE subst(from, into) WILL NOT update 'DEF'
+ */
+void subst(coco::Object *from, coco::Object *into);
+
+/**
+ * @brief Return instructions in execution order
+ */
+std::vector<coco::Instr *> instr_sequence(coco::Module *m);
+
+} // namespace enco
+
+#endif // __ENCO_IR_UTILS_H__
diff --git a/compiler/enco/core/src/IRValidator.cpp b/compiler/enco/core/src/IRValidator.cpp
new file mode 100644
index 000000000..1337b88e4
--- /dev/null
+++ b/compiler/enco/core/src/IRValidator.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IRValidator.h"
+
+#include <cassert>
+
+namespace enco
+{
+
+coco::FeatureShape output_shape(coco::Conv2D *conv2D)
+{
+ auto load = conv2D->arg()->asLoad();
+ assert(load);
+
+ auto ifm = load->object()->asFeature();
+ assert(ifm);
+
+ auto ker = conv2D->ker();
+ auto stride = conv2D->stride();
+ auto pad = conv2D->pad();
+
+ auto striding_width = ifm->shape().width() + pad->left() + pad->right() - ker->shape().width();
+ auto striding_height = ifm->shape().height() + pad->top() + pad->bottom() - ker->shape().height();
+
+  // The usual formula is round(striding_width / stride->horizontal()).
+  // In coco IR, striding_width is expected to be a multiple of stride->horizontal() (checked by
+  // the asserts below), so the rounding was dropped. The same applies to striding_height.
+ assert(striding_width % stride->horizontal() == 0);
+ assert(striding_height % stride->vertical() == 0);
+
+ auto ofm_width = striding_width / stride->horizontal() + 1;
+ auto ofm_height = striding_height / stride->vertical() + 1;
+
+ return coco::FeatureShape(ifm->shape().batch(), ker->shape().count(), ofm_height, ofm_width);
+}
+
+bool validate_output_shape(Code *code)
+{
+ auto module = code->module();
+
+ // for each eval ( conv2d ( ... ) ), check the output shape of conv2D matches output of eval
+ for (auto blk = module->block()->head(); blk; blk = blk->next())
+ {
+ for (auto instr = blk->instr()->head(); instr; instr = instr->next())
+ {
+ auto eval = instr->asEval();
+ if (eval == nullptr)
+ continue;
+
+ auto op = eval->op();
+ if (!op->asConv2D())
+ continue;
+
+ auto conv2D = op->asConv2D();
+ auto expected_shape = output_shape(conv2D);
+
+ auto eval_out = eval->out()->asFeature();
+ assert(eval_out);
+
+ auto actual_shape = eval_out->shape();
+
+ if (actual_shape != expected_shape)
+ return false;
+ }
+ }
+ return true;
+}
+
+bool validate(Code *code) { return validate_output_shape(code); }
+
+} // namespace enco
diff --git a/compiler/enco/core/src/IRValidator.h b/compiler/enco/core/src/IRValidator.h
new file mode 100644
index 000000000..f4adb0a5e
--- /dev/null
+++ b/compiler/enco/core/src/IRValidator.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_IR_VALIDATOR_H__
+#define __ENCO_IR_VALIDATOR_H__
+
+#include "Code.h"
+
+namespace enco
+{
+
+bool validate(Code *code);
+
+} // namespace enco
+
+#endif // __ENCO_IR_VALIDATOR_H__
diff --git a/compiler/enco/core/src/IRValidator.test.cpp b/compiler/enco/core/src/IRValidator.test.cpp
new file mode 100644
index 000000000..14cda6173
--- /dev/null
+++ b/compiler/enco/core/src/IRValidator.test.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IRValidator.h"
+
+#include "Code.h"
+
+#include <gtest/gtest.h>
+
+#include <array>
+
+namespace
+{
+
+using IntList4 = std::array<int, 4>;
+using IntList2 = std::array<int, 2>;
+
+} // namespace
+
+// The layout of ifm, ker, ofm is NHWC, pad == {top, bottom, left, right}, and stride == {vertical,
+// horizontal}.
+std::unique_ptr<coco::Module> get_conv2D(IntList4 ifm, IntList4 ker, IntList4 ofm, IntList4 pad,
+ IntList2 stride)
+{
+ auto module = coco::Module::create();
+ auto block = module->entity()->block()->create();
+ auto eval = module->entity()->instr()->create<coco::Eval>();
+ auto load = module->entity()->op()->create<coco::Load>();
+ auto conv2D = module->entity()->op()->create<coco::Conv2D>();
+
+ auto ifm_obj = module->entity()->object()->create<coco::FeatureObject>();
+ coco::FeatureShape ifm_shape(ifm[0], ifm[3], ifm[1], ifm[2]);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(ifm_shape));
+
+ auto ofm_obj = module->entity()->object()->create<coco::FeatureObject>();
+ coco::FeatureShape ofm_shape(ofm[0], ofm[3], ofm[1], ofm[2]);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(ofm_shape));
+
+ auto ker_obj = module->entity()->object()->create<coco::KernelObject>();
+ nncc::core::ADT::kernel::Shape ker_shape(ker[0], ker[3], ker[1], ker[2]);
+ ker_obj->layout(coco::KernelLayouts::NHWC::create(ker_shape));
+
+ // linking entities
+ module->block()->append(block);
+ block->instr()->append(eval);
+ eval->op(conv2D);
+ eval->out(ofm_obj);
+ load->object(ifm_obj);
+ conv2D->ker(ker_obj);
+ conv2D->arg(load);
+
+ // param setting
+ conv2D->pad()->top(pad[0]).bottom(pad[1]).left(pad[2]).right(pad[3]);
+ conv2D->stride()->vertical(stride[0]).horizontal(stride[1]);
+
+ return std::move(module);
+}
+
+TEST(IRValidatorTest, conv2D_simple)
+{
+ auto ifm_nhwc = IntList4{1, 3, 3, 2};
+ auto ker_nhwc = IntList4{1, 1, 1, 2};
+ auto ofm_nhwc = IntList4{1, 3, 3, 1};
+
+ auto pad_tblr = IntList4{0, 0, 0, 0};
+ auto stride_vh = IntList2{1, 1};
+
+ auto module = get_conv2D(ifm_nhwc, ker_nhwc, ofm_nhwc, pad_tblr, stride_vh);
+ enco::Code code{module.get(), nullptr};
+
+ ASSERT_TRUE(enco::validate(&code));
+}
+
+TEST(IRValidatorTest, conv2D_stride_2)
+{
+ auto ifm_nhwc = IntList4{1, 4, 4, 3};
+ auto ker_nhwc = IntList4{2, 2, 2, 3};
+ auto ofm_nhwc = IntList4{1, 3, 3, 2};
+
+ auto pad_tblr = IntList4{1, 1, 1, 1};
+ auto stride_vh = IntList2{2, 2};
+
+ auto module = get_conv2D(ifm_nhwc, ker_nhwc, ofm_nhwc, pad_tblr, stride_vh);
+ enco::Code code{module.get(), nullptr};
+
+ ASSERT_TRUE(enco::validate(&code));
+}
+
+TEST(IRValidatorTest, conv2D_output_batch_check)
+{
+ auto ifm_nhwc = IntList4{1, 2, 2, 2};
+ auto ker_nhwc = IntList4{3, 1, 1, 2}; // expected output depth is 3
+ auto ofm_nhwc = IntList4{1, 2, 2, 1}; // but 1
+
+ auto pad_tblr = IntList4{0, 0, 0, 0};
+ auto stride_vh = IntList2{1, 1};
+
+ auto module = get_conv2D(ifm_nhwc, ker_nhwc, ofm_nhwc, pad_tblr, stride_vh);
+ enco::Code code{module.get(), nullptr};
+
+ ASSERT_FALSE(enco::validate(&code));
+}
+
+TEST(IRValidatorTest, conv2D_wrong_HW)
+{
+ auto ifm_nhwc = IntList4{1, 2, 2, 1};
+ auto ker_nhwc = IntList4{1, 2, 2, 1};
+ auto ofm_nhwc = IntList4{1, 1, 1, 1}; // HW should be 2, 2
+
+ auto pad_tblr = IntList4{1, 1, 1, 1};
+ auto stride_vh = IntList2{2, 2};
+
+ auto module = get_conv2D(ifm_nhwc, ker_nhwc, ofm_nhwc, pad_tblr, stride_vh);
+ enco::Code code{module.get(), nullptr};
+
+ ASSERT_FALSE(enco::validate(&code));
+}
diff --git a/compiler/enco/core/src/Pass.h b/compiler/enco/core/src/Pass.h
new file mode 100644
index 000000000..d78cfaad3
--- /dev/null
+++ b/compiler/enco/core/src/Pass.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_PASS_H__
+#define __ENCO_PASS_H__
+
+#include "Session.h"
+
+#include <string>
+
+namespace enco
+{
+
+class Pass
+{
+public:
+ class Name
+ {
+ public:
+ Name(const std::string &content) : _content{content}
+ {
+ // DO NOTHING
+ }
+
+ Name(const Name &) = default;
+ Name(Name &&) = default;
+
+ ~Name() = default;
+
+ public:
+ const std::string &content(void) const { return _content; }
+
+ private:
+ std::string _content;
+ };
+
+public:
+ Pass(const Name &name) : _name{name}
+ {
+ // DO NOTHING
+ }
+
+ Pass(const Pass &) = delete;
+ Pass(Pass &&) = delete;
+
+ virtual ~Pass() = default;
+
+public:
+ const Name &name(void) const { return _name; }
+
+public:
+ virtual void run(const SessionID &) const = 0;
+
+private:
+ Name _name;
+};
+
+static inline Pass::Name pass_name(const std::string &name) { return Pass::Name{name}; }
+
+} // namespace enco
+
+#define PASS_CTOR(NAME) \
+ NAME() : enco::Pass { enco::pass_name(#NAME) }
+
+#endif // __ENCO_PASS_H__
diff --git a/compiler/enco/core/src/Pass.test.cpp b/compiler/enco/core/src/Pass.test.cpp
new file mode 100644
index 000000000..112bd7478
--- /dev/null
+++ b/compiler/enco/core/src/Pass.test.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pass.h"
+
+#include <gtest/gtest.h>
+
+namespace
+{
+
+struct ExamplePass final : public enco::Pass
+{
+ PASS_CTOR(ExamplePass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const enco::SessionID &) const override { return; }
+};
+
+} // namespace
+
+TEST(PASS, ctor)
+{
+ ExamplePass pass;
+
+ ASSERT_EQ(pass.name().content(), "ExamplePass");
+}
diff --git a/compiler/enco/core/src/Pipeline.h b/compiler/enco/core/src/Pipeline.h
new file mode 100644
index 000000000..8ab43c16a
--- /dev/null
+++ b/compiler/enco/core/src/Pipeline.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_PIPELINE_H__
+#define __ENCO_PIPELINE_H__
+
+#include "Pass.h"
+
+#include <memory>
+#include <vector>
+#include <cstdint>
+
+namespace enco
+{
+
+class Pipeline
+{
+public:
+ uint32_t size(void) const { return _passes.size(); }
+
+public:
+ const Pass &at(uint32_t n) const { return *(_passes.at(n)); }
+
+public:
+ void append(std::unique_ptr<Pass> &&pass) { _passes.emplace_back(std::move(pass)); }
+
+private:
+ std::vector<std::unique_ptr<Pass>> _passes;
+};
+
+} // namespace enco
+
+#endif // __ENCO_PIPELINE_H__
diff --git a/compiler/enco/core/src/Pipeline.test.cpp b/compiler/enco/core/src/Pipeline.test.cpp
new file mode 100644
index 000000000..1cd730e98
--- /dev/null
+++ b/compiler/enco/core/src/Pipeline.test.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pipeline.h"
+
+#include <gtest/gtest.h>
+
+TEST(PIPELINE, default_ctor)
+{
+ enco::Pipeline pipeline;
+
+ ASSERT_EQ(pipeline.size(), 0);
+}
diff --git a/compiler/enco/core/src/Session.cpp b/compiler/enco/core/src/Session.cpp
new file mode 100644
index 000000000..034f23892
--- /dev/null
+++ b/compiler/enco/core/src/Session.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Session.h"
+
+#include <stdex/Memory.h>
+
+#include <map>
+#include <memory>
+
+using stdex::make_unique;
+
+namespace
+{
+
+std::map<enco::SessionID, std::unique_ptr<enco::Code>> sess_to_code;
+std::map<const coco::Module *, enco::SessionID> module_to_sess;
+std::map<const coco::Data *, enco::SessionID> data_to_sess;
+
+} // namespace
+
+namespace enco
+{
+
+SessionID make_session(coco::Module *m, coco::Data *d)
+{
+ static uint32_t sess = 0;
+ SessionID curr{sess++};
+
+ sess_to_code[curr] = make_unique<Code>(m, d);
+ module_to_sess[m] = curr;
+ data_to_sess[d] = curr;
+
+ return curr;
+}
+
+SessionID session(const coco::Module *m) { return module_to_sess.at(m); }
+SessionID session(const coco::Data *d) { return data_to_sess.at(d); }
+
+coco::Module *module(const SessionID &sess) { return sess_to_code.at(sess)->module(); }
+coco::Data *data(const SessionID &sess) { return sess_to_code.at(sess)->data(); }
+
+Code *code(const SessionID &sess) { return sess_to_code.at(sess).get(); }
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Session.h b/compiler/enco/core/src/Session.h
new file mode 100644
index 000000000..b6d502f3b
--- /dev/null
+++ b/compiler/enco/core/src/Session.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_SESSION_H__
+#define __ENCO_SESSION_H__
+
+#include "Code.h"
+
+namespace enco
+{
+
+// TODO Rewrite this definition
+using SessionID = uint32_t;
+
+SessionID make_session(coco::Module *m, coco::Data *d);
+
+SessionID session(const coco::Module *m);
+SessionID session(const coco::Data *d);
+
+coco::Module *module(const SessionID &);
+coco::Data *data(const SessionID &);
+
+static inline coco::Module *module(const coco::Data *d) { return module(session(d)); }
+static inline coco::Data *data(const coco::Module *m) { return data(session(m)); }
+
+// WARN This API is introduced just for backward compatibility
+// Do NOT use this anymore as it will be removed
+Code *code(const SessionID &);
+
+} // namespace enco
+
+#endif // __ENCO_SESSION_H__
diff --git a/compiler/enco/core/src/String.h b/compiler/enco/core/src/String.h
new file mode 100644
index 000000000..0f04f1ffe
--- /dev/null
+++ b/compiler/enco/core/src/String.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_STRING_H__
+#define __ENCO_STRING_H__
+
+//
+// String-manipulating routines
+//
+#include <ostream>
+#include <sstream>
+
+#include <string>
+
+namespace enco
+{
+
+template <typename It> void concat(std::ostream &os, const std::string &sep, It beg, It end)
+{
+ uint32_t count = 0;
+
+ for (auto it = beg; it != end; ++it, ++count)
+ {
+ if (count == 0)
+ {
+ os << *it;
+ }
+ else
+ {
+ os << sep << *it;
+ }
+ }
+}
+
+template <typename It> std::string concat(const std::string &sep, It beg, It end)
+{
+ std::stringstream ss;
+ concat(ss, sep, beg, end);
+ return ss.str();
+}
+
+} // namespace enco
+
+#endif // __ENCO_STRING_H__
diff --git a/compiler/enco/core/src/Support/Debugging.cpp b/compiler/enco/core/src/Support/Debugging.cpp
new file mode 100644
index 000000000..bd65a27d8
--- /dev/null
+++ b/compiler/enco/core/src/Support/Debugging.cpp
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Debugging.h"
+
+#include <pp/LinearDocument.h>
+#include <pp/MultiLineTextUtils.h>
+
+#include <stack>
+
+#include <iostream>
+
+#define DEBUGGING_API_P(NAME, TYPE, VAR) \
+ static void _##NAME(const TYPE *); \
+ void NAME(long p) { NAME(reinterpret_cast<const TYPE *>(p)); } \
+ void NAME(const TYPE *p) \
+ { \
+ if (p == nullptr) \
+ { \
+ std::cout << "(nullptr)" << std::endl; \
+ } \
+ else \
+ { \
+ _##NAME(p); \
+ } \
+ } \
+ void _##NAME(const TYPE *VAR)
+
+namespace
+{
+
+class SectionBuilder
+{
+public:
+ SectionBuilder(const std::string &tag) : _tag{tag}
+ {
+ // DO NOTHING
+ }
+
+public:
+ template <typename Callback> pp::LinearDocument build(Callback cb) const
+ {
+ pp::LinearDocument res;
+
+ res.append(_tag, " {");
+ res.indent();
+
+ cb(res);
+
+ res.unindent();
+ res.append("}");
+
+ return res;
+ }
+
+private:
+ std::string _tag;
+};
+
+template <typename Callback>
+pp::LinearDocument operator<<(const SectionBuilder &builder, Callback cb)
+{
+ return builder.build(std::forward<Callback>(cb));
+}
+
+SectionBuilder section(const std::string &tag) { return SectionBuilder{tag}; }
+} // namespace
+
+/**
+ * SECTION: Bag
+ */
+namespace
+{
+
+pp::LinearDocument describe(const coco::Bag *bag)
+{
+ pp::LinearDocument doc;
+
+ doc.append("addr: ", bag);
+ doc.append("size: ", bag->size());
+ // TODO Print Read
+ // TODO Print Update
+ // TODO Print Dep
+ return doc;
+}
+
+} // namespace
+
+DEBUGGING_API_P(enco_dump_all_bags, coco::Module, m)
+{
+ for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+ {
+ auto bag = m->entity()->bag()->at(n);
+ assert(bag != nullptr);
+
+ auto set = [bag](pp::LinearDocument &doc) { doc.append(describe(bag)); };
+ auto desc = section("bag").build(set);
+
+ std::cout << desc << std::endl;
+ }
+}
+
+/**
+ * SECTION: Object
+ */
+namespace
+{
+std::string op_kind(const coco::Op *op);
+
+/**
+ * @brief Return the def(producer) type of object
+ */
+std::string def_kind(const coco::Def *def)
+{
+ if (def)
+ {
+ if (auto instr = dynamic_cast<coco::Instr *>(def->producer()))
+ {
+ std::stringstream ss;
+
+ if (auto eval = instr->asEval())
+ {
+ ss << op_kind(eval->op()) << "(" << instr << ")";
+ return ss.str();
+ }
+ else if (instr->asCopy())
+ {
+ ss << "Copy(" << instr << ")";
+ return ss.str();
+ }
+ else if (instr->asShuffle())
+ {
+ ss << "Shuffle(" << instr << ")";
+ return ss.str();
+ }
+ }
+ else
+ {
+ return "(unknown)";
+ }
+ }
+
+ return "(none)";
+}
+
+pp::LinearDocument describe(const coco::Object *obj)
+{
+ pp::LinearDocument doc;
+
+ doc.append("addr: ", obj);
+ doc.append("bag: ", obj->bag());
+ doc.append("producer: ", def_kind(obj->def()));
+ // TODO Show Uses
+  // TODO Show FeatureObject/KernelObject info
+
+ return doc;
+}
+
+} // namespace
+
+DEBUGGING_API_P(enco_dump_all_objects, coco::Module, m)
+{
+ for (uint32_t n = 0; n < m->entity()->object()->size(); ++n)
+ {
+ auto obj = m->entity()->object()->at(n);
+ assert(obj != nullptr);
+
+ auto set = [obj](pp::LinearDocument &doc) { doc.append(describe(obj)); };
+ auto desc = section("object").build(set);
+
+ std::cout << desc << std::endl;
+ }
+}
+
+/**
+ * SECTION: Op
+ */
+namespace
+{
+
+struct OpTree
+{
+public:
+ OpTree(const coco::Op *op) : _op{op}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const coco::Op *root(void) const { return _op; }
+
+private:
+ const coco::Op *_op;
+};
+
+std::string op_kind(const coco::Op *op)
+{
+ struct OpKind : public coco::Op::Visitor<std::string>
+ {
+ std::string visit(const coco::Load *) override { return "Load"; }
+ std::string visit(const coco::Conv2D *) override { return "Conv2D"; }
+ std::string visit(const coco::MaxPool2D *) override { return "MaxPool2D"; }
+ std::string visit(const coco::AvgPool2D *) override { return "AvgPool2D"; }
+ std::string visit(const coco::PadF *) override { return "PadF"; }
+ std::string visit(const coco::ReLU *) override { return "ReLU"; }
+ std::string visit(const coco::Add *) override { return "Add"; }
+ std::string visit(const coco::Mul *) override { return "Mul"; }
+ std::string visit(const coco::ConcatF *) override { return "ConcatF"; }
+ std::string visit(const coco::Sub *) override { return "Sub"; }
+ std::string visit(const coco::Sqrt *) override { return "Sqrt"; }
+ std::string visit(const coco::Div *) override { return "Div"; }
+ };
+
+ OpKind v;
+
+ return op->accept(v);
+}
+
+pp::LinearDocument describe(const coco::Padding2D *pad)
+{
+ pp::LinearDocument doc;
+
+ doc.append("top: ", pad->top());
+ doc.append("bottom: ", pad->bottom());
+ doc.append("left: ", pad->left());
+ doc.append("right: ", pad->right());
+
+ return doc;
+}
+
+pp::LinearDocument describe(const coco::Stride2D *stride)
+{
+ pp::LinearDocument doc;
+
+ doc.append("vertical: ", stride->vertical());
+  doc.append("horizontal: ", stride->horizontal());
+
+ return doc;
+}
+
+pp::LinearDocument describe(const coco::Conv2D *conv)
+{
+ pp::LinearDocument doc;
+
+ doc.append("arg: ", conv->arg());
+ doc.append("ker: ", conv->ker());
+ doc.append("group: ", conv->group());
+
+ if (auto pad = conv->pad())
+ {
+ auto set = [pad](pp::LinearDocument &doc) { doc.append(describe(pad)); };
+ auto desc = section("pad").build(set);
+ doc.append(desc);
+ }
+
+ if (auto stride = conv->stride())
+ {
+ auto set = [stride](pp::LinearDocument &doc) { doc.append(describe(stride)); };
+ auto desc = section("stride").build(set);
+ doc.append(desc);
+ }
+
+ return doc;
+}
+
+pp::LinearDocument describe(const coco::Op *op)
+{
+ pp::LinearDocument doc;
+
+ doc.append("addr: ", op);
+ doc.append("kind: ", op_kind(op));
+ doc.append("parent(instr): ", op->parent());
+ doc.append("up(op): ", op->up());
+
+ if (auto conv = op->asConv2D())
+ {
+ auto set = [conv](pp::LinearDocument &doc) { doc.append(describe(conv)); };
+ auto desc = section("conv2d").build(set);
+ doc.append(desc);
+ }
+ else if (auto load = op->asLoad())
+ {
+ auto set = [load](pp::LinearDocument &doc) { doc.append(describe(load->object())); };
+ auto desc = section("load").build(set);
+ doc.append(desc);
+ }
+
+ return doc;
+}
+
+pp::LinearDocument describe(const OpTree &t, bool verbose = false)
+{
+ pp::LinearDocument doc;
+
+ struct Frame
+ {
+ public:
+ Frame(const coco::Op *op) : _op{op}, _indicator{0}
+ {
+ // op SHOULD BE valid
+ assert(_op != nullptr);
+ }
+
+ public:
+ /**
+ * @brief Return a pointer to coco::Op of interest
+ */
+ const coco::Op *op(void) const { return _op; }
+
+ /**
+ * @brief Return the indicator
+ *
+ * Let's assume that the arity of a coco::Op of interest is N
+ * INDICATOR 0 -> Print the op itself
+ * INDICATOR 1 -> Print the first argument
+ * ...
+ * INDICATOR N -> Print the N-th argument
+ * INDICATOR N + 1 -> Done
+ */
+ uint32_t indicator(void) const { return _indicator; }
+
+ public:
+ void advance(void) { _indicator += 1; }
+
+ private:
+ const coco::Op *_op;
+ uint32_t _indicator;
+ };
+
+ std::stack<Frame> stack;
+
+ stack.emplace(t.root());
+
+ while (stack.size() > 0)
+ {
+ auto op = stack.top().op();
+ uint32_t indicator = stack.top().indicator();
+
+ if (indicator == 0)
+ {
+ doc.append(op_kind(op), " (", op, ")");
+
+ doc.indent();
+ stack.top().advance();
+
+ // TODO Need to update it to better design for verbose flag
+ if (verbose)
+ {
+ auto set = [op](pp::LinearDocument &doc) { doc.append(describe(op)); };
+ auto desc = section("op").build(set);
+ doc.append(desc);
+ }
+ }
+ else if (indicator < op->arity() + 1)
+ {
+ stack.top().advance();
+ stack.emplace(op->arg(indicator - 1));
+ }
+ else
+ {
+ assert(indicator == op->arity() + 1);
+ doc.unindent();
+ stack.pop();
+ }
+ }
+
+ return doc;
+}
+
+} // namespace
+
+DEBUGGING_API_P(enco_dump_op, coco::Op, op)
+{
+ {
+ std::cout << describe(op) << std::endl;
+ }
+}
+
+DEBUGGING_API_P(enco_dump_op_tree, coco::Op, op)
+{
+ {
+ std::cout << describe(OpTree(op)) << std::endl;
+ }
+}
+
+DEBUGGING_API_P(enco_dump_all_ops, coco::Module, m)
+{
+ SectionBuilder section_builder{"op"};
+
+ for (uint32_t n = 0; n < m->entity()->op()->size(); ++n)
+ {
+ auto op = m->entity()->op()->at(n);
+ assert(op != nullptr);
+
+ auto desc = section("op").build([op](pp::LinearDocument &doc) { doc.append(describe(op)); });
+
+ std::cout << desc << std::endl;
+ }
+}
+
+/**
+ * SECTION: Instr
+ */
+namespace
+{
+
+std::string kind(const coco::Instr *ins)
+{
+ struct InstrKind : public coco::Instr::Visitor<std::string>
+ {
+ std::string visit(const coco::Eval *) override { return "Eval"; }
+ std::string visit(const coco::Copy *) override { return "Copy"; }
+ std::string visit(const coco::Shuffle *) override { return "Shuffle"; }
+ };
+
+ InstrKind v;
+
+ return ins->accept(v);
+}
+
+pp::LinearDocument describe(const coco::Instr *ins, bool verbose = false)
+{
+ pp::LinearDocument doc;
+
+ doc.append("addr: ", ins);
+ doc.append("kind: ", kind(ins));
+ doc.append("parent: ", ins->parent());
+
+ // TODO Need to update it to better design for verbose flag
+ if (verbose)
+ {
+ if (auto eval = ins->asEval())
+ {
+ auto optset = [eval, verbose](pp::LinearDocument &doc) {
+ doc.append(describe(OpTree(eval->op()), verbose));
+ };
+ auto optdesc = section("op").build(optset);
+ doc.append(optdesc);
+
+ auto outset = [eval](pp::LinearDocument &doc) { doc.append(describe(eval->out())); };
+ auto outdesc = section("out").build(outset);
+ doc.append(outdesc);
+ }
+ else if (auto copy = ins->asCopy())
+ {
+ auto from = [copy](pp::LinearDocument &doc) { doc.append(describe(copy->from())); };
+ auto into = [copy](pp::LinearDocument &doc) { doc.append(describe(copy->into())); };
+
+ auto fdesc = section("from").build(from);
+ doc.append(fdesc);
+
+ auto idesc = section("into").build(into);
+ doc.append(idesc);
+ }
+ }
+
+ return doc;
+}
+
+} // namespace
+
+DEBUGGING_API_P(enco_dump_all_instrs, coco::Module, m)
+{
+ for (uint32_t n = 0; n < m->entity()->instr()->size(); ++n)
+ {
+ auto ins = m->entity()->instr()->at(n);
+ assert(ins != nullptr);
+
+ auto setter = [ins](pp::LinearDocument &doc) { doc.append(describe(ins)); };
+ auto desc = section("instr").build(setter);
+
+ std::cout << desc << std::endl;
+ }
+}
+
+DEBUGGING_API_P(enco_dump_all_instrs_v, coco::Module, m)
+{
+ for (uint32_t n = 0; n < m->entity()->instr()->size(); ++n)
+ {
+ auto ins = m->entity()->instr()->at(n);
+ assert(ins != nullptr);
+
+ auto setter = [ins](pp::LinearDocument &doc) { doc.append(describe(ins, true)); };
+ auto desc = section("instr").build(setter);
+
+ std::cout << desc << std::endl;
+ }
+}
+
+DEBUGGING_API_P(enco_dump_instr, coco::Instr, ins)
+{
+ auto setter = [ins](pp::LinearDocument &doc) { doc.append(describe(ins, true)); };
+ auto desc = section("instr").build(setter);
+
+ std::cout << desc << std::endl;
+}
+
+/**
+ * SECTION: Block
+ */
+namespace
+{
+
+pp::LinearDocument describe(const coco::Block *blk)
+{
+ pp::LinearDocument doc;
+
+ for (auto ins = blk->instr()->head(); ins; ins = ins->next())
+ {
+ auto setter = [ins](pp::LinearDocument &doc) { doc.append(describe(ins)); };
+ auto desc = section("instr").build(setter);
+ doc.append(desc);
+ }
+
+ return doc;
+}
+
+} // namespace
+
+DEBUGGING_API_P(enco_dump_block, coco::Block, blk) { std::cout << describe(blk) << std::endl; }
diff --git a/compiler/enco/core/src/Support/Debugging.h b/compiler/enco/core/src/Support/Debugging.h
new file mode 100644
index 000000000..c28356e76
--- /dev/null
+++ b/compiler/enco/core/src/Support/Debugging.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Debugging.h
+ * @brief This file includes various interactive debugging helpers
+ */
+
+#ifndef __ENCO_SUPPORT_DEBUGGING_H__
+#define __ENCO_SUPPORT_DEBUGGING_H__
+
+#include <coco/IR.h>
+
+// NOTE(review): this assumes an LP64-style ABI; on LLP64 targets (e.g. 64-bit
+// Windows) sizeof(long) == 4 != sizeof(void *), so this assertion would fire
+// at compile time — confirm the supported platforms.
+static_assert(sizeof(long) == sizeof(void *), "sizeof(long) == sizeof(pointer)");
+
+/**
+ * Debugging API with a single pointer argument
+ *
+ * Each NAME expands to two overloads: a type-safe pointer overload and a
+ * "long" overload so interactive debugger users can pass a raw address (0x...).
+ */
+#define DEBUGGING_API_P(NAME, TYPE) \
+  void NAME(const TYPE *); \
+  void NAME(long);
+
+/**
+ * Print the details of all the allocated coco::Bag in coco::Module
+ *
+ * (gdb) call enco_dump_all_bags(bag->module())
+ * (gdb) call enco_dump_all_bags(0x...)
+ */
+DEBUGGING_API_P(enco_dump_all_bags, coco::Module);
+
+/**
+ * Print the details of all the allocated coco::Object in coco::Module
+ *
+ * (gdb) call enco_dump_all_objects(obj->module())
+ * (gdb) call enco_dump_all_objects(0x...)
+ */
+DEBUGGING_API_P(enco_dump_all_objects, coco::Module);
+
+/**
+ * Print the details of coco::Op
+ *
+ * (gdb) call enco_dump_op(op)
+ * (gdb) call enco_dump_op(0x....)
+ */
+DEBUGGING_API_P(enco_dump_op, coco::Op);
+
+/**
+ * Print the (simplified) tree layout of coco::Op
+ *
+ * (gdb) call enco_dump_op_tree(op)
+ * (gdb) call enco_dump_op_tree(0x....)
+ */
+DEBUGGING_API_P(enco_dump_op_tree, coco::Op);
+
+/**
+ * Print the details of all the allocated coco::Op in coco::Module
+ *
+ * (gdb) call enco_dump_all_ops(op->module())
+ * (gdb) call enco_dump_all_ops(0x....)
+ */
+DEBUGGING_API_P(enco_dump_all_ops, coco::Module);
+
+/**
+ * Print the details of all the allocated coco::Instr in coco::Module
+ *
+ * (gdb) call enco_dump_all_instrs(instr->module())
+ * (gdb) call enco_dump_all_instrs(0x...)
+ */
+DEBUGGING_API_P(enco_dump_all_instrs, coco::Module);
+
+/**
+ * Print the more details of all the allocated coco::Instr in coco::Module
+ *
+ * (gdb) call enco_dump_all_instrs_v(instr->module())
+ * (gdb) call enco_dump_all_instrs_v(0x...)
+ */
+DEBUGGING_API_P(enco_dump_all_instrs_v, coco::Module);
+
+/**
+ * Print the details of a given coco::Instr
+ *
+ * (gdb) call enco_dump_instr(instr)
+ * (gdb) call enco_dump_instr(0x...)
+ */
+DEBUGGING_API_P(enco_dump_instr, coco::Instr);
+
+/**
+ * Print the details of all the instruction in a given block
+ *
+ * (gdb) call enco_dump_block(b)
+ * (gdb) call enco_dump_block(0x...)
+ */
+DEBUGGING_API_P(enco_dump_block, coco::Block);
+
+#undef DEBUGGING_API_P
+
+#endif // __ENCO_SUPPORT_DEBUGGING_H__
diff --git a/compiler/enco/core/src/Support/Debugging.test.cpp b/compiler/enco/core/src/Support/Debugging.test.cpp
new file mode 100644
index 000000000..49a2ad162
--- /dev/null
+++ b/compiler/enco/core/src/Support/Debugging.test.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Debugging.h"
+
+#include <gtest/gtest.h>
+
+// This test aims to check whether debugging API is actually defined
+// (i.e. it is a link-time smoke test, not a behavioral test).
+// NOTE(review): passing nullptr assumes the dump implementations tolerate a
+// null module/op — confirm against Debugging.cpp.
+TEST(DebuggingTest, defined)
+{
+  enco_dump_op(nullptr);
+  enco_dump_all_ops(nullptr);
+}
diff --git a/compiler/enco/core/src/Transforms/AvgPoolLowering.cpp b/compiler/enco/core/src/Transforms/AvgPoolLowering.cpp
new file mode 100644
index 000000000..17502fb1f
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/AvgPoolLowering.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AvgPoolLowering.h"
+#include "IRUtils.h"
+
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/feature/Shape.h>
+#include <nncc/core/ADT/feature/HWCLayout.h>
+
+#include <set>
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using nncc::core::ADT::feature::num_elements;
+
+namespace
+{
+
+// Return true if all four margins (top/bottom/left/right) of the padding are zero
+bool empty(coco::Padding2D *pad)
+{
+  return (pad->top() == 0) && (pad->bottom() == 0) && (pad->left() == 0) && (pad->right() == 0);
+}
+
+/**
+ * @brief Return a set of AvgPool2D operations (in Eval instruction) that SHOULD be lowered
+ *
+ * A candidate is an Eval whose op is AvgPool2D over a plain Load and whose
+ * divisor scheme is Static — the only shape this pass knows how to rewrite.
+ */
+std::set<coco::AvgPool2D *> candidates(coco::Module *m)
+{
+  std::set<coco::AvgPool2D *> res;
+
+  for (auto I : enco::instr_sequence(m))
+  {
+    if (auto eval = I->asEval())
+    {
+      if (auto avgpool = eval->op()->asAvgPool2D())
+      {
+        /* Originally it was preferred to use `auto load = avgpool->arg()->asLoad()' for
+         * consistent style with other if statements.
+         * Someone may think compiler will be happy because `load` in `if` statement can
+         * be considered as a use, however, it turned out that it is not the case.
+         */
+        if (avgpool->arg()->asLoad())
+        {
+          if (avgpool->divisor() == coco::AvgPool2D::Divisor::Static)
+          {
+            res.insert(avgpool);
+          }
+        }
+      }
+    }
+  }
+
+  return res;
+}
+
+} // namespace
+
+namespace
+{
+namespace ShapeTransform
+{
+
+// Shape transform describing how Padding2D grows a feature map:
+// height/width are enlarged by the respective margins, depth is untouched.
+class Pad
+{
+public:
+  Pad(const coco::Padding2D *pad) : _pad{pad}
+  {
+    // DO NOTHING
+  }
+
+public:
+  /// @brief Return the expected OFM shape for a given IFM shape
+  feature::Shape forward(const feature::Shape &ifm_shape) const
+  {
+    const uint32_t OFM_C = ifm_shape.depth();
+    const uint32_t OFM_H = ifm_shape.height() + _pad->top() + _pad->bottom();
+    const uint32_t OFM_W = ifm_shape.width() + _pad->left() + _pad->right();
+
+    return feature::Shape{OFM_C, OFM_H, OFM_W};
+  }
+
+private:
+  // Not owned; must outlive this transform
+  const coco::Padding2D *_pad;
+};
+
+} // namespace ShapeTransform
+
+// Convenience factory for the Pad shape transform above
+ShapeTransform::Pad shape_xform(const coco::Padding2D *pad) { return ShapeTransform::Pad{pad}; }
+
+} // namespace
+
+namespace
+{
+
+// Builds an Eval instruction computing "ofm = PadF(Load(ifm))" with the
+// margins of the Padding2D this builder was constructed from.
+class PadInstrBuilder final
+{
+public:
+  PadInstrBuilder(const coco::Padding2D *pad) : _pad{pad}
+  {
+    // DO NOTHING
+  }
+
+public:
+  /// @brief Create (in ifm_obj's module) an Eval that pads ifm_obj into ofm_obj
+  coco::Instr *build(coco::FeatureObject *ifm_obj, coco::FeatureObject *ofm_obj) const
+  {
+    assert(ifm_obj->module() == ofm_obj->module());
+    auto m = ifm_obj->module();
+    assert(m != nullptr);
+
+    // Op tree: PadF(Load(ifm_obj))
+    auto load_op = m->entity()->op()->create<coco::Load>();
+
+    load_op->object(ifm_obj);
+
+    auto pad_op = m->entity()->op()->create<coco::PadF>();
+
+    pad_op->arg(load_op);
+
+    // Copy margins from the source padding
+    pad_op->pad()->top(_pad->top());
+    pad_op->pad()->bottom(_pad->bottom());
+    pad_op->pad()->left(_pad->left());
+    pad_op->pad()->right(_pad->right());
+
+    auto pad_instr = m->entity()->instr()->create<coco::Eval>();
+
+    pad_instr->out(ofm_obj);
+    pad_instr->op(pad_op);
+
+    return pad_instr;
+  }
+
+private:
+  // Not owned; must outlive this builder
+  const coco::Padding2D *_pad;
+};
+
+// Convenience factory for PadInstrBuilder
+PadInstrBuilder pad_instr_builder(const coco::Padding2D *pad) { return PadInstrBuilder{pad}; }
+
+} // namespace
+
+namespace
+{
+
+// Rewrites NN-API-incompatible AvgPool2D (Static divisor + padding) as an
+// explicit PadF instruction followed by a padding-free PaddingExcluded pool.
+class AvgPoolRewritePass
+{
+private:
+  void runOnModule(coco::Module *m) const;
+
+public:
+  void runOnCode(enco::Code *) const;
+};
+
+void AvgPoolRewritePass::runOnModule(coco::Module *m) const
+{
+  // Lower AvgPool2D op that resides in Eval instruction
+  for (auto avgpool : candidates(m))
+  {
+    auto ins = avgpool->parent();
+    auto load = avgpool->arg()->asLoad();
+
+    // Guaranteed by candidates()
+    assert(ins != nullptr);
+    assert(load != nullptr);
+    assert(avgpool->divisor() == coco::AvgPool2D::Divisor::Static);
+
+    if (empty(avgpool->pad()))
+    {
+      // NOTE If there is no padding, Static and PaddingExcluded schemes are equivalent
+      avgpool->divisor(coco::AvgPool2D::Divisor::PaddingExcluded);
+    }
+    else
+    {
+      // Before: Static AvgPool2D with Padding
+      // After: PadF; PaddingExcluded AvgPool2D without Padding
+
+      // Create PadF
+      auto ifm_obj = load->object()->asFeature();
+      assert(ifm_obj != nullptr);
+
+      // Intermediate (padded) feature: fresh bag + BHWC-layout object
+      auto pad_shape = shape_xform(avgpool->pad()).forward(ifm_obj->shape());
+      auto pad_bag = m->entity()->bag()->create(num_elements(pad_shape));
+      auto pad_obj = m->entity()->object()->create<coco::FeatureObject>();
+
+      pad_obj->bag(pad_bag);
+      pad_obj->layout(coco::FeatureLayouts::BHWC::create(pad_shape));
+
+      auto pad_instr = pad_instr_builder(avgpool->pad()).build(ifm_obj, pad_obj);
+
+      // Insert PadF before AvgPool2D
+      pad_instr->insertBefore(ins);
+
+      // Rewrite AvgPool2D as PaddingExcluded AvgPool2D without Padding
+      load->object(pad_obj);
+
+      avgpool->divisor(coco::AvgPool2D::Divisor::PaddingExcluded);
+      avgpool->pad()->top(0);
+      avgpool->pad()->bottom(0);
+      avgpool->pad()->left(0);
+      avgpool->pad()->right(0);
+    }
+  }
+}
+
+void AvgPoolRewritePass::runOnCode(enco::Code *code) const { runOnModule(code->module()); }
+
+} // namespace
+
+namespace enco
+{
+
+// Public entry point: run the AvgPool lowering pass over the given code
+void lower_avgpool(enco::Code *code)
+{
+  AvgPoolRewritePass pass;
+  pass.runOnCode(code);
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/AvgPoolLowering.h b/compiler/enco/core/src/Transforms/AvgPoolLowering.h
new file mode 100644
index 000000000..71a5253df
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/AvgPoolLowering.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// NOTE Guard renamed from the generic __REWRITE_H__ to follow the project
+// convention (cf. __ENCO_CONCAT_LOWERING_H__) and avoid guard collisions.
+#ifndef __ENCO_AVGPOOL_LOWERING_H__
+#define __ENCO_AVGPOOL_LOWERING_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Rewrite NN API-incompatible average pooling
+ */
+void lower_avgpool(enco::Code *);
+
+/// @brief Pass adapter that runs lower_avgpool over a session's code
+struct AvgPoolLoweringPass final : public Pass
+{
+  PASS_CTOR(AvgPoolLoweringPass)
+  {
+    // DO NOTHING
+  }
+
+  void run(const SessionID &sess) const override { lower_avgpool(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_AVGPOOL_LOWERING_H__
diff --git a/compiler/enco/core/src/Transforms/ConcatLowering.cpp b/compiler/enco/core/src/Transforms/ConcatLowering.cpp
new file mode 100644
index 000000000..bf613c983
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/ConcatLowering.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CopyLowering.h"
+#include "IRUtils.h"
+
+#include <nncc/core/ADT/tensor/IndexEnumerator.h>
+
+#include <set>
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+namespace
+{
+
+/**
+ * @brief Map a ConcatF axis onto its 4-D tensor axis index
+ *        (Batch=0, Depth=1, Height=2, Width=3)
+ * @throw std::invalid_argument when the axis holds an unknown value
+ */
+inline uint32_t as_tensor_axis(const coco::ConcatF::Axis &axis)
+{
+  switch (axis)
+  {
+    case coco::ConcatF::Axis::Batch:
+      return 0;
+    case coco::ConcatF::Axis::Depth:
+      return 1;
+    case coco::ConcatF::Axis::Height:
+      return 2;
+    case coco::ConcatF::Axis::Width:
+      return 3;
+    default:
+      break;
+  }
+
+  throw std::invalid_argument{"axis is unknown value"};
+}
+
+// Convert a FeatureLayout's extents into a rank-4 tensor shape, placing each
+// dimension at the axis index defined by as_tensor_axis()
+tensor::Shape as_tensor_shape(const coco::FeatureLayout *l)
+{
+  assert(l != nullptr);
+
+  tensor::Shape res;
+
+  res.resize(4);
+
+  res.dim(as_tensor_axis(coco::ConcatF::Axis::Batch)) = l->batch();
+  res.dim(as_tensor_axis(coco::ConcatF::Axis::Depth)) = l->depth();
+  res.dim(as_tensor_axis(coco::ConcatF::Axis::Height)) = l->height();
+  res.dim(as_tensor_axis(coco::ConcatF::Axis::Width)) = l->width();
+
+  return res;
+}
+
+// Translate a rank-4 tensor index (ordered per as_tensor_axis) into the
+// element index of the given feature layout
+coco::ElemID as_element_index(const coco::FeatureLayout *l, const tensor::Index &idx)
+{
+  assert(l != nullptr);
+  assert(idx.rank() == 4);
+
+  const auto b = idx.at(as_tensor_axis(coco::ConcatF::Axis::Batch));
+  const auto ch = idx.at(as_tensor_axis(coco::ConcatF::Axis::Depth));
+  const auto row = idx.at(as_tensor_axis(coco::ConcatF::Axis::Height));
+  const auto col = idx.at(as_tensor_axis(coco::ConcatF::Axis::Width));
+
+  return l->at(b, ch, row, col);
+}
+
+// Collect every Eval instruction whose op tree is rooted at ConcatF
+std::set<coco::Eval *> candidates(coco::Module *m)
+{
+  std::set<coco::Eval *> res;
+
+  for (auto ins : enco::instr_sequence(m))
+  {
+    if (auto eval = ins->asEval())
+    {
+      if (eval->op()->asConcatF())
+      {
+        res.insert(eval);
+      }
+    }
+  }
+
+  return res;
+}
+
+} // namespace
+
+namespace enco
+{
+
+// Lower each eval(ConcatF(Load(left), Load(right))) into two Shuffle
+// instructions that scatter left/right elements into the output bag, then
+// delete the original op tree and Eval instruction.
+//
+// NOTE(review): this translation unit includes "CopyLowering.h" (not its own
+// "ConcatLowering.h") — looks like a copy-paste slip; verify the declaration
+// of lower_concat still matches its header.
+// TODO The Left->Output and Right->Output branches below differ only by the
+// axis offset; consider factoring out a helper.
+void lower_concat(enco::Code *code)
+{
+  auto m = code->module();
+
+  for (auto eval : candidates(m))
+  {
+    auto concat_f = eval->op()->asConcatF();
+    assert(concat_f != nullptr);
+
+    auto left_feature = concat_f->left()->asLoad()->object()->asFeature();
+    assert(left_feature != nullptr);
+    auto left_shape = as_tensor_shape(left_feature->layout());
+
+    auto right_feature = concat_f->right()->asLoad()->object()->asFeature();
+    assert(right_feature != nullptr);
+    auto right_shape = as_tensor_shape(right_feature->layout());
+
+    auto out_feature = eval->out()->asFeature();
+    assert(out_feature != nullptr);
+    auto out_shape = as_tensor_shape(out_feature->layout());
+
+    auto concat_axe = as_tensor_axis(concat_f->axis());
+
+    // Lower: Left -> Output
+    {
+      auto src_feature = left_feature;
+      auto src_shape = left_shape;
+
+      auto ins = m->entity()->instr()->create<coco::Shuffle>();
+
+      assert(src_feature->bag() != nullptr);
+      assert(out_feature->bag() != nullptr);
+
+      ins->from(src_feature->bag());
+      ins->into(out_feature->bag());
+
+      // Left elements keep their index; copy one element per source index
+      for (tensor::IndexEnumerator e{src_shape}; e.valid(); e.advance())
+      {
+        tensor::Index src_index = e.current();
+        tensor::Index out_index = e.current();
+
+        auto from = as_element_index(src_feature->layout(), src_index);
+        auto into = as_element_index(out_feature->layout(), out_index);
+
+        ins->insert(from, into);
+      }
+
+      ins->insertAfter(eval);
+    }
+
+    // Lower: Right -> Output
+    {
+      auto src_feature = right_feature;
+      auto src_shape = right_shape;
+
+      auto ins = m->entity()->instr()->create<coco::Shuffle>();
+
+      assert(src_feature->bag() != nullptr);
+      assert(out_feature->bag() != nullptr);
+
+      ins->from(src_feature->bag());
+      ins->into(out_feature->bag());
+
+      for (tensor::IndexEnumerator e{src_shape}; e.valid(); e.advance())
+      {
+        tensor::Index src_index = e.current();
+        tensor::Index out_index = e.current();
+
+        // Right elements are shifted along the concat axis by the left extent
+        out_index.at(concat_axe) = out_index.at(concat_axe) + left_shape.dim(concat_axe);
+
+        auto from = as_element_index(src_feature->layout(), src_index);
+        auto into = as_element_index(out_feature->layout(), out_index);
+
+        ins->insert(from, into);
+      }
+
+      ins->insertAfter(eval);
+    }
+
+    // Unlink "Eval" and "ConcatF" op tree
+    eval->op(nullptr);
+
+    // Delete "Concat" op tree
+    m->entity()->op()->destroy(concat_f->left());
+    m->entity()->op()->destroy(concat_f->right());
+    m->entity()->op()->destroy(concat_f);
+
+    // Detach "Eval" instruction from the block
+    eval->detach();
+
+    // Delete "Eval" instruction
+    m->entity()->instr()->destroy(eval);
+  }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/ConcatLowering.h b/compiler/enco/core/src/Transforms/ConcatLowering.h
new file mode 100644
index 000000000..5d20e627b
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/ConcatLowering.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_CONCAT_LOWERING_H__
+#define __ENCO_CONCAT_LOWERING_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Lower eval(Concat(...)) as a sequence of shuffle instructions
+ */
+void lower_concat(enco::Code *code);
+
+/// @brief Pass adapter that runs lower_concat over a session's code
+struct ConcatLoweringPass final : public Pass
+{
+  PASS_CTOR(ConcatLoweringPass)
+  {
+    // DO NOTHING
+  }
+
+  void run(const SessionID &sess) const override { lower_concat(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_CONCAT_LOWERING_H__
diff --git a/compiler/enco/core/src/Transforms/ConstantFolding.cpp b/compiler/enco/core/src/Transforms/ConstantFolding.cpp
new file mode 100644
index 000000000..cd6f22351
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/ConstantFolding.cpp
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConstantFolding.h"
+#include "Session.h"
+
+#include <queue>
+#include <cmath>
+#include <cassert>
+
+namespace
+{
+
+/**
+ * @brief is_constant_bag(b) returns true if the bag "b" has corresponding weight
+ *
+ * "Has weight" here means enco::data(m)->allocated(b), i.e. the bag carries
+ * initial values (e.g. a convolution kernel) rather than runtime data.
+ */
+bool is_constant_bag(coco::Bag *b)
+{
+  auto m = b->module();
+  auto d = enco::data(m);
+  return d->allocated(b);
+}
+
+// Enumerates every constant bag (see is_constant_bag) in the code's module
+class ConstantBagEnumerator
+{
+public:
+  ConstantBagEnumerator(enco::Code *code) : _code{code}
+  {
+    // DO NOTHING
+  }
+
+public:
+  /// @brief Invoke cb(bag) for each constant bag in the module
+  template <typename Callable> void enumerate(Callable cb) const
+  {
+    auto m = _code->module();
+
+    for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+    {
+      auto b = m->entity()->bag()->at(n);
+
+      if (is_constant_bag(b))
+      {
+        cb(b);
+      }
+    }
+  }
+
+private:
+  // Not owned; must outlive this enumerator
+  enco::Code *_code;
+};
+
+// Syntactic sugar: "enumerator << callback" forwards to enumerate(callback)
+template <typename Callable> void operator<<(const ConstantBagEnumerator &e, Callable &&cb)
+{
+  e.enumerate(std::forward<Callable>(cb));
+}
+
+// Convenience factory for ConstantBagEnumerator
+ConstantBagEnumerator constant_bag_enumerator(enco::Code *code)
+{
+  return ConstantBagEnumerator{code};
+}
+
+} // namespace
+
+namespace
+{
+
+/**
+ * @brief Take the first element from the queue (pop-and-return)
+ * @note The queue SHOULD have at least one element.
+ */
+template <typename T> T take(std::queue<T> &q)
+{
+  assert(q.size() > 0);
+  auto res = q.front();
+  q.pop();
+  return res;
+}
+
+} // namespace
+
+namespace
+{
+
+// Fold a Copy whose source bag is constant: materialize the destination bag's
+// weight by copying element-wise through both feature layouts, then detach the
+// Copy and enqueue the (now constant) destination bag for further folding.
+void fold_constant(std::queue<coco::Bag *> &q, coco::Copy *copy)
+{
+  auto m = copy->module();
+  auto d = enco::data(m);
+
+  auto src_obj = copy->from();
+  auto src_bag = src_obj->bag();
+
+  auto dst_obj = copy->into();
+  auto dst_bag = dst_obj->bag();
+
+  // Output calculation should not be folded
+  // TODO Reduce code duplication of this kind
+  if (dst_bag->isOutput())
+  {
+    return;
+  }
+
+  // NOTE d->allocated(bag) returns true if bag has corresponding initial
+  //      values (e.g. convolution kernel)
+  assert(d->allocated(src_bag));
+  assert(!d->allocated(dst_bag));
+
+  // TODO Support other data type
+  auto src_span = d->f32()->weight(src_bag);
+
+  assert(src_span.data() != nullptr);
+
+  auto src_feature = src_obj->asFeature();
+  auto dst_feature = dst_obj->asFeature();
+
+  // TODO Support other object type
+  if (src_feature == nullptr || dst_feature == nullptr)
+  {
+    return;
+  }
+
+  assert(src_feature != nullptr);
+  assert(dst_feature != nullptr);
+
+  // Allocate weight for destination
+  d->f32()->allocate(dst_bag);
+
+  auto dst_span = d->f32()->weight(dst_bag);
+
+  // Source and destination features must have identical extents
+  assert(src_feature->layout()->batch() == dst_feature->layout()->batch());
+  assert(src_feature->layout()->depth() == dst_feature->layout()->depth());
+  assert(src_feature->layout()->height() == dst_feature->layout()->height());
+  assert(src_feature->layout()->width() == dst_feature->layout()->width());
+
+  uint32_t const B = src_feature->layout()->batch();
+  uint32_t const C = src_feature->layout()->depth();
+  uint32_t const H = src_feature->layout()->height();
+  uint32_t const W = src_feature->layout()->width();
+
+  // Element-wise copy via each side's own layout (layouts may differ)
+  for (uint32_t b = 0; b < B; ++b)
+  {
+    for (uint32_t ch = 0; ch < C; ++ch)
+    {
+      for (uint32_t row = 0; row < H; ++row)
+      {
+        for (uint32_t col = 0; col < W; ++col)
+        {
+          auto src_ind = src_feature->layout()->at(b, ch, row, col);
+          auto dst_ind = dst_feature->layout()->at(b, ch, row, col);
+
+          dst_span[dst_ind.value()] = src_span[src_ind.value()];
+        }
+      }
+    }
+  }
+
+  // Let's detach copy
+  copy->from(nullptr);
+  copy->into(nullptr);
+  copy->detach();
+
+  // Let's visit destination bag!
+  q.push(dst_bag);
+}
+
+// Fold an Eval over a UnaryOp with a constant source: evaluate(dst, src) is
+// applied element-wise to fill the destination bag's weight, then the Eval is
+// detached and the destination bag enqueued for further folding.
+template <typename Callable>
+void fold_constant_op(std::queue<coco::Bag *> &q, coco::UnaryOp *op, Callable evaluate)
+{
+  auto m = op->module();
+  auto d = enco::data(m);
+
+  auto ins = op->parent();
+  auto eval = ins->asEval();
+
+  // UnaryOp has only one arg
+  // NOTE(review): this takes the first element of op->uses() as the source
+  //               object — confirm uses() is guaranteed non-empty and
+  //               single-element for a UnaryOp here.
+  auto src_obj = *(op->uses().begin());
+  auto src_bag = src_obj->bag();
+
+  auto dst_obj = eval->out();
+  auto dst_bag = dst_obj->bag();
+
+  // Output calculation should not be folded
+  // TODO Reduce code duplication of this kind
+  if (dst_bag->isOutput())
+  {
+    return;
+  }
+
+  assert(d->allocated(src_bag));
+  assert(!d->allocated(dst_bag));
+
+  // TODO Support other data type
+  auto src_span = d->f32()->weight(src_bag);
+  assert(src_span.data() != nullptr);
+
+  auto src_feature = src_obj->asFeature();
+  auto dst_feature = dst_obj->asFeature();
+
+  // TODO Support other object type
+  if (src_feature == nullptr || dst_feature == nullptr)
+  {
+    return;
+  }
+
+  assert(src_feature != nullptr);
+  assert(dst_feature != nullptr);
+
+  // Allocate weight for destination
+  d->f32()->allocate(dst_bag);
+  auto dst_span = d->f32()->weight(dst_bag);
+
+  // Source and destination features must have identical extents
+  assert(src_feature->layout()->batch() == dst_feature->layout()->batch());
+  assert(src_feature->layout()->depth() == dst_feature->layout()->depth());
+  assert(src_feature->layout()->height() == dst_feature->layout()->height());
+  assert(src_feature->layout()->width() == dst_feature->layout()->width());
+
+  uint32_t const B = src_feature->layout()->batch();
+  uint32_t const C = src_feature->layout()->depth();
+  uint32_t const H = src_feature->layout()->height();
+  uint32_t const W = src_feature->layout()->width();
+
+  for (uint32_t b = 0; b < B; ++b)
+  {
+    for (uint32_t ch = 0; ch < C; ++ch)
+    {
+      for (uint32_t row = 0; row < H; ++row)
+      {
+        for (uint32_t col = 0; col < W; ++col)
+        {
+          auto src_ind = src_feature->layout()->at(b, ch, row, col);
+          auto dst_ind = dst_feature->layout()->at(b, ch, row, col);
+
+          evaluate(&dst_span[dst_ind.value()], src_span[src_ind.value()]);
+        }
+      }
+    }
+  }
+
+  // Let's detach eval
+  eval->out(nullptr);
+  eval->detach();
+
+  // Let's visit destination bag!
+  q.push(dst_bag);
+}
+
+// Fold an Eval over a BinaryOp whose BOTH operand bags are constant:
+// evaluate(dst, lhs, rhs) is applied element-wise; bails out early when the
+// Eval was already folded, when an operand is non-constant, or when any
+// object is not a feature.
+template <typename Callable>
+void fold_constant_op(std::queue<coco::Bag *> &q, coco::BinaryOp *op, Callable evaluate)
+{
+  auto m = op->module();
+  auto d = enco::data(m);
+
+  auto ins = op->parent();
+  auto eval = ins->asEval();
+
+  // Already folded by the other bag
+  if (!eval->out())
+  {
+    return;
+  }
+
+  auto lhs_load = op->left()->asLoad();
+  auto lhs_obj = lhs_load->object();
+  auto lhs_bag = lhs_obj->bag();
+
+  auto rhs_load = op->right()->asLoad();
+  auto rhs_obj = rhs_load->object();
+  auto rhs_bag = rhs_obj->bag();
+
+  auto dst_obj = eval->out();
+  auto dst_bag = dst_obj->bag();
+
+  // Output calculation should not be folded
+  // TODO Reduce code duplication of this kind
+  if (dst_bag->isOutput())
+  {
+    return;
+  }
+
+  // The other bag is non-constant
+  if (!d->allocated(lhs_bag) || !d->allocated(rhs_bag))
+  {
+    return;
+  }
+
+  assert(d->allocated(lhs_bag));
+  assert(d->allocated(rhs_bag));
+  assert(!d->allocated(dst_bag));
+
+  // TODO Support other data type
+  auto lhs_span = d->f32()->weight(lhs_bag);
+  auto rhs_span = d->f32()->weight(rhs_bag);
+  assert(lhs_span.data() != nullptr);
+  assert(rhs_span.data() != nullptr);
+
+  auto lhs_feature = lhs_obj->asFeature();
+  auto rhs_feature = rhs_obj->asFeature();
+  auto dst_feature = dst_obj->asFeature();
+
+  // TODO Support other object type
+  if (lhs_feature == nullptr || rhs_feature == nullptr || dst_feature == nullptr)
+  {
+    return;
+  }
+
+  assert(lhs_feature != nullptr);
+  assert(rhs_feature != nullptr);
+  assert(dst_feature != nullptr);
+
+  // Allocate weight for destination
+  d->f32()->allocate(dst_bag);
+  auto dst_span = d->f32()->weight(dst_bag);
+
+  // All three features must have identical extents
+  assert(lhs_feature->layout()->batch() == rhs_feature->layout()->batch());
+  assert(lhs_feature->layout()->depth() == rhs_feature->layout()->depth());
+  assert(lhs_feature->layout()->height() == rhs_feature->layout()->height());
+  assert(lhs_feature->layout()->width() == rhs_feature->layout()->width());
+
+  assert(lhs_feature->layout()->batch() == dst_feature->layout()->batch());
+  assert(lhs_feature->layout()->depth() == dst_feature->layout()->depth());
+  assert(lhs_feature->layout()->height() == dst_feature->layout()->height());
+  assert(lhs_feature->layout()->width() == dst_feature->layout()->width());
+
+  uint32_t const B = lhs_feature->layout()->batch();
+  uint32_t const C = lhs_feature->layout()->depth();
+  uint32_t const H = lhs_feature->layout()->height();
+  uint32_t const W = lhs_feature->layout()->width();
+
+  for (uint32_t b = 0; b < B; ++b)
+  {
+    for (uint32_t ch = 0; ch < C; ++ch)
+    {
+      for (uint32_t row = 0; row < H; ++row)
+      {
+        for (uint32_t col = 0; col < W; ++col)
+        {
+          auto lhs_ind = lhs_feature->layout()->at(b, ch, row, col);
+          auto rhs_ind = rhs_feature->layout()->at(b, ch, row, col);
+          auto dst_ind = dst_feature->layout()->at(b, ch, row, col);
+
+          evaluate(&dst_span[dst_ind.value()], lhs_span[lhs_ind.value()],
+                   rhs_span[rhs_ind.value()]);
+        }
+      }
+    }
+  }
+
+  // Let's detach eval
+  eval->out(nullptr);
+  eval->detach();
+
+  // Let's visit destination bag!
+  q.push(dst_bag);
+}
+
+// Dispatch an Eval to the matching element-wise folder (Sqrt/Add/Sub/Mul/Div)
+void fold_constant(std::queue<coco::Bag *> &q, coco::Eval *eval)
+{
+  // TODO Support other data types
+  if (auto op = eval->op()->asSqrt())
+  {
+    fold_constant_op(q, op, [](float *dst, float value) { *dst = std::sqrt(value); });
+  }
+  else if (auto op = eval->op()->asAdd())
+  {
+    fold_constant_op(q, op, [](float *dst, float lhs, float rhs) { *dst = lhs + rhs; });
+  }
+  else if (auto op = eval->op()->asSub())
+  {
+    fold_constant_op(q, op, [](float *dst, float lhs, float rhs) { *dst = lhs - rhs; });
+  }
+  else if (auto op = eval->op()->asMul())
+  {
+    fold_constant_op(q, op, [](float *dst, float lhs, float rhs) { *dst = lhs * rhs; });
+  }
+  else if (auto op = eval->op()->asDiv())
+  {
+    fold_constant_op(q, op, [](float *dst, float lhs, float rhs) { *dst = lhs / rhs; });
+  }
+  else
+  {
+    // Not supported operation, do nothing
+    // TODO Support other operations
+  }
+}
+
+// Dispatch an instruction to the matching folder (Copy or Eval); other
+// instruction kinds are silently ignored for now
+void fold_constant(std::queue<coco::Bag *> &q, coco::Instr *ins)
+{
+  if (auto copy = coco::safe_cast<coco::Copy>(ins))
+  {
+    fold_constant(q, copy);
+    return;
+  }
+  if (auto eval = coco::safe_cast<coco::Eval>(ins))
+  {
+    fold_constant(q, eval);
+    return;
+  }
+
+  // TODO Add more cases for constant folding
+}
+
+} // namespace
+
+namespace enco
+{
+
+// Worklist-driven constant folding: seed the queue with every constant bag,
+// then repeatedly fold the readers of each bag; folding a reader may make its
+// destination bag constant, which re-enters the queue.
+void fold_constants(enco::Code *code)
+{
+  std::queue<coco::Bag *> q;
+
+  // Collect the initial set of "constant" bag
+  constant_bag_enumerator(code) << [&q](coco::Bag *bag) { q.push(bag); };
+
+  while (!q.empty())
+  {
+    auto candidate_bag = take(q);
+
+    // Scan the readers of each candidate bag
+    for (auto reader : coco::readers(candidate_bag))
+    {
+      // TODO Decide how to handle the reader with unknown instruction
+      if (auto ins = reader->loc())
+      {
+        fold_constant(q, ins);
+      }
+    }
+  }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/ConstantFolding.h b/compiler/enco/core/src/Transforms/ConstantFolding.h
new file mode 100644
index 000000000..6faa9c876
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/ConstantFolding.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// NOTE Guard renamed to carry the __ENCO_ project prefix, matching sibling
+// headers (cf. __ENCO_CONCAT_LOWERING_H__) and reducing collision risk.
+#ifndef __ENCO_CONSTANT_FOLDING_H__
+#define __ENCO_CONSTANT_FOLDING_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Evaluate "constant" expressions at compile time
+ */
+void fold_constants(enco::Code *);
+
+/// @brief Pass adapter that runs fold_constants over a session's code
+struct ConstantFoldingPass final : public Pass
+{
+  PASS_CTOR(ConstantFoldingPass)
+  {
+    // DO NOTHING
+  }
+
+  void run(const SessionID &sess) const override { fold_constants(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_CONSTANT_FOLDING_H__
diff --git a/compiler/enco/core/src/Transforms/ConstantFolding.test.cpp b/compiler/enco/core/src/Transforms/ConstantFolding.test.cpp
new file mode 100644
index 000000000..5ac71ac14
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/ConstantFolding.test.cpp
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConstantFolding.h"
+#include "Session.h"
+
+#include <cmath>
+#include <gtest/gtest.h>
+
+namespace
+{
+
+class BinaryNetwork
+{
+public:
+ BinaryNetwork(coco::Module *module, coco::Data *data) : _module{module}, _data{data}
+ {
+ // DO NOTHING
+ }
+
+ template <typename Op> void build(void);
+
+ void fold(void)
+ {
+ // Execute constant folding
+ enco::make_session(_module, _data);
+ enco::Code code{_module, _data};
+ enco::fold_constants(&code);
+ }
+
+public:
+ coco::Bag *out;
+ coco::Bag *lhs;
+ coco::Bag *rhs;
+
+ coco::Eval *eval;
+
+private:
+ coco::Module *_module;
+ coco::Data *_data;
+};
+
+template <typename Op> void BinaryNetwork::build(void)
+{
+ // Create lhs bag and object
+ auto lhs_bag = _module->entity()->bag()->create(12);
+ auto lhs_obj = _module->entity()->object()->template create<coco::FeatureObject>();
+ coco::FeatureShape lhs_shape(1, 2, 2, 3);
+ lhs_obj->bag(lhs_bag);
+ lhs_obj->layout(coco::FeatureLayouts::BHWC::create(lhs_shape));
+
+ // Create rhs bag and object
+ auto rhs_bag = _module->entity()->bag()->create(12);
+ auto rhs_obj = _module->entity()->object()->template create<coco::FeatureObject>();
+ coco::FeatureShape rhs_shape(1, 2, 2, 3);
+ rhs_obj->bag(rhs_bag);
+ rhs_obj->layout(coco::FeatureLayouts::BHWC::create(rhs_shape));
+
+ // Create output bag and object
+ auto output_bag = _module->entity()->bag()->create(12);
+ auto output_obj = _module->entity()->object()->template create<coco::FeatureObject>();
+ coco::FeatureShape ofm_shape(1, 2, 2, 3);
+ output_obj->bag(output_bag);
+ output_obj->layout(coco::FeatureLayouts::BHWC::create(ofm_shape));
+
+ // Create instruction and operations
+ auto block = _module->entity()->block()->create();
+ auto eval = _module->entity()->instr()->template create<coco::Eval>();
+ auto load_lhs = _module->entity()->op()->template create<coco::Load>();
+ auto load_rhs = _module->entity()->op()->template create<coco::Load>();
+ auto add_op = _module->entity()->op()->template create<Op>();
+
+ _module->block()->append(block);
+ block->instr()->append(eval);
+
+ load_lhs->object(lhs_obj);
+ load_rhs->object(rhs_obj);
+ add_op->left(load_lhs);
+ add_op->right(load_rhs);
+
+ eval->op(add_op);
+ eval->out(output_obj);
+
+ // Create a handle
+ this->lhs = lhs_bag;
+ this->rhs = rhs_bag;
+ this->out = output_bag;
+
+ this->eval = eval;
+}
+
+} // namespace
+
+TEST(ConstantFoldingTest, sqrt)
+{
+ auto module = coco::Module::create();
+ auto data = coco::Data::create();
+
+ // Create input bag and object
+ auto input_bag = module->entity()->bag()->create(12);
+ auto input_obj = module->entity()->object()->create<coco::FeatureObject>();
+ coco::FeatureShape ifm_shape(1, 2, 2, 3);
+ input_obj->bag(input_bag);
+ input_obj->layout(coco::FeatureLayouts::BHWC::create(ifm_shape));
+
+ // Create output bag and object
+ auto output_bag = module->entity()->bag()->create(12);
+ auto output_obj = module->entity()->object()->create<coco::FeatureObject>();
+ coco::FeatureShape ofm_shape(1, 2, 2, 3);
+ output_obj->bag(output_bag);
+ output_obj->layout(coco::FeatureLayouts::BHWC::create(ofm_shape));
+
+ // Insert values into input bag
+ data->f32()->allocate(input_bag);
+ auto input = data->f32()->weight(input_bag);
+ for (uint32_t idx = 0; idx < input.size(); ++idx)
+ {
+ input[idx] = (float)idx;
+ }
+
+ // Create instruction and operations
+ auto block = module->entity()->block()->create();
+ auto eval = module->entity()->instr()->create<coco::Eval>();
+ auto load = module->entity()->op()->create<coco::Load>();
+ auto sqrt_op = module->entity()->op()->create<coco::Sqrt>();
+
+ module->block()->append(block);
+ block->instr()->append(eval);
+
+ load->object(input_obj);
+ sqrt_op->arg(load);
+
+ eval->op(sqrt_op);
+ eval->out(output_obj);
+
+ // Execute constant folding
+ enco::make_session(module.get(), data.get());
+ enco::Code code{module.get(), data.get()};
+ enco::fold_constants(&code);
+
+ // Validate the result
+ ASSERT_EQ(data->allocated(output_bag), true);
+ ASSERT_EQ(eval->out(), nullptr);
+
+ auto output = data->f32()->weight(output_bag);
+ for (uint32_t idx = 0; idx < output.size(); ++idx)
+ {
+ ASSERT_FLOAT_EQ(output[idx], std::sqrt(input[idx]));
+ }
+}
+
+TEST(ConstantFoldingTest, element_wise_add)
+{
+ auto module = coco::Module::create();
+ auto data = coco::Data::create();
+
+ BinaryNetwork net{module.get(), data.get()};
+
+ // Build a network
+ net.build<coco::Add>();
+
+ // Create aliases
+ auto lhs_bag = net.lhs;
+ auto rhs_bag = net.rhs;
+ auto output_bag = net.out;
+ auto eval = net.eval;
+
+ // Insert values into lhs and rhs bag
+ data->f32()->allocate(lhs_bag);
+ data->f32()->allocate(rhs_bag);
+ auto lhs = data->f32()->weight(lhs_bag);
+ auto rhs = data->f32()->weight(rhs_bag);
+ for (uint32_t idx = 0; idx < lhs.size(); ++idx)
+ {
+ lhs[idx] = (float)idx;
+ rhs[idx] = 1.5;
+ }
+
+ // Execute constant folding
+ net.fold();
+
+ // Validate the result
+ ASSERT_EQ(data->allocated(output_bag), true);
+ ASSERT_EQ(eval->out(), nullptr);
+
+ auto output = data->f32()->weight(output_bag);
+ for (uint32_t idx = 0; idx < output.size(); ++idx)
+ {
+ ASSERT_FLOAT_EQ(output[idx], lhs[idx] + rhs[idx]);
+ }
+}
+
+TEST(ConstantFoldingTest, element_wise_sub)
+{
+ auto module = coco::Module::create();
+ auto data = coco::Data::create();
+
+ BinaryNetwork net{module.get(), data.get()};
+
+ // Build a network
+ net.build<coco::Sub>();
+
+ // Create aliases
+ auto lhs_bag = net.lhs;
+ auto rhs_bag = net.rhs;
+ auto output_bag = net.out;
+ auto eval = net.eval;
+
+ // Insert values into lhs and rhs bag
+ data->f32()->allocate(lhs_bag);
+ data->f32()->allocate(rhs_bag);
+ auto lhs = data->f32()->weight(lhs_bag);
+ auto rhs = data->f32()->weight(rhs_bag);
+ for (uint32_t idx = 0; idx < lhs.size(); ++idx)
+ {
+ lhs[idx] = (float)idx;
+ rhs[idx] = 1.5;
+ }
+
+ // Execute constant folding
+ net.fold();
+
+ // Validate the result
+ ASSERT_EQ(data->allocated(output_bag), true);
+ ASSERT_EQ(eval->out(), nullptr);
+
+ auto output = data->f32()->weight(output_bag);
+ for (uint32_t idx = 0; idx < output.size(); ++idx)
+ {
+ ASSERT_FLOAT_EQ(output[idx], lhs[idx] - rhs[idx]);
+ }
+}
+
+TEST(ConstantFoldingTest, element_wise_mul)
+{
+ auto module = coco::Module::create();
+ auto data = coco::Data::create();
+
+ BinaryNetwork net{module.get(), data.get()};
+
+ // Build a network
+ net.build<coco::Mul>();
+
+ // Create aliases
+ auto lhs_bag = net.lhs;
+ auto rhs_bag = net.rhs;
+ auto output_bag = net.out;
+ auto eval = net.eval;
+
+ // Insert values into lhs and rhs bag
+ data->f32()->allocate(lhs_bag);
+ data->f32()->allocate(rhs_bag);
+ auto lhs = data->f32()->weight(lhs_bag);
+ auto rhs = data->f32()->weight(rhs_bag);
+ for (uint32_t idx = 0; idx < lhs.size(); ++idx)
+ {
+ lhs[idx] = (float)idx;
+ rhs[idx] = 1.5;
+ }
+
+ // Execute constant folding
+ net.fold();
+
+ // Validate the result
+ ASSERT_EQ(data->allocated(output_bag), true);
+ ASSERT_EQ(eval->out(), nullptr);
+
+ auto output = data->f32()->weight(output_bag);
+ for (uint32_t idx = 0; idx < output.size(); ++idx)
+ {
+ ASSERT_FLOAT_EQ(output[idx], lhs[idx] * rhs[idx]);
+ }
+}
+
+TEST(ConstantFoldingTest, element_wise_div)
+{
+ auto module = coco::Module::create();
+ auto data = coco::Data::create();
+
+ BinaryNetwork net{module.get(), data.get()};
+
+ // Build a network
+ net.build<coco::Div>();
+
+ // Create aliases
+ auto lhs_bag = net.lhs;
+ auto rhs_bag = net.rhs;
+ auto output_bag = net.out;
+ auto eval = net.eval;
+
+ // Insert values into lhs and rhs bag
+ data->f32()->allocate(lhs_bag);
+ data->f32()->allocate(rhs_bag);
+ auto lhs = data->f32()->weight(lhs_bag);
+ auto rhs = data->f32()->weight(rhs_bag);
+ for (uint32_t idx = 0; idx < lhs.size(); ++idx)
+ {
+ lhs[idx] = (float)idx;
+ rhs[idx] = 1.5;
+ }
+
+ // Execute constant folding
+ net.fold();
+
+ // Validate the result
+ ASSERT_EQ(data->allocated(output_bag), true);
+ ASSERT_EQ(eval->out(), nullptr);
+
+ auto output = data->f32()->weight(output_bag);
+ for (uint32_t idx = 0; idx < output.size(); ++idx)
+ {
+ ASSERT_FLOAT_EQ(output[idx], lhs[idx] / rhs[idx]);
+ }
+}
diff --git a/compiler/enco/core/src/Transforms/CopyLowering.cpp b/compiler/enco/core/src/Transforms/CopyLowering.cpp
new file mode 100644
index 000000000..ceb3bbd5c
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/CopyLowering.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CopyLowering.h"
+
+#include <set>
+#include <cassert>
+
+//
+// Lower Copy as Shuffle
+//
+namespace enco
+{
+
+void lower_copy(enco::Code *code)
+{
+ auto m = code->module();
+
+ std::set<coco::Copy *> lowered_copies;
+
+ for (uint32_t n = 0; n < m->entity()->instr()->size(); ++n)
+ {
+ auto ins = m->entity()->instr()->at(n);
+
+ assert(ins != nullptr);
+
+ if (ins->parent() == nullptr)
+ {
+ // Skip if instruction does not belong to a list
+ continue;
+ }
+
+ auto copy = ins->asCopy();
+
+ if (copy == nullptr)
+ {
+ // Skip if instruction is not a copy
+ continue;
+ }
+
+ // TODO Support non-Feature objects
+ auto ifm = copy->from()->asFeature();
+ auto ofm = copy->into()->asFeature();
+
+ if ((ifm == nullptr) || (ofm == nullptr))
+ {
+ continue;
+ }
+
+ assert(ifm->layout()->batch() == ofm->layout()->batch());
+ assert(ifm->layout()->shape() == ofm->layout()->shape());
+
+ auto shuffle = m->entity()->instr()->create<coco::Shuffle>();
+
+ shuffle->from(ifm->bag());
+ shuffle->into(ofm->bag());
+
+ const uint32_t B = ifm->layout()->batch();
+ const uint32_t C = ifm->layout()->shape().depth();
+ const uint32_t H = ifm->layout()->shape().height();
+ const uint32_t W = ifm->layout()->shape().width();
+
+ for (uint32_t b = 0; b < B; ++b)
+ {
+ for (uint32_t ch = 0; ch < C; ++ch)
+ {
+ for (uint32_t row = 0; row < H; ++row)
+ {
+ for (uint32_t col = 0; col < W; ++col)
+ {
+ const auto from = ifm->layout()->at(b, ch, row, col);
+ const auto into = ofm->layout()->at(b, ch, row, col);
+
+ shuffle->insert(from, into);
+ }
+ }
+ }
+ }
+
+ shuffle->insertBefore(copy);
+ lowered_copies.insert(copy);
+ }
+
+ // Destroy lowered copy
+ for (const auto &copy : lowered_copies)
+ {
+ copy->detach();
+ m->entity()->instr()->destroy(copy);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/CopyLowering.h b/compiler/enco/core/src/Transforms/CopyLowering.h
new file mode 100644
index 000000000..51f0f83e2
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/CopyLowering.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_LOWER_H__
+#define __ENCO_LOWER_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Lower copy(...) instruction into shuffle(...)
+ */
+void lower_copy(enco::Code *code);
+
+struct CopyLoweringPass final : public Pass
+{
+ PASS_CTOR(CopyLoweringPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { lower_copy(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_LOWER_H__
diff --git a/compiler/enco/core/src/Transforms/DataLayoutConversion.cpp b/compiler/enco/core/src/Transforms/DataLayoutConversion.cpp
new file mode 100644
index 000000000..9d65d1c0b
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DataLayoutConversion.cpp
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DataLayoutConversion.h"
+#include "Session.h"
+#include "IRUtils.h"
+
+#include "coex/IR.h"
+
+#include <coco/IR/FeatureLayouts.h>
+#include <coco/IR/KernelLayouts.h>
+
+#include <nncc/core/ADT/feature/Layout.h>
+#include <nncc/core/ADT/kernel/Layout.h>
+
+#include <nncc/core/ADT/feature/HWCLayout.h>
+#include <nncc/core/ADT/kernel/NHWCLayout.h>
+
+#include <set>
+
+namespace
+{
+
+coco::Copy *make_copy(coco::FeatureObject *from, coco::FeatureObject *into)
+{
+ auto m = from->module();
+ assert(m != nullptr);
+ assert(from->module() == into->module());
+
+ auto copy = m->entity()->instr()->create<coco::Copy>();
+
+ copy->from(from);
+ copy->into(into);
+
+ return copy;
+}
+
+coco::FeatureObject *clone_feature(const coco::FeatureObject *oldobj)
+{
+ auto module = oldobj->module();
+ auto newobj = module->entity()->object()->create<coco::FeatureObject>();
+ newobj->layout(coco::FeatureLayouts::BHWC::create(oldobj->shape()));
+
+ if (oldobj->bag() != nullptr)
+ {
+ using nncc::core::ADT::feature::num_elements;
+
+ // NOTE The size of bag should be at least "BxHxWxC" as "newobj" uses BHWC layout
+ const uint32_t batch = newobj->layout()->batch();
+ const uint32_t count = num_elements(newobj->layout()->shape());
+ const uint32_t bag_size = batch * count;
+
+ // Clone bag only when there is a backing bag for a given feature object
+ auto newbag = module->entity()->bag()->create(bag_size);
+ newobj->bag(newbag);
+ }
+
+ return newobj;
+}
+
+/**
+ * @brief Insert Copy before Load if necessary
+ *
+ * @require "load" should be bounded
+ */
+void insert_copy_before_load(coco::Load *load)
+{
+ assert(load->parent() != nullptr);
+ assert(load->parent()->parent() != nullptr);
+
+ if (auto obj = load->object())
+ {
+ if (auto ifm = obj->asFeature())
+ {
+ if (ifm->layout()->id() != coco::FeatureLayouts::BHWC::uid())
+ {
+ auto oldobj = ifm;
+ auto newobj = clone_feature(oldobj);
+
+ load->object(newobj);
+
+ auto copy = make_copy(oldobj, newobj);
+ copy->insertBefore(load->parent());
+ }
+ }
+ }
+}
+
+/**
+ * @brief Insert Copy after Eval if necessary
+ */
+void insert_copy_after_eval(coco::Eval *eval)
+{
+ if (auto out = eval->out())
+ {
+ if (auto ofm = out->asFeature())
+ {
+ if (ofm->layout()->id() != coco::FeatureLayouts::BHWC::uid())
+ {
+ auto oldobj = ofm;
+ auto newobj = clone_feature(oldobj);
+
+ eval->out(newobj);
+
+ auto copy = make_copy(newobj, oldobj);
+ copy->insertAfter(eval);
+ }
+ }
+ }
+}
+
+/**
+ * @brief Insert copy (for data layout change) before/after ANNDepthConcatF if necessary
+ */
+void convert_data_layout(ANNDepthConcatF *concat)
+{
+ if (auto out = concat->out())
+ {
+ if (auto ofm = out->asFeature())
+ {
+ if (ofm->layout()->id() != coco::FeatureLayouts::BHWC::uid())
+ {
+ auto oldobj = ofm;
+ auto newobj = clone_feature(oldobj);
+
+ concat->out(newobj);
+
+ auto copy = make_copy(newobj, oldobj);
+ copy->insertAfter(concat);
+ }
+ }
+ }
+
+ if (auto obj = concat->fst())
+ {
+ if (auto ifm = obj->asFeature())
+ {
+ if (ifm->layout()->id() != coco::FeatureLayouts::BHWC::uid())
+ {
+ auto oldobj = ifm;
+ auto newobj = clone_feature(oldobj);
+
+ concat->fst(newobj);
+
+ auto copy = make_copy(oldobj, newobj);
+ copy->insertBefore(concat);
+ }
+ }
+ }
+
+ if (auto obj = concat->snd())
+ {
+ if (auto ifm = obj->asFeature())
+ {
+ if (ifm->layout()->id() != coco::FeatureLayouts::BHWC::uid())
+ {
+ auto oldobj = ifm;
+ auto newobj = clone_feature(oldobj);
+
+ concat->snd(newobj);
+
+ auto copy = make_copy(oldobj, newobj);
+ copy->insertBefore(concat);
+ }
+ }
+ }
+}
+
+/**
+ * @brief Update convolution kernel data layout
+ */
+void change_conv2d_kernel_layout(coco::Conv2D *conv)
+{
+ auto m = conv->module();
+ assert(m != nullptr);
+ auto d = enco::data(enco::session(m));
+ assert(d != nullptr);
+
+ auto old_obj = conv->ker();
+ assert(old_obj != nullptr);
+ auto old_bag = old_obj->bag();
+ assert(old_bag != nullptr);
+
+ if (old_obj->layout()->id() == coco::KernelLayouts::NHWC::uid())
+ {
+ // Skip if kernel already uses NHWC layout
+ return;
+ }
+
+ const auto &ker_shape = old_obj->shape();
+
+ assert(d->allocated(old_bag));
+
+ auto new_bag = m->entity()->bag()->create(old_bag->size());
+ auto new_obj = m->entity()->object()->create<coco::KernelObject>();
+
+ new_obj->bag(new_bag);
+ new_obj->layout(coco::KernelLayouts::NHWC::create(ker_shape));
+
+ d->f32()->allocate(new_bag);
+
+ auto src = d->f32()->read(old_obj);
+ auto dst = d->f32()->access(new_obj);
+
+ const auto ker_N = ker_shape.count();
+ const auto ker_C = ker_shape.depth();
+ const auto ker_H = ker_shape.height();
+ const auto ker_W = ker_shape.width();
+
+ for (uint32_t n = 0; n < ker_N; ++n)
+ {
+ for (uint32_t ch = 0; ch < ker_C; ++ch)
+ {
+ for (uint32_t row = 0; row < ker_H; ++row)
+ {
+ for (uint32_t col = 0; col < ker_W; ++col)
+ {
+ dst->at(n, ch, row, col) = src->at(n, ch, row, col);
+ }
+ }
+ }
+ }
+
+ conv->ker(new_obj);
+ d->release(old_bag);
+}
+
+} // namespace
+
+namespace
+{
+
+/**
+ * @brief Return the set of all of bounded Load Op(s) in a given module
+ *
+ * @note 'bounded' means it will be executed
+ */
+std::set<coco::Load *> loads(coco::Module *m)
+{
+ std::set<coco::Load *> res;
+
+ for (uint32_t n = 0; n < m->entity()->op()->size(); ++n)
+ {
+ auto op = m->entity()->op()->at(n);
+
+ // Skip if this op is dangling
+ if (op->parent() == nullptr)
+ {
+ continue;
+ }
+
+ // Skip if eval instruction of this op is dangling
+ if (op->parent()->parent() == nullptr)
+ {
+ continue;
+ }
+
+ if (auto load = m->entity()->op()->at(n)->asLoad())
+ {
+ res.insert(load);
+ }
+ }
+
+ return res;
+}
+
+/**
+ * @brief Return the set of every (allocated) Eval instruction in a given module
+ */
+std::set<coco::Eval *> evals(coco::Module *m)
+{
+ std::set<coco::Eval *> res;
+
+ for (uint32_t n = 0; n < m->entity()->instr()->size(); ++n)
+ {
+ if (auto eval = m->entity()->instr()->at(n)->asEval())
+ {
+ res.insert(eval);
+ }
+ }
+
+ return res;
+}
+
+/**
+ * @brief Return the set of allocated Conv2D op in a given module
+ */
+std::set<coco::Conv2D *> convs(coco::Module *m)
+{
+ std::set<coco::Conv2D *> res;
+
+ for (uint32_t n = 0; n < m->entity()->op()->size(); ++n)
+ {
+ if (auto op = m->entity()->op()->at(n)->asConv2D())
+ {
+ res.insert(op);
+ }
+ }
+
+ return res;
+}
+
+/**
+ * @brief Return the set of "bounded" ANNDepthConcatF instructions
+ */
+std::set<ANNDepthConcatF *> depth_concats(coco::Module *m)
+{
+ std::set<ANNDepthConcatF *> res;
+
+ for (auto ins : enco::instr_sequence(m))
+ {
+ if (auto depth_concat_f = coco::safe_cast<ANNDepthConcatF>(ins))
+ {
+ res.insert(depth_concat_f);
+ }
+ }
+
+ return res;
+}
+
+class NormalizePass
+{
+private:
+ void runOnModule(coco::Module *m) const;
+
+public:
+ void runOnCode(enco::Code *) const;
+};
+
+void NormalizePass::runOnModule(coco::Module *m) const
+{
+ // Insert Copy before all Load Op (if necessary)
+ for (auto load : loads(m))
+ {
+ insert_copy_before_load(load);
+ }
+
+ // Insert Copy after all Eval Instr (if necessary)
+ for (auto eval : evals(m))
+ {
+ insert_copy_after_eval(eval);
+ }
+
+ // Change Kernel Layout of Conv2D operation (if necessary)
+ for (auto conv : convs(m))
+ {
+ change_conv2d_kernel_layout(conv);
+ }
+
+ // Insert Copy (for Layout Conversion) before/after ANNDepthConcatF instructions (if necessary)
+ for (auto depth_concat : depth_concats(m))
+ {
+ convert_data_layout(depth_concat);
+ }
+}
+
+void NormalizePass::runOnCode(enco::Code *code) const { runOnModule(code->module()); }
+
+} // namespace
+
+namespace enco
+{
+
+void convert_data_layout(enco::Code *code)
+{
+ NormalizePass pass;
+ pass.runOnCode(code);
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/DataLayoutConversion.h b/compiler/enco/core/src/Transforms/DataLayoutConversion.h
new file mode 100644
index 000000000..ac4052c8b
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DataLayoutConversion.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_DATA_LAYOUT_CONVERSION_H__
+#define __ENCO_TRANSFORM_DATA_LAYOUT_CONVERSION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Insert data reordering if necessary
+ */
+void convert_data_layout(enco::Code *code);
+
+struct DataLayoutConversionPass final : public enco::Pass
+{
+ PASS_CTOR(DataLayoutConversionPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { convert_data_layout(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_DATA_LAYOUT_CONVERSION_H__
diff --git a/compiler/enco/core/src/Transforms/DataLayoutConversion.test.cpp b/compiler/enco/core/src/Transforms/DataLayoutConversion.test.cpp
new file mode 100644
index 000000000..812e38a78
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DataLayoutConversion.test.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DataLayoutConversion.h"
+
+#include <gtest/gtest.h>
+
+TEST(DataLayoutConversionTest, case_000)
+{
+ auto m = coco::Module::create();
+
+ // Create a "free" Load op
+ m->entity()->instr()->create<coco::Eval>();
+
+ enco::Code code{m.get(), nullptr};
+ ASSERT_EQ(m->entity()->instr()->size(), 1);
+
+ // "conver_data_layout" SHOULD NOT crash even if there is a "free" Load op
+ enco::convert_data_layout(&code);
+}
diff --git a/compiler/enco/core/src/Transforms/DeadBagElimination.cpp b/compiler/enco/core/src/Transforms/DeadBagElimination.cpp
new file mode 100644
index 000000000..b3c598a55
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DeadBagElimination.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DeadBagElimination.h"
+
+#include <set>
+
+namespace
+{
+
+/// @brief Return true if a given bag is marked as either input or output
+bool is_public(const coco::Bag *b) { return b->isInput() || b->isOutput(); }
+
+/// @brief Return the set of "dead" bags in a given module
+std::set<coco::Bag *> dead_bags(const coco::Module *m)
+{
+ std::set<coco::Bag *> res;
+
+ for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+ {
+ auto bag = m->entity()->bag()->at(n);
+
+ if (coco::readers(bag).empty() && !is_public(bag))
+ {
+ res.insert(bag);
+ }
+ }
+
+ return res;
+}
+
+} // namespace
+
+namespace enco
+{
+
+void eliminate_dead_bag(enco::Code *code)
+{
+ auto m = code->module();
+
+ // Destroy a dead bag and its updaters
+ for (auto bag : dead_bags(m))
+ {
+ for (auto updater : coco::updaters(bag))
+ {
+ auto ins = updater->loc();
+
+ assert(ins != nullptr);
+
+ ins->detach();
+ m->entity()->instr()->destroy(ins);
+ }
+
+ bag->replaceWith(nullptr);
+ m->entity()->bag()->destroy(bag);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/DeadBagElimination.h b/compiler/enco/core/src/Transforms/DeadBagElimination.h
new file mode 100644
index 000000000..87e03e8ac
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DeadBagElimination.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_DEAD_BAG_ELIMINATION_H__
+#define __ENCO_TRANSFORM_DEAD_BAG_ELIMINATION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Eliminate dead bags
+ *
+ * A bag is referred to as dead if it is neither input nor output, and has no read. If a bag is
+ * dead, it is unnecessary to updates its values as these values are never used.
+ *
+ * "eliminate_dead_bag" removes all the dead bags and its updaters from IR.
+ */
+void eliminate_dead_bag(enco::Code *code);
+
+struct DeadBagEliminationPass final : public Pass
+{
+ PASS_CTOR(DeadBagEliminationPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { eliminate_dead_bag(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_DEAD_BAG_ELIMINATION_H__
diff --git a/compiler/enco/core/src/Transforms/DeadObjectElimination.cpp b/compiler/enco/core/src/Transforms/DeadObjectElimination.cpp
new file mode 100644
index 000000000..df8cc628a
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DeadObjectElimination.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DeadObjectElimination.h"
+
+#include <set>
+
+namespace
+{
+
+std::set<coco::Object *> dead_objects(const coco::Module *m)
+{
+ std::set<coco::Object *> res;
+
+ for (uint32_t n = 0; n < m->entity()->object()->size(); ++n)
+ {
+ auto obj = m->entity()->object()->at(n);
+
+ if (auto bag = obj->bag())
+ {
+ if (coco::readers(bag).empty() && !(bag->isOutput()))
+ {
+ res.insert(obj);
+ }
+ }
+ else
+ {
+ // NOTE Just in case if there are Objects not related to Bags
+ if (obj->uses()->size() == 0)
+ {
+ res.insert(obj);
+ }
+ }
+ }
+
+ return res;
+}
+
+} // namespace
+
+namespace enco
+{
+
+void eliminate_dead_object(enco::Code *code)
+{
+ auto m = code->module();
+
+ // Destroy a dead object and its producer
+ for (auto obj : dead_objects(m))
+ {
+ if (auto producer = coco::producer(obj))
+ {
+ auto ins = producer->loc();
+ assert(ins != nullptr);
+
+ ins->detach();
+ m->entity()->instr()->destroy(ins);
+ }
+
+ m->entity()->object()->destroy(obj);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/DeadObjectElimination.h b/compiler/enco/core/src/Transforms/DeadObjectElimination.h
new file mode 100644
index 000000000..4923e56fd
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DeadObjectElimination.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_DEAD_OBJECT_ELIMINATION_H__
+#define __ENCO_TRANSFORM_DEAD_OBJECT_ELIMINATION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Eliminate dead objects in IR
+ *
+ * An object whose backing bag is unused is referred to as a dead object.
+ *
+ * Dead Object Elimination (DOE) eliminates such dead objects along with their producer.
+ */
+void eliminate_dead_object(enco::Code *code);
+
+struct DeadObjectEliminationPass final : public Pass
+{
+ PASS_CTOR(DeadObjectEliminationPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { eliminate_dead_object(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_DEAD_OBJECT_ELIMINATION_H__
diff --git a/compiler/enco/core/src/Transforms/Duplicate.cpp b/compiler/enco/core/src/Transforms/Duplicate.cpp
new file mode 100644
index 000000000..91f64a0ad
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/Duplicate.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Duplicate.h"
+
+#include <map>
+#include <set>
+
+#include <cassert>
+
+namespace
+{
+
+coco::Block *find_or_create_first_block(coco::Module *m)
+{
+ if (m->block()->empty())
+ {
+ auto blk = m->entity()->block()->create();
+ m->block()->append(blk);
+ return blk;
+ }
+
+ return m->block()->head();
+}
+
+} // namespace
+
+namespace
+{
+
+class DuplicatePass
+{
+private:
+ void runOnModule(coco::Module *m) const;
+
+public:
+ void runOnCode(enco::Code *) const;
+};
+
+void DuplicatePass::runOnModule(coco::Module *m) const
+{
+ // Let's find candidates
+ std::set<coco::Bag *> candidates;
+
+ for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+ {
+ auto bag = m->entity()->bag()->at(n);
+
+ if (bag->isInput() && bag->isOutput())
+ {
+ candidates.insert(bag);
+ }
+ }
+
+ // Return if there is no candidate
+ if (candidates.empty())
+ {
+ return;
+ }
+
+ std::map<const coco::Bag *, coco::Input *> input_map;
+ std::map<const coco::Bag *, coco::Output *> output_map;
+
+ for (uint32_t n = 0; n < m->input()->size(); ++n)
+ {
+ auto input = m->input()->at(n);
+ assert(input->bag() != nullptr);
+ input_map[input->bag()] = input;
+ }
+
+ for (uint32_t n = 0; n < m->output()->size(); ++n)
+ {
+ auto output = m->output()->at(n);
+ assert(output->bag() != nullptr);
+ output_map[output->bag()] = output;
+ }
+
+ // For each in/out bag,
+ // 1. Create a new bag of the same size
+ // 2. Copy the content from the original bag
+ // 3. Mark the newly created bag as an output
+ for (const auto &candidate : candidates)
+ {
+ assert(coco::updaters(candidate).empty());
+ assert(input_map.find(candidate) != input_map.end());
+ assert(output_map.find(candidate) != output_map.end());
+
+ auto src = candidate;
+ auto dst = m->entity()->bag()->create(src->size());
+
+ // Create a copy instruction
+ auto shuffle = m->entity()->instr()->create<coco::Shuffle>();
+
+ shuffle->from(src);
+ shuffle->into(dst);
+
+ for (uint32_t n = 0; n < src->size(); ++n)
+ {
+ shuffle->insert(coco::ElemID{n} /* FROM */, coco::ElemID{n} /* INTO */);
+ }
+
+ find_or_create_first_block(m)->instr()->prepend(shuffle);
+
+ // Let's use the new bag as an output
+ output_map.at(src)->bag(dst);
+ }
+}
+
+void DuplicatePass::runOnCode(enco::Code *code) const { runOnModule(code->module()); }
+
+} // namespace
+
+namespace enco
+{
+
+void duplicate_inout_bag(enco::Code *code)
+{
+ DuplicatePass duplicate;
+ duplicate.runOnCode(code);
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/Duplicate.h b/compiler/enco/core/src/Transforms/Duplicate.h
new file mode 100644
index 000000000..93baa4589
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/Duplicate.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DUPLICATE_H__
+#define __DUPLICATE_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Eliminate in/out bags by duplication
+ */
+void duplicate_inout_bag(enco::Code *code);
+
+struct BagDuplicationPass final : public Pass
+{
+ PASS_CTOR(BagDuplicationPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { duplicate_inout_bag(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __DUPLICATE_H__
diff --git a/compiler/enco/core/src/Transforms/DuplicatedObjectReduction.cpp b/compiler/enco/core/src/Transforms/DuplicatedObjectReduction.cpp
new file mode 100644
index 000000000..fa84c005c
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DuplicatedObjectReduction.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DuplicatedObjectReduction.h"
+
+#include "CodeIndex.h"
+#include "IRUtils.h"
+
+#include <set>
+
+namespace
+{
+
+/**
+ * @brief Collect feature objects in coco IR
+ */
+std::set<coco::FeatureObject *> features(const coco::Module *m)
+{
+ std::set<coco::FeatureObject *> res;
+
+ for (uint32_t n = 0; n < m->entity()->object()->size(); ++n)
+ {
+ if (auto feature = m->entity()->object()->at(n)->asFeature())
+ {
+ res.insert(feature);
+ }
+ }
+
+ return res;
+}
+
+std::set<coco::FeatureObject *> candidates(const coco::FeatureObject *src)
+{
+ std::set<coco::FeatureObject *> res;
+
+ for (auto consumer : coco::consumers(src))
+ {
+ if (auto copy = consumer->loc()->asCopy())
+ {
+ auto dst = copy->into()->asFeature();
+ assert(dst != nullptr);
+
+ if (dst->layout()->id() == coco::FeatureLayouts::BHWC::uid())
+ {
+ res.insert(dst);
+ }
+ }
+ }
+
+ return res;
+}
+
+CodeIndex code_index(coco::Object::Producer *p)
+{
+ if (auto ins = p->loc())
+ {
+ return ::code_index(ins);
+ }
+
+ return CodeIndex{};
+}
+
+} // namespace
+
+namespace enco
+{
+
+void reduce_duplicated_object(enco::Code *code)
+{
+ auto m = code->module();
+
+ for (const auto &src : features(m))
+ {
+ auto copied = candidates(src);
+
+ if (copied.size() <= 1)
+ {
+ continue;
+ }
+
+ // Find the dominator
+ coco::FeatureObject *dominator = nullptr;
+
+ for (auto candidate : copied)
+ {
+ if (dominator == nullptr)
+ {
+ dominator = candidate;
+ }
+ else if (code_index(coco::producer(candidate)) < code_index(coco::producer(dominator)))
+ {
+ dominator = candidate;
+ }
+ }
+
+ // Replace all the occurrences of dominated objects with their dominator
+ copied.erase(dominator);
+
+ for (auto dominatee : copied)
+ {
+ subst(dominatee, dominator);
+ }
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/DuplicatedObjectReduction.h b/compiler/enco/core/src/Transforms/DuplicatedObjectReduction.h
new file mode 100644
index 000000000..3aa20058e
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/DuplicatedObjectReduction.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_DUPLICATED_OBJECT_REDUCTION_H__
+#define __ENCO_TRANSFORM_DUPLICATED_OBJECT_REDUCTION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Reduce duplicated feature objects to their dominating feature object
+ *
+ * >>> BEFORE <<<
+ * %obj_0 = Feature(layout: ???) at ...
+ * %obj_1 = Feature(layout: BHWC) at ...
+ * %obj_2 = Feature(layout: BHWC) at ...
+ *
+ * copy(from: %obj_0, into: %obj_1)
+ * copy(from: %obj_0, into: %obj_2)
+ *
+ * ...
+ * Use(%obj_1)
+ * Use(%obj_2)
+ * ...
+ *
+ * >>> AFTER <<<
+ * %obj_0 = Feature(layout: ???) at ...
+ * %obj_1 = Feature(layout: BHWC) at ...
+ * %obj_2 = Feature(layout: BHWC) at ...
+ *
+ * copy(from: %obj_0, into: %obj_1)
+ * copy(from: %obj_0, into: %obj_2)
+ *
+ * ...
+ * Use(%obj_1)
+ * Use(%obj_1) <-- CHANGED
+ * ...
+ *
+ * NOTE Given a set of feature objects, a feature object is referred to as a dominating
+ * feature object if its producer precedes the producer of every feature object
+ * in the given set
+ */
+void reduce_duplicated_object(enco::Code *code);
+
+struct DuplicatedObjectReductionPass final : public Pass
+{
+ PASS_CTOR(DuplicatedObjectReductionPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { reduce_duplicated_object(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_DUPLICATED_OBJECT_REDUCTION_H__
diff --git a/compiler/enco/core/src/Transforms/FeatureUnification.cpp b/compiler/enco/core/src/Transforms/FeatureUnification.cpp
new file mode 100644
index 000000000..1a7a0a8a4
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FeatureUnification.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FeatureUnification.h"
+#include "IRUtils.h"
+
+#include <stdex/Memory.h>
+
+#include <set>
+#include <vector>
+
+#include <cassert>
+
+using stdex::make_unique;
+
+namespace
+{
+
+bool is_static_layout(const coco::FeatureLayout::ID *id)
+{
+ if (id == coco::FeatureLayouts::BHWC::uid())
+ {
+ return true;
+ }
+
+ if (id == coco::FeatureLayouts::BCHW::uid())
+ {
+ return true;
+ }
+
+ return false;
+}
+
+bool is_static_layout(const coco::FeatureLayout *l) { return is_static_layout(l->id()); }
+bool is_static_layout(const coco::FeatureObject *f) { return is_static_layout(f->layout()); }
+
+/**
+ * @brief Return true if a given 'feature' is a candidate for unification
+ */
+bool candidate(const coco::FeatureObject *f) { return is_static_layout(f); }
+
+/**
+ * @brief Return true if two features are compatible
+ *
+ * Two features are referred to as compatible if these features are interchangeable.
+ *
+ * NOTE The current implementation of "compatible" is sound, but incomplete.
+ *
+ * Soundness:
+ * For all feature objects "lhs" and "rhs" that "compatible(lhs, rhs)" returns true,
+ * "lhs" and "rhs" are interchangeable.
+ *
+ * Completeness:
+ * For all interchangeable feature objects "lhs" and "rhs", "compatible(lhs, rhs)" returns true.
+ */
+bool compatible(const coco::FeatureObject *lhs, const coco::FeatureObject *rhs)
+{
+ assert(candidate(lhs) && candidate(rhs));
+
+ if (lhs->layout()->id() != rhs->layout()->id())
+ {
+ return false;
+ }
+
+ if (lhs->layout()->batch() != rhs->layout()->batch())
+ {
+ return false;
+ }
+
+ if (!(lhs->layout()->shape() == rhs->layout()->shape()))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * @brief A FeatureGroup denotes a group of FeatureObject(s)
+ *
+ * Each FeatureGroup includes at most 1 DEF FeatureObject (a FeatureObject that has a producer),
+ * and may include multiple USE FeatureObject(s) (a FeatureObject that has no producer).
+ *
+ * NOTE FeatureUnification pass internally uses this FeatureGroup to store a group of compatible
+ * FeatureObject(s)
+ */
+class FeatureGroup
+{
+public:
+ explicit FeatureGroup(coco::FeatureObject *feature) { insert(feature); }
+
+public:
+ uint32_t size(void) const { return _uses.size() + (_def ? 1 : 0); }
+
+public:
+ void insert(coco::FeatureObject *feature)
+ {
+ if (feature->def() != nullptr)
+ {
+ assert(_def == nullptr);
+ _def = feature;
+ }
+ else
+ {
+ _uses.insert(feature);
+ }
+ }
+
+public:
+ coco::FeatureObject *parent(void) const
+ {
+ if (_def)
+ {
+ return _def;
+ }
+
+ assert(_uses.size() > 0);
+ return *(_uses.begin());
+ }
+
+public:
+ std::set<coco::FeatureObject *> children(void) const
+ {
+ auto res = _uses;
+ res.erase(parent());
+ return res;
+ }
+
+private:
+ coco::FeatureObject *_def = nullptr;
+ std::set<coco::FeatureObject *> _uses;
+};
+
+} // namespace
+
+namespace enco
+{
+
+void unify_feature(enco::Code *code)
+{
+ auto m = code->module();
+
+ for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+ {
+ std::vector<std::unique_ptr<FeatureGroup>> groups;
+
+ auto assign_group = [&](coco::FeatureObject *feature) {
+ // Find a compatible FeatureGroup
+ FeatureGroup *group = nullptr;
+
+ for (const auto &g : groups)
+ {
+ FeatureGroup *candidate = g.get();
+
+ if (!compatible(candidate->parent(), feature))
+ {
+ continue;
+ }
+
+ group = candidate;
+ break;
+ }
+
+ if (group == nullptr)
+ {
+ // Insert FeatureObject into a new FeatureGroup
+ groups.emplace_back(make_unique<FeatureGroup>(feature));
+ }
+ else
+ {
+ // Insert FeatureObject into the compatible FeatureGroup
+ group->insert(feature);
+ }
+ };
+
+ auto bag = m->entity()->bag()->at(n);
+
+ for (auto o : coco::dependent_objects(bag))
+ {
+ if (auto feature = o->asFeature())
+ {
+ if (candidate(feature))
+ {
+ assign_group(feature);
+ }
+ }
+ }
+
+ for (const auto &g : groups)
+ {
+ auto group = g.get();
+ for (const auto child : group->children())
+ {
+ subst(child, group->parent());
+ assert(child->def() == nullptr);
+ assert(child->uses()->size() == 0);
+ m->entity()->object()->destroy(child);
+ }
+ }
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/FeatureUnification.h b/compiler/enco/core/src/Transforms/FeatureUnification.h
new file mode 100644
index 000000000..5ab0f9d7a
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FeatureUnification.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_FEATURE_UNIFICATION_H__
+#define __ENCO_TRANSFORM_FEATURE_UNIFICATION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Remove duplicated feature objects inside each bag
+ *
+ * >>> BEFORE <<<
+ * %b = Bag(...)
+ *
+ * %feature_0 = Feature(...) at %b
+ * %feature_1 = Feature(...) at %b
+ *
+ * ...
+ * Use(%feature_0)
+ * ...
+ * Use(%feature_1)
+ * ...
+ *
+ * >>> AFTER <<<
+ * %b = Bag(...)
+ *
+ * %feature_0 = Feature(...) at %b
+ * ~~%feature_1 = Feature(...) at %b~~ <- REMOVED
+ *
+ * ...
+ * Use(%feature_0)
+ * ...
+ * Use(%feature_0)
+ * ...
+ *
+ * Note that all the occurrences of "%feature_1" are replaced with "%feature_0"
+ */
+void unify_feature(enco::Code *code);
+
+struct FeatureUnificationPass final : public Pass
+{
+ PASS_CTOR(FeatureUnificationPass)
+ {
+ // DO NOTHING
+ }
+ void run(const SessionID &sess) const override { unify_feature(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_FEATURE_UNIFICATION_H__
diff --git a/compiler/enco/core/src/Transforms/FreeInstrElimination.cpp b/compiler/enco/core/src/Transforms/FreeInstrElimination.cpp
new file mode 100644
index 000000000..a62324b28
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FreeInstrElimination.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FreeInstrElimination.h"
+
+#include <cassert>
+#include <set>
+
+namespace
+{
+
+/**
+ * @brief Return the set of "free" instructions in a given module
+ */
+std::set<coco::Instr *> free_instrs(const coco::Module *m)
+{
+ std::set<coco::Instr *> res;
+
+ for (uint32_t n = 0; n < m->entity()->instr()->size(); ++n)
+ {
+ if (auto ins = m->entity()->instr()->at(n))
+ {
+ if (ins->parent() == nullptr)
+ {
+ res.insert(ins);
+ }
+ }
+ }
+
+ return res;
+}
+
+void destroy(coco::Instr *ins)
+{
+ auto m = ins->module();
+ m->entity()->instr()->destroy(ins);
+}
+
+} // namespace
+
+namespace enco
+{
+
+void eliminate_free_instr(coco::Module *m)
+{
+ for (auto ins : free_instrs(m))
+ {
+ destroy(ins);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/FreeInstrElimination.h b/compiler/enco/core/src/Transforms/FreeInstrElimination.h
new file mode 100644
index 000000000..1d311cd35
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FreeInstrElimination.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_FREE_INSTR_ELIMINATION_H__
+#define __ENCO_TRANSFORM_FREE_INSTR_ELIMINATION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Eliminate free instructions
+ *
+ * An instruction is referred to as "free" if it is not bound to any "block"
+ */
+void eliminate_free_instr(coco::Module *mod);
+
+/**
+ * @brief Eliminate free instructions
+ */
+static inline void eliminate_free_instr(enco::Code *code)
+{
+ // This function is just a wrapper of the above "void eliminate_free_instr(coco::Module *mod)"
+ eliminate_free_instr(code->module());
+}
+
+struct FreeInstrEliminationPass final : public Pass
+{
+ PASS_CTOR(FreeInstrEliminationPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { eliminate_free_instr(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_FREE_INSTR_ELIMINATION_H__
diff --git a/compiler/enco/core/src/Transforms/FreeInstrElimination.test.cpp b/compiler/enco/core/src/Transforms/FreeInstrElimination.test.cpp
new file mode 100644
index 000000000..c15f32e7d
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FreeInstrElimination.test.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FreeInstrElimination.h"
+
+#include <gtest/gtest.h>
+
+TEST(FreeInstrEliminationTest, case_000)
+{
+ auto m = coco::Module::create();
+
+ // Create a "free" Eval instruction
+ m->entity()->instr()->create<coco::Eval>();
+
+ ASSERT_EQ(m->entity()->instr()->size(), 1);
+
+ // Apply "Free Instruction Elimination"
+ enco::eliminate_free_instr(m.get());
+
+ ASSERT_EQ(m->entity()->instr()->size(), 0);
+}
diff --git a/compiler/enco/core/src/Transforms/FreeOpElimination.cpp b/compiler/enco/core/src/Transforms/FreeOpElimination.cpp
new file mode 100644
index 000000000..25f2f44d0
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FreeOpElimination.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FreeOpElimination.h"
+
+#include <cassert>
+#include <set>
+
+namespace
+{
+
+/**
+ * @brief Return the set of Free Op Elimination candidates
+ */
+std::set<coco::Op *> candidates(const coco::Module *m)
+{
+ std::set<coco::Op *> res;
+
+ for (uint32_t n = 0; n < m->entity()->op()->size(); ++n)
+ {
+ if (auto op = m->entity()->op()->at(n))
+ {
+ if ((op->parent() == nullptr) && (op->up() == nullptr))
+ {
+ res.insert(op);
+ }
+ }
+ }
+
+ return res;
+}
+
+} // namespace
+
+namespace enco
+{
+
+void eliminate_free_op(coco::Module *m)
+{
+ for (auto op : candidates(m))
+ {
+ m->entity()->op()->destroy_all(op);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/FreeOpElimination.h b/compiler/enco/core/src/Transforms/FreeOpElimination.h
new file mode 100644
index 000000000..3aeacada5
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FreeOpElimination.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_FREE_OP_ELIMINATION_H__
+#define __ENCO_TRANSFORM_FREE_OP_ELIMINATION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Eliminate free op
+ *
+ * An op is referred to as "free" if it is not bound to any "instruction"
+ */
+void eliminate_free_op(coco::Module *mod);
+
+/**
+ * @brief Eliminate free op
+ */
+static inline void eliminate_free_op(enco::Code *code)
+{
+ // This function is just a wrapper of the above "void eliminate_free_op(coco::Module *mod)"
+ eliminate_free_op(code->module());
+}
+
+struct FreeOpEliminationPass final : public Pass
+{
+ PASS_CTOR(FreeOpEliminationPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { eliminate_free_op(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_FREE_OP_ELIMINATION_H__
diff --git a/compiler/enco/core/src/Transforms/FreeOpElimination.test.cpp b/compiler/enco/core/src/Transforms/FreeOpElimination.test.cpp
new file mode 100644
index 000000000..41600526b
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/FreeOpElimination.test.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FreeOpElimination.h"
+
+#include <gtest/gtest.h>
+
+TEST(FreeOpEliminationTest, case_000)
+{
+ auto m = coco::Module::create();
+
+ // Create a "free" Load op
+ m->entity()->op()->create<coco::Load>();
+
+ ASSERT_EQ(m->entity()->op()->size(), 1);
+
+ // Apply "Free Op Elimination"
+ enco::eliminate_free_op(m.get());
+
+ ASSERT_EQ(m->entity()->op()->size(), 0);
+}
diff --git a/compiler/enco/core/src/Transforms/GlobalDataGeneration.cpp b/compiler/enco/core/src/Transforms/GlobalDataGeneration.cpp
new file mode 100644
index 000000000..152477a51
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/GlobalDataGeneration.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GlobalDataGeneration.h"
+#include "Split.h"
+#include "Dims.h"
+
+#include <stdex/Memory.h>
+
+#include <map>
+
+using stdex::make_unique;
+
+namespace
+{
+
+/**
+ * @brief Manage global variable declarations
+ */
+class Global
+{
+public:
+ Global(std::ostream &os) : _os(os)
+ {
+ // DO NOTHING
+ }
+
+public:
+ /// @brief Create a global constant string (const char *) literal, and return variable name
+ enco::GlobalOffset constant(const std::string &value);
+
+ /// @brief Create a global constant array variable of type T
+ template <typename T> enco::GlobalOffset constant(const std::vector<T> &values);
+
+ /// @brief Create a global constant array variable of byte (uint8_t) type
+ enco::GlobalOffset constant(const uint8_t *base, uint32_t size);
+
+private:
+ uint32_t _offset = 0;
+ std::ostream &_os;
+};
+
+enco::GlobalOffset Global::constant(const std::string &s)
+{
+ auto const base = reinterpret_cast<const uint8_t *>(s.c_str());
+ auto const size = s.size() + 1 /* NUL */;
+ return constant(base, size);
+}
+
+template <> enco::GlobalOffset Global::constant(const std::vector<uint32_t> &values)
+{
+ auto const base = reinterpret_cast<const uint8_t *>(values.data());
+ auto const size = sizeof(uint32_t) * values.size();
+ return constant(base, size);
+}
+
+enco::GlobalOffset Global::constant(const uint8_t *base, uint32_t size)
+{
+ auto pos = _os.tellp();
+ assert(pos != -1);
+
+ _os.write(reinterpret_cast<const char *>(base), size);
+
+ return static_cast<enco::GlobalOffset>(pos);
+}
+
+} // namespace
+
+namespace
+{
+
+std::map<const ann::Operand *, enco::GlobalOffset> data_offset_ctx;
+std::map<const coco::Bag *, enco::GlobalOffset> bag_data_offset_ctx;
+
+std::map<const coco::Arg *, enco::GlobalOffset> name_offset_ctx;
+std::map<const coco::Arg *, enco::GlobalOffset> dims_offset_ctx;
+
+} // namespace
+
+namespace enco
+{
+
+GlobalOffset GlobalData::data_offset(const ann::Operand *o) { return data_offset_ctx.at(o); }
+
+GlobalOffset GlobalData::data_offset(const coco::Bag *bag)
+{
+ assert(bag_data_offset_ctx.find(bag) != bag_data_offset_ctx.end());
+ return bag_data_offset_ctx.at(bag);
+}
+
+GlobalOffset GlobalData::name_offset(const coco::Input *in) { return name_offset_ctx.at(in); }
+GlobalOffset GlobalData::dims_offset(const coco::Input *in) { return dims_offset_ctx.at(in); }
+
+GlobalOffset GlobalData::name_offset(const coco::Output *out) { return name_offset_ctx.at(out); }
+GlobalOffset GlobalData::dims_offset(const coco::Output *out) { return dims_offset_ctx.at(out); }
+
+void generate_global_data(std::ostream &os, enco::Code *code)
+{
+ auto m = code->module();
+ auto d = code->data();
+
+ auto ann_ctx = enco::SubnetManager::context(m);
+
+ auto global = make_unique<Global>(os);
+
+ //
+ // Emit Bag's weight
+ //
+ for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+ {
+ auto bag = m->entity()->bag()->at(n);
+
+ if (!d->allocated(bag))
+ {
+ // Skip if the weight value does not exist for a given bag
+ continue;
+ }
+
+ // NOTE The current implementation assumes that all the values are of float(fp32) type
+ // TODO Support non-float values
+ auto span = d->f32()->weight(bag);
+
+ assert(span.data() != nullptr);
+ assert(span.size() > 0);
+
+ auto const base = reinterpret_cast<const uint8_t *>(span.data());
+ uint32_t const size = span.size() * sizeof(float);
+
+ assert(bag_data_offset_ctx.find(bag) == bag_data_offset_ctx.end());
+ bag_data_offset_ctx[bag] = global->constant(base, size);
+ }
+
+ for (uint32_t n = 0; n < ann_ctx->count(); ++n)
+ {
+ auto binder = ann_ctx->nth(n);
+
+ auto emit = [&](const ann::OperandID & /*id*/, const ann::Operand *info) {
+ if (info->weight())
+ {
+ auto base = info->weight()->base();
+ auto size = info->weight()->size();
+
+ data_offset_ctx[info] = global->constant(base, size);
+ }
+ };
+ binder->module()->operand()->each(emit);
+ }
+
+ for (uint32_t n = 0; n < m->input()->size(); ++n)
+ {
+ auto input = m->input()->at(n);
+ auto dims = as_dims(input->shape());
+
+ name_offset_ctx[input] = global->constant(input->name());
+ dims_offset_ctx[input] = global->constant<uint32_t>(dims);
+ }
+
+ for (uint32_t n = 0; n < m->output()->size(); ++n)
+ {
+ auto output = m->output()->at(n);
+ auto dims = as_dims(output->shape());
+
+ name_offset_ctx[output] = global->constant(output->name());
+ dims_offset_ctx[output] = global->constant<uint32_t>(dims);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/GlobalDataGeneration.h b/compiler/enco/core/src/Transforms/GlobalDataGeneration.h
new file mode 100644
index 000000000..433431401
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/GlobalDataGeneration.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_GLOBAL_DATA_GENERATION_H__
+#define __ENCO_TRANSFORM_GLOBAL_DATA_GENERATION_H__
+
+#include "Code.h"
+
+#include <ostream>
+
+namespace enco
+{
+
+using GlobalOffset = uint32_t;
+
+struct GlobalData
+{
+ static GlobalOffset data_offset(const ann::Operand *);
+ /**
+ * @brief Return the weight offset of a given bag
+ *
+ * @note The behavior of "data_offset" is undefined if a bag has no weight.
+ */
+ static GlobalOffset data_offset(const coco::Bag *);
+
+ static GlobalOffset name_offset(const coco::Input *);
+ static GlobalOffset dims_offset(const coco::Input *);
+ static GlobalOffset name_offset(const coco::Output *);
+ static GlobalOffset dims_offset(const coco::Output *);
+};
+
+/**
+ * @brief Generate 'Global' weight array.
+ *
+ * NOTE Succeeding passes can access offsets via "GlobalData"
+ */
+void generate_global_data(std::ostream &, enco::Code *);
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_GLOBAL_DATA_GENERATION_H__
diff --git a/compiler/enco/core/src/Transforms/IdenticalObjectReduction.cpp b/compiler/enco/core/src/Transforms/IdenticalObjectReduction.cpp
new file mode 100644
index 000000000..cb996d2ac
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/IdenticalObjectReduction.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IdenticalObjectReduction.h"
+#include "IRUtils.h"
+
+#include <set>
+
+namespace enco
+{
+
+void reduce_identical_object(enco::Code *code)
+{
+ auto m = code->module();
+
+ std::set<coco::Copy *> detached;
+
+ // Preceding optimizations may generate "free" instructions.
+ // - i.e. an instruction not linked to a block
+ //
+ // Let's iterate over only a sequence of "bounded" instructions.
+ for (auto ins : instr_sequence(m))
+ {
+ assert(ins != nullptr);
+ assert(ins->parent() != nullptr);
+
+ auto copy = ins->asCopy();
+
+ if (copy == nullptr)
+ {
+ // Skip if instruction is not a copy
+ continue;
+ }
+
+ // TODO Support non-Feature Objects
+ auto ifm = copy->from()->asFeature();
+ auto ofm = copy->into()->asFeature();
+
+ assert(ofm->bag() != nullptr);
+
+ if (ifm->layout()->id() != ofm->layout()->id())
+ {
+ continue;
+ }
+
+ if (ifm->layout()->id() != coco::FeatureLayouts::BHWC::uid())
+ {
+ continue;
+ }
+
+ // Skip if this copy produces network output
+ if (ofm->bag()->output())
+ {
+ // TODO Optimize this case
+ //
+ // Note that the code under optimization is of the following form:
+ //
+ // %ifm <- Instr(...)
+ // %ofm <- Copy(%ifm)
+ //
+ // Let's assume that "Copy" is the only reader of %ifm (to be precise, its bag).
+ //
+ // Then, it is possible to rewrite the above fragment as follows:
+ //
+ // %ofm <- Instr(...)
+ //
+ continue;
+ }
+
+ if (ofm->bag()->reads()->size() > 0)
+ {
+ // Let us consider the following code:
+ //
+ // Bag:
+ // %bag_0 = Bag(...)
+ // %bag_1 = Bag(...)
+ // %bag_2 = Bag(...)
+ //
+ // Object:
+ // %obj_0 = FeatureObject(bag: %bag_0)
+ // %obj_1 = FeatureObject(bag: %bag_1)
+ //
+ // Instr:
+ // copy an object from %obj_0 into %obj_1
+ // shuffle values from %bag_1 into %bag_2
+ // eval Conv2D with %obj_1
+ //
+ // Identical Object Reduction (IOR) tries to eliminate the first copy via
+ // substitution (substitute all the occurrence of %obj_1 as use with %obj_0).
+ //
+ // Here is the code transformed by IOR:
+ //
+ // Bag:
+ // %bag_0 = Bag(...)
+ // %bag_1 = Bag(...)
+ // %bag_2 = Bag(...)
+ //
+ // Object:
+ // %obj_0 = FeatureObject(bag: %bag_0)
+ // %obj_1 = FeatureObject(bag: %bag_1)
+ //
+ // Instr:
+ // shuffle values from %bag_1 into %bag_2
+ // eval Conv2D with %obj_0
+ //
+ // Note that there is no updater of %bag_1 after IOR, and thus the behavior
+ // of the first shuffle instruction has changed.
+ //
+      // This example shows that it is impossible to simply substitute %obj_1
+ // with %obj_0 in the presence of readers over its backing bag.
+ continue;
+ }
+
+ subst(copy->into(), copy->from());
+
+ copy->detach();
+ detached.insert(copy);
+ }
+
+ for (auto copy : detached)
+ {
+ m->entity()->instr()->destroy(copy);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/IdenticalObjectReduction.h b/compiler/enco/core/src/Transforms/IdenticalObjectReduction.h
new file mode 100644
index 000000000..b5bb25d7c
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/IdenticalObjectReduction.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_IDENTICAL_OBJECT_REDUCTION_H__
+#define __ENCO_TRANSFORM_IDENTICAL_OBJECT_REDUCTION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Replace identically copied objects with their original object
+ *
+ * >>> BEFORE <<<
+ * %bag_0 = Bag(size: N)
+ * %bag_1 = Bag(size: N)
+ *
+ * %obj_0 = Feature(layout: BHWC) at %bag_0
+ * %obj_1 = Feature(layout: BHWC) at %bag_1
+ *
+ * copy(from: %obj_0, into: %obj_1)
+ * ...
+ * Use(%obj_0)
+ * Use(%obj_1)
+ * ...
+ *
+ * >>> AFTER <<<
+ * %bag_0 = Bag(size: N)
+ * %bag_1 = Bag(size: N)
+ *
+ * %obj_0 = Feature(layout: BHWC) at %bag_0
+ * %obj_1 = Feature(layout: BHWC) at %bag_1
+ *
+ * copy(from: %obj_0, into: %obj_1)
+ * ...
+ * Use(%obj_0)
+ * Use(%obj_0) <- %obj_1 is replaced
+ * ...
+ */
+void reduce_identical_object(enco::Code *code);
+
+struct IdenticalObjectReductionPass final : public Pass
+{
+ PASS_CTOR(IdenticalObjectReductionPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { reduce_identical_object(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_IDENTICAL_OBJECT_REDUCTION_H__
diff --git a/compiler/enco/core/src/Transforms/IdenticalObjectReduction.test.cpp b/compiler/enco/core/src/Transforms/IdenticalObjectReduction.test.cpp
new file mode 100644
index 000000000..772bea08e
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/IdenticalObjectReduction.test.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IdenticalObjectReduction.h"
+
+#include <gtest/gtest.h>
+
+TEST(IdenticalObjectReductionTest, case_000)
+{
+ auto m = coco::Module::create();
+
+ // Create a "free" Eval instruction
+ m->entity()->instr()->create<coco::Eval>();
+
+ enco::Code code{m.get(), nullptr};
+
+ // NOTE This code SHOULD NOT crash
+ enco::reduce_identical_object(&code);
+}
diff --git a/compiler/enco/core/src/Transforms/IndirectCopyElimination.cpp b/compiler/enco/core/src/Transforms/IndirectCopyElimination.cpp
new file mode 100644
index 000000000..b36620f61
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/IndirectCopyElimination.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IndirectCopyElimination.h"
+
+#include <cassert>
+
+namespace
+{
+
+coco::Copy *as_copy(coco::Instr *ins) { return ins ? ins->asCopy() : nullptr; }
+
+/**
+ * @brief Return a set of copy instructions that are accessible from top-level module
+ */
+std::set<coco::Copy *> linked_copy_instrs(coco::Module *m)
+{
+ std::set<coco::Copy *> res;
+
+ for (uint32_t n = 0; n < m->entity()->instr()->size(); ++n)
+ {
+ auto ins = m->entity()->instr()->at(n);
+ assert(ins != nullptr);
+
+ if (ins->parent() && ins->parent()->parent())
+ {
+ if (auto copy = ins->asCopy())
+ {
+ res.insert(copy);
+ }
+ }
+ }
+
+ return res;
+}
+
+} // namespace
+
+namespace enco
+{
+
+void eliminate_indirect_copy(enco::Code *code)
+{
+ auto m = code->module();
+
+ for (auto child : linked_copy_instrs(m))
+ {
+ auto from = child->from();
+ assert(from != nullptr);
+
+ // Find the irreducible origin
+ while (true)
+ {
+ if (auto producer = coco::producer(from))
+ {
+ if (auto parent = as_copy(producer->loc()))
+ {
+ assert(parent->from() != nullptr);
+ from = parent->from();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ child->from(from);
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/IndirectCopyElimination.h b/compiler/enco/core/src/Transforms/IndirectCopyElimination.h
new file mode 100644
index 000000000..acfdf569b
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/IndirectCopyElimination.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_TRANSFORM_INDIRECT_COPY_ELIMINATION_H__
+#define __ENCO_TRANSFORM_INDIRECT_COPY_ELIMINATION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Convert all the indirect copies into direct copies
+ *
+ * >>> BEFORE <<<
+ * %obj_0 = ...
+ * %obj_1 = ...
+ * %obj_2 = ...
+ *
+ * copy(from: %obj_0, into: %obj_1)
+ * copy(from: %obj_1, into: %obj_2)
+ *
+ * >>> AFTER <<<
+ * %obj_0 = ...
+ * %obj_1 = ...
+ * %obj_2 = ...
+ *
+ * copy(from: %obj_0, into: %obj_1)
+ * copy(from: %obj_0, into: %obj_2)
+ *
+ */
+void eliminate_indirect_copy(enco::Code *code);
+
+struct IndirectCopyEliminationPass final : public enco::Pass
+{
+ PASS_CTOR(IndirectCopyEliminationPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { eliminate_indirect_copy(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __ENCO_TRANSFORM_INDIRECT_COPY_ELIMINATION_H__
diff --git a/compiler/enco/core/src/Transforms/IntrinsicSelection.cpp b/compiler/enco/core/src/Transforms/IntrinsicSelection.cpp
new file mode 100644
index 000000000..7bf1c4926
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/IntrinsicSelection.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IntrinsicSelection.h"
+
+#include "coex/IR.h"
+
+namespace
+{
+
+/**
+ * @brief Return a backend-specific coco (extended) instruction
+ *
+ * @note rewrite(ins) returns nullptr if selection fails
+ */
+coco::Instr *rewrite(coco::Instr *curr)
+{
+ auto m = curr->module();
+ assert(m != nullptr);
+
+ if (auto eval = coco::safe_cast<coco::Eval>(curr))
+ {
+ if (auto concat_f = eval->op()->asConcatF())
+ {
+ auto fst_load = concat_f->left()->asLoad();
+ auto snd_load = concat_f->right()->asLoad();
+
+ if (fst_load && snd_load && (concat_f->axis() == coco::ConcatF::Axis::Depth))
+ {
+ // Here is the pattern of interest
+ //
+ // %ofm = eval(ConcatF(Depth, Load(%left), Load(%right)))
+ //
+ auto fst_feature = fst_load->object()->asFeature();
+ auto snd_feature = snd_load->object()->asFeature();
+ assert((fst_feature != nullptr) && (snd_feature != nullptr));
+
+ auto out_feature = eval->out()->asFeature();
+ assert(out_feature != nullptr);
+
+ eval->out(nullptr);
+
+ auto depth_concat = m->entity()->instr()->create<ANNDepthConcatF>();
+
+ depth_concat->out(out_feature);
+ depth_concat->fst(fst_feature);
+ depth_concat->snd(snd_feature);
+
+ return depth_concat;
+ }
+
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+}
+
+} // namespace
+
+namespace enco
+{
+
+void select_intrinsic(enco::Code *code)
+{
+ auto m = code->module();
+
+ for (auto blk = m->block()->head(); blk; blk = blk->next())
+ {
+ auto ins = blk->instr()->head();
+
+ while (ins)
+ {
+ if (auto rewritten_ins = rewrite(ins))
+ {
+ rewritten_ins->insertBefore(ins);
+ ins->detach();
+
+ ins = rewritten_ins;
+ }
+
+ ins = ins->next();
+ }
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/IntrinsicSelection.h b/compiler/enco/core/src/Transforms/IntrinsicSelection.h
new file mode 100644
index 000000000..67d38eaeb
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/IntrinsicSelection.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTRINSIC_SELECTION_H__
+#define __INTRINSIC_SELECTION_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Select Intrinsic (API) to be used
+ *
+ * This pass is an analogue of the "Instruction Selection" pass. This "Intrinsic Selection" pass
+ * will replace a general coco IR instruction into a backend-specific coco (extended) IR
+ * instruction.
+ */
+void select_intrinsic(enco::Code *);
+
+struct IntrinsicSelectionPass final : public Pass
+{
+ PASS_CTOR(IntrinsicSelectionPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { select_intrinsic(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __INTRINSIC_SELECTION_H__
diff --git a/compiler/enco/core/src/Transforms/Optimizations.cpp b/compiler/enco/core/src/Transforms/Optimizations.cpp
new file mode 100644
index 000000000..7f0974dd0
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/Optimizations.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Optimizations.h"
+#include "CodeIndex.h"
+
+#include <cassert>
+
+namespace enco
+{
+
+void generate_bypass_shuffle(enco::Code *code)
+{
+ auto m = code->module();
+
+ for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+ {
+ auto bag = m->entity()->bag()->at(n);
+
+    // NOTE The current implementation assumes that all the updates occur before the first read
+ // TODO Remove this assumption
+ for (auto u : coco::updaters(bag))
+ {
+ if ((u->loc() == nullptr) || (u->loc()->asShuffle() == nullptr))
+ {
+ // Skip if updater is not a Shuffle instruction
+ continue;
+ }
+
+ for (auto r : coco::readers(bag))
+ {
+ if ((r->loc() == nullptr) || (r->loc()->asShuffle() == nullptr))
+ {
+ // Skip if reader is not a Shuffle instruction
+ continue;
+ }
+
+ auto shuffle_1 = u->loc()->asShuffle();
+ auto shuffle_2 = r->loc()->asShuffle();
+
+ // Construct a shuffle instruction
+ auto shuffle_3 = m->entity()->instr()->create<coco::Shuffle>();
+
+ shuffle_3->from(shuffle_1->from());
+ shuffle_3->into(shuffle_2->into());
+
+ // Attempt to construct a valid bypass shuffle instruction
+ bool valid = true;
+
+ for (const auto &C : shuffle_2->range())
+ {
+ auto B = shuffle_2->at(C);
+
+ if (!shuffle_1->defined(B))
+ {
+ valid = false;
+ break;
+ }
+
+ auto A = shuffle_1->at(B);
+
+ shuffle_3->insert(A, C);
+ }
+
+ if (valid)
+ {
+ // Insert shuffle_3 before shuffle_2 if shuffle_3 is a valid bypass of shuffle_2
+ shuffle_3->insertBefore(shuffle_2);
+
+ // NOTE shuffle_2 SHOULD BE detached and destroyed after shuffle_3 is inserted
+ shuffle_2->detach();
+ m->entity()->instr()->destroy(shuffle_2);
+ }
+ else
+ {
+ // Destroy shuffle_3 (bypass shuffle) if it is invalid
+ m->entity()->instr()->destroy(shuffle_3);
+ }
+ }
+ }
+ }
+}
+
+} // namespace enco
+
+//
+// Hoist Object
+//
+namespace
+{
+
+bool hoistable(const coco::Shuffle *shuffle)
+{
+ auto range = shuffle->range();
+
+ if (range.size() != shuffle->into()->size())
+ {
+ return false;
+ }
+
+ for (const auto &dst : range)
+ {
+ if (shuffle->at(dst).value() != dst.value())
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool complete(const coco::Shuffle *s) { return s->range().size() == s->into()->size(); }
+
+bool compatible(const coco::Shuffle *s1, const coco::Shuffle *s2)
+{
+ if (s1->from() != s2->from())
+ {
+ return false;
+ }
+
+ if (s1->into()->size() != s2->into()->size())
+ {
+ return false;
+ }
+
+ auto range_1 = s1->range();
+ auto range_2 = s2->range();
+
+ if (range_1.size() != range_2.size())
+ {
+ return false;
+ }
+
+ bool res = true;
+
+ for (const auto &dst : range_2)
+ {
+ if (!s1->defined(dst))
+ {
+ res = false;
+ break;
+ }
+
+ auto src_1 = s1->at(dst);
+ auto src_2 = s2->at(dst);
+
+ if (src_1.value() != src_2.value())
+ {
+ res = false;
+ break;
+ }
+ }
+
+ return res;
+}
+
+} // namespace
+
+namespace enco
+{
+
+void hoist_object(enco::Code *code)
+{
+ auto m = code->module();
+
+ //
+ // Case 1
+ //
+ for (uint32_t n = 0; n < m->entity()->instr()->size(); ++n)
+ {
+ if (auto shuffle = m->entity()->instr()->at(n)->asShuffle())
+ {
+ if (shuffle->parent() == nullptr)
+ {
+ continue;
+ }
+
+ if (hoistable(shuffle))
+ {
+ auto from = shuffle->from();
+ auto into = shuffle->into();
+
+ into->replaceAllDepsWith(from);
+ }
+ }
+ }
+
+ //
+ // Case 2
+ //
+ for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
+ {
+ auto bag = m->entity()->bag()->at(n);
+
+ std::map<CodeIndex, coco::Shuffle *> collected;
+
+ for (auto reader : coco::readers(bag))
+ {
+ if (auto ins = reader->loc())
+ {
+ if (auto shuffle = ins->asShuffle())
+ {
+ collected[code_index(shuffle)] = shuffle;
+ }
+ }
+ }
+
+ std::vector<coco::Shuffle *> sorted;
+
+ for (auto it = collected.begin(); it != collected.end(); ++it)
+ {
+ sorted.emplace_back(it->second);
+ }
+
+ for (uint32_t curr = 0; curr < sorted.size(); ++curr)
+ {
+ auto const curr_ins = sorted.at(curr);
+ auto const curr_bag = curr_ins->into();
+
+ if (!complete(curr_ins))
+ {
+ continue;
+ }
+
+ for (uint32_t next = curr + 1; next < sorted.size(); ++next)
+ {
+ auto const next_ins = sorted.at(next);
+ auto const next_bag = next_ins->into();
+
+ if (!complete(next_ins))
+ {
+ continue;
+ }
+
+ if (compatible(curr_ins, next_ins))
+ {
+ next_bag->replaceAllDepsWith(curr_bag);
+ }
+ }
+ }
+ }
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/Optimizations.h b/compiler/enco/core/src/Transforms/Optimizations.h
new file mode 100644
index 000000000..7cfc2305c
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/Optimizations.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_OPTIMIZATIONS_H__
+#define __ENCO_OPTIMIZATIONS_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Add a bypass Shuffle if two consecutive Shuffles map the same from-into positions
+ *
+ * %bag_1 = Bag(size: N)
+ * %bag_2 = Bag(size: N)
+ * %bag_3 = Bag(size: N)
+ *
+ * >>> BEFORE <<<
+ * Shuffle(from: %bag_1, into: %bag_2, [0 -> 0])
+ * Shuffle(from: %bag_2, into: %bag_3, [0 -> 0])
+ *
+ * Let's refer to the former shuffle as Shuffle 1 and the latter one as Shuffle 2.
+ * We can replace Shuffle 2 with new Shuffle 3 as follows when Shuffle 1 and
+ * Shuffle 2 map to the same position.
+ *
+ * >>> AFTER <<<
+ * Shuffle(from: %bag_1, into: %bag_2, [0 -> 0]) <- Shuffle 1
+ * Shuffle(from: %bag_1, into: %bag_3, [0 -> 0]) <- Shuffle 3
+ *
+ * Note that Shuffle 1 can be eliminated when %bag_2 is not used
+ */
+void generate_bypass_shuffle(enco::Code *code);
+
+struct BypassGenerationPass final : public Pass
+{
+ PASS_CTOR(BypassGenerationPass)
+ {
+ // DO NOTHING
+ }
+
+ void run(const SessionID &sess) const override { generate_bypass_shuffle(code(sess)); }
+};
+
+/**
+ * @brief Update the base bag of each object if possible
+ *
+ * --- Case 1 ---
+ * Let us consider the following code:
+ *
+ * %bag_1 = Bag(size: 4)
+ * %bag_2 = Bag(size: 1)
+ *
+ * %obj_1 = ... at %bag_1
+ * %obj_2 = ... at %bag_2
+ *
+ * ...
+ * Shuffle(from: %bag_1, into: %bag_2, [0 -> 0]) <- shuffle
+ * ...
+ *
+ * Note that the content of %bag_2 after shuffle is identical to a part of %bag_1, so
+ * the following code is identical to the above code
+ *
+ * %bag_1 = Bag(size: 4)
+ * %bag_2 = Bag(size: 1)
+ *
+ * %obj_1 = ... at %bag_1
+ * %obj_2 = ... at %bag_1
+ *
+ * ...
+ * Shuffle(from: %bag_1, into: %bag_2, [0 -> 0])
+ * ...
+ *
+ * --- Case 2 ---
+ * Let us consider the following code:
+ *
+ * %bag_1 = Bag(size: 4)
+ * %bag_2 = Bag(size: 1)
+ * %bag_3 = Bag(size: 1)
+ *
+ * %obj_1 = ... at %bag_2
+ * %obj_2 = ... at %bag_3
+ *
+ * Shuffle(from: %bag_1, into: %bag_2, [0 -> 0]) <- shuffle_1
+ * Shuffle(from: %bag_1, into: %bag_3, [0 -> 0]) <- shuffle_2
+ *
+ * Note that the content of %bag_3 after shuffle_2 is identical to that of %bag_2 after shuffle_1,
+ * so the following code is identical to the above one:
+ *
+ * %bag_1 = Bag(size: 4)
+ * %bag_2 = Bag(size: 1)
+ * %bag_3 = Bag(size: 1)
+ *
+ * %obj_1 = ... at %bag_2
+ * %obj_2 = ... at %bag_2 <- HERE
+ *
+ * Shuffle(from: %bag_1, into: %bag_2, [0 -> 0]) <- shuffle_1
+ * Shuffle(from: %bag_1, into: %bag_3, [0 -> 0]) <- shuffle_2
+ *
+ * "hoist_object" optimization rewrites the former code as the latter one.
+ *
+ * NOTE "hoist_object" DOES NOT change any instruction. It just updates the base bag of objects of
+ * interest.
+ */
+void hoist_object(enco::Code *code);
+
+} // namespace enco
+
+#endif // __ENCO_OPTIMIZATIONS_H__
diff --git a/compiler/enco/core/src/Transforms/Split.cpp b/compiler/enco/core/src/Transforms/Split.cpp
new file mode 100644
index 000000000..b57b8f882
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/Split.cpp
@@ -0,0 +1,1233 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Split.h"
+#include "Usage.h"
+#include "Session.h"
+#include "coex/IR.h"
+
+#include <coco/IR.h>
+
+#include <nncc/core/ADT/kernel/NHWCLayout.h>
+#include <stdex/Memory.h>
+
+#include <map>
+#include <stdexcept>
+#include <functional>
+
+using stdex::make_unique;
+
+namespace
+{
+
+// Global registry mapping each compiled coco module to the ANN context built for it.
+// Written by SplitPass::runOnCode and read back via enco::SubnetManager::context.
+// NOTE entries are never erased, so contexts live until process exit.
+std::map<const coco::Module *, std::unique_ptr<ANNContext>> _subnet_contexts;
+
+} // namespace
+
+namespace enco
+{
+
+/**
+ * @brief Return the ANNContext previously registered for module "m"
+ *
+ * NOTE std::map::at throws std::out_of_range if no context was registered for "m"
+ *      (i.e. if split_into_phases has not run on the module's code yet)
+ */
+const ANNContext *SubnetManager::context(const coco::Module *m)
+{
+  return _subnet_contexts.at(m).get();
+}
+
+} // namespace enco
+
+namespace
+{
+
+// Callable form of an appender: receives the binder for the target block
+using Appender = std::function<void(ANNBinder *binder)>;
+
+/**
+ * @brief Interface for objects that emit one ANN operation (plus its operands)
+ *        into a given ANNBinder
+ */
+struct ANNOpAppender
+{
+  virtual ~ANNOpAppender() = default;
+
+  virtual void append(ANNBinder *binder) const = 0;
+};
+
+/**
+ * @brief Emits an ANN ADD operation over two float feature operands
+ */
+class ANNAddAppender final : public ANNOpAppender
+{
+public:
+  void left(coco::FeatureObject *o) { _left = o; }
+  void right(coco::FeatureObject *o) { _right = o; }
+  void out(coco::FeatureObject *o) { _out = o; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto left = binder->addOperand<float>(_left);
+    auto right = binder->addOperand<float>(_right);
+    // ADD takes a mandatory fuse-code operand; 0 means no fused activation
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto out = binder->addOperand<float>(_out);
+
+    binder->addOperation(ann::Operation::Code::ADD, {left, right, fuse}, {out});
+  }
+
+private:
+  coco::FeatureObject *_left = nullptr;
+  coco::FeatureObject *_right = nullptr;
+  coco::FeatureObject *_out = nullptr;
+};
+
+/**
+ * @brief Emits an ANN MUL operation over two float feature operands
+ */
+class ANNMulAppender final : public ANNOpAppender
+{
+public:
+  void left(coco::FeatureObject *o) { _left = o; }
+  void right(coco::FeatureObject *o) { _right = o; }
+  void out(coco::FeatureObject *o) { _out = o; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto left = binder->addOperand<float>(_left);
+    auto right = binder->addOperand<float>(_right);
+    // MUL takes a mandatory fuse-code operand; 0 means no fused activation
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto out = binder->addOperand<float>(_out);
+
+    binder->addOperation(ann::Operation::Code::MUL, {left, right, fuse}, {out});
+  }
+
+private:
+  coco::FeatureObject *_left = nullptr;
+  coco::FeatureObject *_right = nullptr;
+  coco::FeatureObject *_out = nullptr;
+};
+
+/**
+ * WARN The current implementation supports concatenation along depth only
+ */
+class ANNConcatAppender final : public ANNOpAppender
+{
+public:
+ void left(coco::FeatureObject *o) { _left = o; }
+ void right(coco::FeatureObject *o) { _right = o; }
+ void out(coco::FeatureObject *o) { _out = o; }
+
+public:
+ void append(ANNBinder *binder) const override
+ {
+ auto left = binder->addOperand<float>(_left);
+ auto right = binder->addOperand<float>(_right);
+ auto axis = binder->addOperand<int32_t>();
+ binder->setOperand(axis, 3 /* DEPTH */);
+
+ auto out = binder->addOperand<float>(_out);
+
+ binder->addOperation(ann::Operation::Code::CONCAT, {left, right, axis}, {out});
+ }
+
+private:
+ coco::FeatureObject *_left = nullptr;
+ coco::FeatureObject *_right = nullptr;
+ coco::FeatureObject *_out = nullptr;
+};
+
+/**
+ * @brief Emits an ANN CONV_2D operation (explicit padding form)
+ *
+ * Kernel weights (and bias, if present) are materialized as constant operands
+ * read from the session's f32 weight data.
+ */
+class ANNConv2DAppender final : public ANNOpAppender
+{
+public:
+  void session(const enco::SessionID &sess) { _sess = sess; }
+
+  void pad(const coco::Padding2D *pad) { _pad = *pad; }
+  void stride(const coco::Stride2D *stride) { _stride = *stride; }
+
+  void ifm(coco::FeatureObject *ifm) { _ifm = ifm; }
+  void ker(coco::KernelObject *ker) { _ker = ker; }
+  // Q: Should we take a bias as a feature object?
+  // NOTE This interface is subject to change
+  void bias(coco::FeatureObject *bias) { _bias = bias; }
+  void ofm(coco::FeatureObject *ofm) { _ofm = ofm; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto data = enco::data(_sess);
+
+    auto ifm = binder->addOperand<float>(_ifm);
+    auto ker = binder->addOperand<float>(_ker);
+
+    // Fill kernel data
+    {
+      auto ker_bag = _ker->bag();
+      auto ker_weight = data->f32()->weight(ker_bag);
+
+      assert(ker_weight.data() != nullptr);
+
+      binder->setOperand(ker, ker_weight.data(), ker_weight.data() + ker_weight.size());
+    }
+
+    // Conv2D in coco IR has no bias, but bias is mandatory in Android NN API
+    // Bias length equals the kernel count (= number of output channels)
+    auto bias = binder->addOperand<float>(nncc::core::ADT::tensor::Shape{_ker->shape().count()});
+
+    // Fill bias data
+    if (_bias == nullptr)
+    {
+      // Use a fresh empty bias if "bias" is not specified
+      auto length = _ker->shape().count();
+
+      std::vector<float> values;
+      values.resize(length, 0.0f);
+
+      binder->setOperand(bias, values.begin(), values.end());
+    }
+    else
+    {
+      // Use specified "bias"
+      auto bias_bag = _bias->bag();
+      auto bias_weight = data->f32()->weight(bias_bag);
+
+      assert(bias_weight.data() != nullptr);
+      assert(bias_weight.size() == _ker->shape().count());
+
+      binder->setOperand(bias, bias_weight.data(), bias_weight.data() + bias_weight.size());
+    }
+
+    // Explicit padding, strides, and fuse code as scalar int32 operands
+    auto left = binder->addOperand<int32_t>();
+    binder->setOperand(left, _pad.left());
+    auto right = binder->addOperand<int32_t>();
+    binder->setOperand(right, _pad.right());
+    auto top = binder->addOperand<int32_t>();
+    binder->setOperand(top, _pad.top());
+    auto bottom = binder->addOperand<int32_t>();
+    binder->setOperand(bottom, _pad.bottom());
+    auto hstride = binder->addOperand<int32_t>();
+    binder->setOperand(hstride, _stride.horizontal());
+    auto vstride = binder->addOperand<int32_t>();
+    binder->setOperand(vstride, _stride.vertical());
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto ofm = binder->addOperand<float>(_ofm);
+
+    binder->addOperation(ann::Operation::Code::CONV_2D,
+                         {ifm, ker, bias, left, right, top, bottom, hstride, vstride, fuse}, {ofm});
+  }
+
+private:
+  enco::SessionID _sess;
+
+private:
+  coco::Padding2D _pad;
+  coco::Stride2D _stride;
+
+private:
+  coco::FeatureObject *_ifm = nullptr;
+  coco::KernelObject *_ker = nullptr;
+  coco::FeatureObject *_bias = nullptr;
+  coco::FeatureObject *_ofm = nullptr;
+};
+
+/**
+ * @brief Emits an ANN DEPTHWISE_CONV_2D operation (explicit padding form)
+ *
+ * The coco kernel of shape [N, 1, H, W] (N = group * multiplier) is re-laid-out
+ * into the [1, H, W, N] constant operand that ANN expects, and a zero bias of
+ * length N is synthesized since coco IR has no bias.
+ */
+class ANNDepthwiseConv2DAppender final : public ANNOpAppender
+{
+public:
+  void session(const enco::SessionID &sess) { _sess = sess; }
+
+  void multiplier(const uint32_t &multiplier) { _multiplier = multiplier; }
+  void pad(const coco::Padding2D *pad) { _pad = *pad; }
+  void stride(const coco::Stride2D *stride) { _stride = *stride; }
+
+  void ifm(coco::FeatureObject *ifm) { _ifm = ifm; }
+  void ker(coco::KernelObject *ker) { _ker = ker; }
+  void ofm(coco::FeatureObject *ofm) { _ofm = ofm; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    using namespace nncc::core::ADT;
+
+    auto data = enco::data(_sess);
+
+    const uint32_t ker_N = _ker->shape().count();
+    const uint32_t ker_H = _ker->shape().height();
+    const uint32_t ker_W = _ker->shape().width();
+
+    assert(ker_N % _multiplier == 0);
+    const uint32_t group = ker_N / _multiplier;
+
+    auto ifm = binder->addOperand<float>(_ifm);
+    auto ker = binder->addOperand<float>(tensor::Shape{1, ker_H, ker_W, ker_N});
+
+    // Fill kernel data
+    {
+      auto obj = _ker;
+      auto shape = obj->shape();
+
+      auto ovl = data->f32()->read(obj);
+      assert(ovl != nullptr);
+
+      // Flatten?
+      std::vector<float> values;
+
+      /**
+       * Android NN computes DEPTHWISE_CONV_2D as follows:
+       *
+       * output[b, i, j, k * channel_multiplier + q] =
+       *    sum_{di, dj} (
+       *        input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+       *        filter[1, di, dj, k * channel_multiplier + q]
+       *    ) + bias[k * channel_multiplier + q]
+       *
+       */
+      // Iterate H, W outermost and (group, multiplier) innermost so that the
+      // flattened vector matches the [1, H, W, N] channel-last layout above
+      for (uint32_t row = 0; row < shape.height(); ++row)
+      {
+        for (uint32_t col = 0; col < shape.width(); ++col)
+        {
+          for (uint32_t g = 0; g < group; ++g)
+          {
+            for (uint32_t m = 0; m < _multiplier; ++m)
+            {
+              const auto value = ovl->at(g * _multiplier + m, 0, row, col);
+              values.emplace_back(value);
+            }
+          }
+        }
+      }
+
+      assert(values.size() == nncc::core::ADT::kernel::num_elements(shape));
+      binder->setOperand(ker, values.begin(), values.end());
+    }
+
+    // Conv2D in coco IR has no bias, but bias is mandatory in Android NN API
+    auto bias = binder->addOperand<float>(nncc::core::ADT::tensor::Shape{_ker->shape().count()});
+
+    // Fill bias data (all zeros)
+    {
+      auto length = _ker->shape().count();
+
+      std::vector<float> values;
+      values.resize(length, 0.0f);
+
+      binder->setOperand(bias, values.begin(), values.end());
+    }
+
+    // Explicit padding, strides, channel multiplier, and fuse code
+    auto left = binder->addOperand<int32_t>();
+    binder->setOperand(left, _pad.left());
+    auto right = binder->addOperand<int32_t>();
+    binder->setOperand(right, _pad.right());
+    auto top = binder->addOperand<int32_t>();
+    binder->setOperand(top, _pad.top());
+    auto bottom = binder->addOperand<int32_t>();
+    binder->setOperand(bottom, _pad.bottom());
+    auto hstride = binder->addOperand<int32_t>();
+    binder->setOperand(hstride, _stride.horizontal());
+    auto vstride = binder->addOperand<int32_t>();
+    binder->setOperand(vstride, _stride.vertical());
+    auto multiplier = binder->addOperand<int32_t>();
+    binder->setOperand(multiplier, _multiplier);
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto ofm = binder->addOperand<float>(_ofm);
+
+    binder->addOperation(
+        ann::Operation::Code::DEPTHWISE_CONV_2D,
+        {ifm, ker, bias, left, right, top, bottom, hstride, vstride, multiplier, fuse}, {ofm});
+  }
+
+private:
+  enco::SessionID _sess;
+
+private:
+  // FIX was previously uninitialized (UB if append() ran before multiplier());
+  // now zero-initialized like every other member of this class
+  uint32_t _multiplier = 0;
+  coco::Padding2D _pad;
+  coco::Stride2D _stride;
+
+private:
+  coco::FeatureObject *_ifm = nullptr;
+  coco::KernelObject *_ker = nullptr;
+  coco::FeatureObject *_ofm = nullptr;
+};
+
+/**
+ * @brief Emits an ANN RELU operation over a float feature operand
+ */
+class ANNReLUAppender final : public ANNOpAppender
+{
+public:
+  void ifm(coco::FeatureObject *ifm) { _ifm = ifm; }
+  void ofm(coco::FeatureObject *ofm) { _ofm = ofm; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto ifm = binder->addOperand<float>(_ifm);
+    auto ofm = binder->addOperand<float>(_ofm);
+
+    binder->addOperation(ann::Operation::Code::RELU, {ifm}, {ofm});
+  }
+
+private:
+  coco::FeatureObject *_ifm = nullptr;
+  coco::FeatureObject *_ofm = nullptr;
+};
+
+/**
+ * @brief Emits an ANN RELU6 operation over a float feature operand
+ */
+class ANNReLU6Appender final : public ANNOpAppender
+{
+public:
+  void ifm(coco::FeatureObject *ifm) { _ifm = ifm; }
+  void ofm(coco::FeatureObject *ofm) { _ofm = ofm; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto ifm = binder->addOperand<float>(_ifm);
+    auto ofm = binder->addOperand<float>(_ofm);
+
+    binder->addOperation(ann::Operation::Code::RELU6, {ifm}, {ofm});
+  }
+
+private:
+  coco::FeatureObject *_ifm = nullptr;
+  coco::FeatureObject *_ofm = nullptr;
+};
+
+/**
+ * @brief Emits an ANN MAX_POOL_2D operation (explicit padding form)
+ */
+class ANNMaxPool2DAppender final : public ANNOpAppender
+{
+public:
+  void pad(const coco::Padding2D *pad) { _pad = *pad; }
+  void stride(const coco::Stride2D *stride) { _stride = *stride; }
+  void window(const coco::Window2D *window) { _window = *window; }
+
+  void ifm(coco::FeatureObject *ifm) { _ifm = ifm; }
+  void ofm(coco::FeatureObject *ofm) { _ofm = ofm; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto ifm = binder->addOperand<float>(_ifm);
+
+    // Set padding
+    auto left = binder->addOperand<int32_t>();
+    binder->setOperand(left, _pad.left());
+    auto right = binder->addOperand<int32_t>();
+    binder->setOperand(right, _pad.right());
+    auto top = binder->addOperand<int32_t>();
+    binder->setOperand(top, _pad.top());
+    auto bottom = binder->addOperand<int32_t>();
+    binder->setOperand(bottom, _pad.bottom());
+
+    // Set horizontal/vertical stride
+    auto hstride = binder->addOperand<int32_t>();
+    binder->setOperand(hstride, _stride.horizontal());
+    auto vstride = binder->addOperand<int32_t>();
+    binder->setOperand(vstride, _stride.vertical());
+
+    // Set receptive field size
+    auto width = binder->addOperand<int32_t>();
+    binder->setOperand(width, _window.width());
+    auto height = binder->addOperand<int32_t>();
+    binder->setOperand(height, _window.height());
+
+    // Set fuse code (0 = no fused activation)
+    // TODO Support operation fusion
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto ofm = binder->addOperand<float>(_ofm);
+
+    binder->addOperation(ann::Operation::Code::MAX_POOL_2D,
+                         {ifm, left, right, top, bottom, hstride, vstride, width, height, fuse},
+                         {ofm});
+  }
+
+private:
+  coco::Padding2D _pad;
+  coco::Stride2D _stride;
+  coco::Window2D _window;
+
+private:
+  coco::FeatureObject *_ifm = nullptr;
+  coco::FeatureObject *_ofm = nullptr;
+};
+
+/**
+ * @brief Emits an ANN AVERAGE_POOL_2D operation (explicit padding form)
+ *
+ * NOTE Callers are expected to emit this only for padding-excluded divisors
+ *      (see ANNOpBuilder's AvgPool2D handling)
+ */
+class ANNAvgPool2DAppender final : public ANNOpAppender
+{
+public:
+  void pad(const coco::Padding2D *pad) { _pad = *pad; }
+  void stride(const coco::Stride2D *stride) { _stride = *stride; }
+  void window(const coco::Window2D *window) { _window = *window; }
+
+  void ifm(coco::FeatureObject *ifm) { _ifm = ifm; }
+  void ofm(coco::FeatureObject *ofm) { _ofm = ofm; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto ifm = binder->addOperand<float>(_ifm);
+
+    // Set padding
+    auto left = binder->addOperand<int32_t>();
+    binder->setOperand(left, _pad.left());
+    auto right = binder->addOperand<int32_t>();
+    binder->setOperand(right, _pad.right());
+    auto top = binder->addOperand<int32_t>();
+    binder->setOperand(top, _pad.top());
+    auto bottom = binder->addOperand<int32_t>();
+    binder->setOperand(bottom, _pad.bottom());
+
+    // Set horizontal/vertical stride
+    auto hstride = binder->addOperand<int32_t>();
+    binder->setOperand(hstride, _stride.horizontal());
+    auto vstride = binder->addOperand<int32_t>();
+    binder->setOperand(vstride, _stride.vertical());
+
+    // Set receptive field size
+    auto width = binder->addOperand<int32_t>();
+    binder->setOperand(width, _window.width());
+    auto height = binder->addOperand<int32_t>();
+    binder->setOperand(height, _window.height());
+
+    // Set fuse code (0 = no fused activation)
+    // TODO Support operation fusion
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto ofm = binder->addOperand<float>(_ofm);
+
+    binder->addOperation(ann::Operation::Code::AVG_POOL_2D,
+                         {ifm, left, right, top, bottom, hstride, vstride, width, height, fuse},
+                         {ofm});
+  }
+
+private:
+  coco::Padding2D _pad;
+  coco::Stride2D _stride;
+  coco::Window2D _window;
+
+private:
+  coco::FeatureObject *_ifm = nullptr;
+  coco::FeatureObject *_ofm = nullptr;
+};
+
+/**
+ * @brief Emits an ANN PAD operation from a coco PadF
+ *
+ * The paddings operand is a [4, 2] int32 tensor in NHWC order:
+ * one (before, after) pair per axis, with N and C pairs fixed to zero.
+ */
+class ANNPadFAppender final : public ANNOpAppender
+{
+public:
+  void pad(const coco::Padding2D *pad) { _pad = *pad; }
+
+public:
+  void ifm(coco::FeatureObject *ifm) { _ifm = ifm; }
+  void ofm(coco::FeatureObject *ofm) { _ofm = ofm; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    using nncc::core::ADT::tensor::Shape;
+
+    auto ifm = binder->addOperand<float>(_ifm);
+    auto pad = binder->addOperand<int32_t>(Shape{4, 2});
+    {
+      std::vector<int32_t> values;
+      values.resize(8);
+      // For 'N'
+      values.at(0) = values.at(1) = 0;
+      // For 'H'
+      values.at(2) = _pad.top();
+      values.at(3) = _pad.bottom();
+      // For 'W'
+      values.at(4) = _pad.left();
+      values.at(5) = _pad.right();
+      // For 'C'
+      values.at(6) = values.at(7) = 0;
+
+      binder->setOperand(pad, values.begin(), values.end());
+    }
+
+    auto ofm = binder->addOperand<float>(_ofm);
+
+    binder->addOperation(ann::Operation::Code::PAD, {ifm, pad}, {ofm});
+  }
+
+private:
+  coco::Padding2D _pad;
+
+private:
+  coco::FeatureObject *_ifm = nullptr;
+  coco::FeatureObject *_ofm = nullptr;
+};
+
+/**
+ * @brief Adapts an arbitrary Appender callable to the ANNOpAppender interface
+ */
+class ANNOpFunctionalAppender final : public ANNOpAppender
+{
+public:
+  ANNOpFunctionalAppender(const Appender &fun) : _fun{fun}
+  {
+    // DO NOTHING
+  }
+
+public:
+  // FIX add missing 'override' specifier for consistency with every other
+  // appender in this file (behavior is unchanged - it already overrode the base)
+  void append(ANNBinder *binder) const override { _fun(binder); }
+
+private:
+  Appender _fun;
+};
+
+/**
+ * @brief Emits an ANN SUB operation over two float feature operands
+ */
+class ANNSubAppender final : public ANNOpAppender
+{
+public:
+  void left(coco::FeatureObject *o) { _left = o; }
+  void right(coco::FeatureObject *o) { _right = o; }
+  void out(coco::FeatureObject *o) { _out = o; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto left = binder->addOperand<float>(_left);
+    auto right = binder->addOperand<float>(_right);
+    // SUB takes a mandatory fuse-code operand; 0 means no fused activation
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto out = binder->addOperand<float>(_out);
+
+    binder->addOperation(ann::Operation::Code::SUB, {left, right, fuse}, {out});
+  }
+
+private:
+  coco::FeatureObject *_left = nullptr;
+  coco::FeatureObject *_right = nullptr;
+  coco::FeatureObject *_out = nullptr;
+};
+
+/**
+ * @brief Emits an ANN DIV operation over two float feature operands
+ */
+class ANNDivAppender final : public ANNOpAppender
+{
+public:
+  void left(coco::FeatureObject *o) { _left = o; }
+  void right(coco::FeatureObject *o) { _right = o; }
+  void out(coco::FeatureObject *o) { _out = o; }
+
+public:
+  void append(ANNBinder *binder) const override
+  {
+    auto left = binder->addOperand<float>(_left);
+    auto right = binder->addOperand<float>(_right);
+    // DIV takes a mandatory fuse-code operand; 0 means no fused activation
+    auto fuse = binder->addOperand<int32_t>();
+    binder->setOperand(fuse, 0);
+
+    auto out = binder->addOperand<float>(_out);
+
+    binder->addOperation(ann::Operation::Code::DIV, {left, right, fuse}, {out});
+  }
+
+private:
+  coco::FeatureObject *_left = nullptr;
+  coco::FeatureObject *_right = nullptr;
+  coco::FeatureObject *_out = nullptr;
+};
+
+/**
+ * @brief Instruction visitor that builds an ANNOpAppender for ANN-compatible
+ *        Eval instructions
+ *
+ * Returns nullptr for instructions (or operand patterns) that have no ANN
+ * counterpart here, which marks them INCOMPATIBLE for grouping purposes.
+ */
+class ANNOpBuilder : public coco::Instr::Visitor<std::unique_ptr<ANNOpAppender>>
+{
+public:
+  std::unique_ptr<ANNOpAppender> visit(const coco::Eval *eval)
+  {
+    if (auto conv = eval->op()->asConv2D())
+    {
+      if (auto load = conv->arg()->asLoad())
+      {
+        auto sess = enco::session(eval->module());
+
+        auto ifm = load->object()->asFeature();
+        auto ker = conv->ker();
+        auto ofm = eval->out()->asFeature();
+
+        const auto group = conv->group();
+
+        // group == 1 maps to plain CONV_2D; otherwise treat as depthwise
+        if (group == 1)
+        {
+          auto app = make_unique<ANNConv2DAppender>();
+
+          app->session(sess);
+
+          app->pad(conv->pad());
+          app->stride(conv->stride());
+
+          app->ifm(ifm);
+          app->ofm(ofm);
+          app->ker(ker);
+
+          return std::move(app);
+        }
+        else
+        {
+          assert(ifm->shape().depth() == group);
+          assert(ker->shape().count() % group == 0);
+          assert(ker->shape().depth() == 1);
+
+          auto app = make_unique<ANNDepthwiseConv2DAppender>();
+
+          app->session(sess);
+
+          app->multiplier(ker->shape().count() / group);
+          app->pad(conv->pad());
+          app->stride(conv->stride());
+
+          app->ifm(ifm);
+          app->ofm(ofm);
+          app->ker(ker);
+
+          return std::move(app);
+        }
+      }
+    }
+    else if (auto op = eval->op()->asAdd())
+    {
+      auto left_load = op->left()->asLoad();
+      auto right_load = op->right()->asLoad();
+
+      if (left_load && right_load)
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(Add(Load(%left), Load(%right)))
+        //
+        auto left = left_load->object()->asFeature();
+        auto right = right_load->object()->asFeature();
+        assert(left != nullptr && right != nullptr);
+
+        auto out = eval->out()->asFeature();
+        assert(out != nullptr);
+
+        auto app = make_unique<ANNAddAppender>();
+
+        app->left(left);
+        app->right(right);
+        app->out(out);
+
+        return std::move(app);
+      }
+    }
+    else if (auto op = eval->op()->asMul())
+    {
+      auto left_load = op->left()->asLoad();
+      auto right_load = op->right()->asLoad();
+
+      if (left_load && right_load)
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(Mul(Load(%left), Load(%right)))
+        //
+        auto left = left_load->object()->asFeature();
+        auto right = right_load->object()->asFeature();
+        assert(left != nullptr && right != nullptr);
+
+        auto out = eval->out()->asFeature();
+        assert(out != nullptr);
+
+        auto app = make_unique<ANNMulAppender>();
+
+        app->left(left);
+        app->right(right);
+        app->out(out);
+
+        return std::move(app);
+      }
+    }
+    else if (auto op = eval->op()->asPadF())
+    {
+      if (auto load = op->arg()->asLoad())
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(PadF(Load(%ifm))
+        //
+        auto ifm = load->object()->asFeature();
+        auto ofm = eval->out()->asFeature();
+
+        assert(ifm != nullptr && ofm != nullptr);
+
+        auto app = make_unique<ANNPadFAppender>();
+
+        app->pad(op->pad());
+
+        app->ifm(ifm);
+        app->ofm(ofm);
+
+        return std::move(app);
+      }
+    }
+    else if (auto maxpool = eval->op()->asMaxPool2D())
+    {
+      if (auto load = maxpool->arg()->asLoad())
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(MaxPool2D(Load(%ifm))
+        //
+        auto ifm = load->object()->asFeature();
+        auto ofm = eval->out()->asFeature();
+
+        assert(ifm != nullptr && ofm != nullptr);
+
+        auto app = make_unique<ANNMaxPool2DAppender>();
+
+        app->pad(maxpool->pad());
+        app->stride(maxpool->stride());
+        app->window(maxpool->window());
+
+        app->ifm(ifm);
+        app->ofm(ofm);
+
+        return std::move(app);
+      }
+    }
+    else if (auto avgpool = eval->op()->asAvgPool2D())
+    {
+      if (auto load = avgpool->arg()->asLoad())
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(AvgPool2D(Load(%ifm))
+        //
+        if (avgpool->divisor() == coco::AvgPool2D::Divisor::PaddingExcluded)
+        {
+          // When ANN runtime computes the average of each receptive field,
+          // it uses the number of valid(=non-padding) elements as a divisor.
+          auto ifm = load->object()->asFeature();
+          auto ofm = eval->out()->asFeature();
+
+          assert(ifm != nullptr && ofm != nullptr);
+
+          auto app = make_unique<ANNAvgPool2DAppender>();
+
+          app->pad(avgpool->pad());
+          app->stride(avgpool->stride());
+          app->window(avgpool->window());
+
+          app->ifm(ifm);
+          app->ofm(ofm);
+
+          return std::move(app);
+        }
+      }
+    }
+    else if (auto relu = eval->op()->asReLU())
+    {
+      if (auto load = relu->arg()->asLoad())
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(ReLU(Load(%ifm))
+        //
+        // TODO Support objects of other kinds, such as Tensor
+        auto ifm = load->object()->asFeature();
+        auto ofm = eval->out()->asFeature();
+
+        assert(ifm != nullptr && ofm != nullptr);
+
+        auto app = make_unique<ANNReLUAppender>();
+
+        app->ifm(ifm);
+        app->ofm(ofm);
+
+        return std::move(app);
+      }
+    }
+    else if (auto relu6 = eval->op()->asReLU6())
+    {
+      if (auto load = relu6->arg()->asLoad())
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(ReLU6(Load(%ifm))
+        //
+        // TODO Support objects of other kinds, such as Tensor
+        auto ifm = load->object()->asFeature();
+        auto ofm = eval->out()->asFeature();
+
+        assert(ifm != nullptr && ofm != nullptr);
+
+        auto app = make_unique<ANNReLU6Appender>();
+
+        app->ifm(ifm);
+        app->ofm(ofm);
+
+        return std::move(app);
+      }
+    }
+    else if (auto op = eval->op()->asConcatF())
+    {
+      auto left_load = op->left()->asLoad();
+      auto right_load = op->right()->asLoad();
+
+      // Depth-axis concatenation only (see ANNConcatAppender)
+      if (left_load && right_load && (op->axis() == coco::ConcatF::Axis::Depth))
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %ofm = eval(ConcatF(Depth, Load(%left), Load(%right)))
+        //
+        auto left = left_load->object()->asFeature();
+        auto right = right_load->object()->asFeature();
+        assert(left != nullptr && right != nullptr);
+
+        auto out = eval->out()->asFeature();
+        assert(out != nullptr);
+
+        auto app = make_unique<ANNConcatAppender>();
+
+        app->left(left);
+        app->right(right);
+        app->out(out);
+
+        return std::move(app);
+      }
+    }
+    else if (auto op = eval->op()->asSub())
+    {
+      auto left_load = op->left()->asLoad();
+      auto right_load = op->right()->asLoad();
+
+      if (left_load && right_load)
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %out = eval(Sub(Load(%left), Load(%right)))
+        //
+        auto left = left_load->object()->asFeature();
+        auto right = right_load->object()->asFeature();
+        assert(left != nullptr && right != nullptr);
+
+        auto out = eval->out()->asFeature();
+        assert(out != nullptr);
+
+        auto app = make_unique<ANNSubAppender>();
+
+        app->left(left);
+        app->right(right);
+        app->out(out);
+
+        return std::move(app);
+      }
+    }
+    else if (auto op = eval->op()->asDiv())
+    {
+      auto left_load = op->left()->asLoad();
+      auto right_load = op->right()->asLoad();
+
+      if (left_load && right_load)
+      {
+        // Let's compile the following code fragment:
+        //
+        //   %out = eval(Div(Load(%left), Load(%right)))
+        //
+        auto left = left_load->object()->asFeature();
+        auto right = right_load->object()->asFeature();
+        assert(left != nullptr && right != nullptr);
+
+        auto out = eval->out()->asFeature();
+        assert(out != nullptr);
+
+        auto app = make_unique<ANNDivAppender>();
+
+        app->left(left);
+        app->right(right);
+        app->out(out);
+
+        return std::move(app);
+      }
+    }
+
+    // Return nullptr if a given Eval instruction is incompatible
+    return nullptr;
+  }
+
+public:
+  // Shuffle instructions are never ANN-compatible
+  std::unique_ptr<ANNOpAppender> visit(const coco::Shuffle *) { return nullptr; }
+};
+
+namespace
+{
+
+/**
+ * @brief Build an ANNOpAppender for a coco instruction, or nullptr if the
+ *        instruction cannot be lowered to Android NN
+ *
+ * Handles plain Eval (via ANNOpBuilder) plus the coex extension instructions
+ * ANNDepthConcatF and ANNConv2D.
+ */
+std::unique_ptr<ANNOpAppender> make_appender(coco::Instr *ins)
+{
+  ANNOpBuilder op_builder;
+
+  if (auto eval = coco::safe_cast<coco::Eval>(ins))
+  {
+    return eval->accept(op_builder);
+  }
+
+  if (auto depth_concat = coco::safe_cast<ANNDepthConcatF>(ins))
+  {
+    auto app = make_unique<ANNConcatAppender>();
+
+    app->out(depth_concat->out()->asFeature());
+
+    app->left(depth_concat->fst()->asFeature());
+    app->right(depth_concat->snd()->asFeature());
+
+    return std::move(app);
+  }
+
+  // Build ANN IR from ANNConv2D instruction
+  if (auto conv2d = coco::safe_cast<ANNConv2D>(ins))
+  {
+    auto sess = enco::session(conv2d->module());
+    auto app = make_unique<ANNConv2DAppender>();
+
+    app->session(sess);
+
+    app->pad(conv2d->pad());
+    app->stride(conv2d->stride());
+
+    app->ofm(conv2d->ofm()->asFeature());
+    app->ifm(conv2d->ifm()->asFeature());
+    app->ker(conv2d->ker()->asKernel());
+    // NOTE bias may legitimately be absent (safe_cast yields nullptr)
+    app->bias(coco::safe_cast<coco::FeatureObject>(conv2d->bias()));
+
+    return std::move(app);
+  }
+
+  return nullptr;
+}
+
+// Whether an instruction/block can be lowered to Android NN
+enum Compatibility
+{
+  COMPATIBLE,
+  INCOMPATIBLE
+};
+
+/**
+ * @brief Regroups instructions into maximal runs of ANN-compatible and
+ *        ANN-incompatible blocks, registering an ANNBinder for each
+ *        compatible block in the given ANNContext
+ */
+class ANNGroupBuilder
+{
+public:
+  ANNGroupBuilder(ANNContext *ctx) : _ctx{ctx}
+  {
+    // DO NOTHING
+  }
+
+public:
+  Compatibility kind(const coco::Block *blk) const;
+  Compatibility kind(const std::unique_ptr<ANNOpAppender> &appender) const;
+
+public:
+  void build(enco::Code *code) const;
+
+private:
+  ANNContext *_ctx;
+};
+
+// An instruction is compatible iff make_appender produced an appender for it
+Compatibility ANNGroupBuilder::kind(const std::unique_ptr<ANNOpAppender> &app) const
+{
+  return app ? COMPATIBLE : INCOMPATIBLE;
+}
+
+// A block is compatible iff an ANNBinder was registered for it
+Compatibility ANNGroupBuilder::kind(const coco::Block *blk) const
+{
+  return (_ctx->find(blk) != nullptr) ? COMPATIBLE : INCOMPATIBLE;
+}
+
+/**
+ * @brief Rebuild the module's block sequence so that consecutive instructions
+ *        of the same compatibility land in the same block
+ *
+ * Instructions are detached one by one from each original block and appended
+ * to a "destination" block; a new destination block is created whenever the
+ * compatibility kind changes. Emptied source blocks are destroyed.
+ */
+void ANNGroupBuilder::build(enco::Code *code) const
+{
+  auto m = code->module();
+
+  // ANNGroupBuilder will construct a sequence of blocks from the original block sequence, and
+  // a destination block (that dst_blk points to) is the tail of the generated sequence.
+  coco::Block *dst_blk = nullptr;
+
+  auto append = [&](const Compatibility &t) {
+    auto blk = m->entity()->block()->create();
+
+    if (dst_blk == nullptr)
+    {
+      m->block()->prepend(blk);
+    }
+    else
+    {
+      blk->insertAfter(dst_blk);
+    }
+
+    dst_blk = blk;
+
+    // Register a binder so kind(dst_blk) reports COMPATIBLE from now on
+    if (COMPATIBLE == t)
+    {
+      _ctx->create(blk);
+    }
+  };
+
+  for (auto blk = m->block()->head(); blk;)
+  {
+    // Let's move instructions from a block of interest (referred to as source block) into
+    // a destination block
+    auto src_blk = blk;
+    blk = src_blk->next(); // advance before detach invalidates the link
+    src_blk->detach();
+
+    for (auto ins = src_blk->instr()->head(); ins;)
+    {
+      auto cur_ins = ins;
+      ins = cur_ins->next(); // advance before detach invalidates the link
+      cur_ins->detach();
+
+      auto cur_append = make_appender(cur_ins);
+
+      // Create a new compatible block and use it as a destination block if the current
+      // destination block is absent or incompatible with the instruction of interest.
+      if ((dst_blk == nullptr) || (kind(cur_append) != kind(dst_blk)))
+      {
+        append(kind(cur_append));
+      }
+
+      assert(dst_blk != nullptr);
+      assert(kind(cur_append) == kind(dst_blk));
+
+      // Append ins to the dst_blk block
+      dst_blk->instr()->append(cur_ins);
+
+      if (cur_append)
+      {
+        // Update Android NN IR if the current instruction is compatible
+        auto binder = _ctx->find(dst_blk);
+        assert(binder != nullptr);
+        cur_append->append(binder);
+      }
+    }
+
+    // Destroy the source block
+    assert(src_blk->instr()->empty());
+    m->entity()->block()->destroy(src_blk);
+  }
+}
+
+} // namespace
+
+/**
+ * @brief Finalizes each ANN subnet: sets constant operand values and
+ *        identifies the input/output bags of every binder in a context
+ */
+class ANNModuleBuilder
+{
+private:
+  std::set<coco::Bag *> inputs(ANNBinder *binder) const;
+  std::set<coco::Bag *> outputs(ANNBinder *binder) const;
+
+public:
+  void build(ANNContext *ann_ctx) const;
+};
+
+/**
+ * @brief Collect the bags that act as inputs of the binder's block
+ */
+std::set<coco::Bag *> ANNModuleBuilder::inputs(ANNBinder *binder) const
+{
+  std::set<coco::Bag *> res;
+
+  for (auto bag : binder->bags())
+  {
+    // Updaters excluding this block itself
+    auto u = enco::updaters(bag);
+    u.erase(binder->block());
+
+    /**
+     * A bag is the input of this block if
+     * 1. it is an input of the whole network, or
+     * 2. it is updated by preceding blocks during execution
+     */
+    if (bag->isInput() || (u.size() > 0))
+    {
+      res.insert(bag);
+    }
+  }
+
+  return res;
+}
+
+/**
+ * @brief Collect the bags that act as outputs of the binder's block
+ */
+std::set<coco::Bag *> ANNModuleBuilder::outputs(ANNBinder *binder) const
+{
+  std::set<coco::Bag *> res;
+
+  for (auto bag : binder->bags())
+  {
+    auto u = enco::updaters(bag);
+    // Readers excluding this block itself
+    auto r = enco::readers(bag);
+    r.erase(binder->block());
+
+    /**
+     * Only a bag that this block updates can be the output of this block
+     */
+    if (u.find(binder->block()) == u.end())
+    {
+      continue;
+    }
+
+    /**
+     * A bag is the output of this block if
+     * 1. it is an output of the whole network, or
+     * 2. it is read by following blocks during execution
+     */
+    if (bag->isOutput() || (r.size() > 0))
+    {
+      res.insert(bag);
+    }
+  }
+
+  return res;
+}
+
+/**
+ * @brief For every binder in the context: copy initial weight values into
+ *        constant operands and mark the subnet's input/output bags
+ */
+void ANNModuleBuilder::build(ANNContext *ann_ctx) const
+{
+  for (uint32_t n = 0; n < ann_ctx->count(); ++n)
+  {
+    auto binder = ann_ctx->nth(n);
+
+    // NOTE binder->module() returns an ANN IR module (not coco IR module)
+    auto m = binder->block()->module();
+    auto d = enco::data(m);
+
+    // Let's identify operands with initial values
+    for (auto bag : binder->bags())
+    {
+      if (binder->associated(bag) && d->allocated(bag))
+      {
+        // TODO Support other datatype
+        auto span = d->f32()->weight(bag);
+        assert(span.data() != nullptr);
+
+        binder->setOperand(binder->operand(bag), span.data(), span.data() + span.size());
+      }
+    }
+
+    // Let's identify input/output bags
+    binder->identifyInputs(inputs(binder));
+    binder->identifyOutputs(outputs(binder));
+  }
+}
+
+} // namespace
+
+namespace
+{
+
+/**
+ * @brief Pass that partitions code into ANN-compatible/incompatible block
+ *        phases and registers the resulting ANN context globally
+ */
+class SplitPass
+{
+public:
+  void runOnCode(enco::Code *code) const;
+};
+
+void SplitPass::runOnCode(enco::Code *code) const
+{
+  auto ann_ctx = make_unique<ANNContext>();
+
+  // Phase 1: regroup instructions into compatibility-homogeneous blocks
+  ANNGroupBuilder group_builder{ann_ctx.get()};
+  group_builder.build(code);
+
+  // Phase 2: finalize operands and I/O of each ANN subnet
+  ANNModuleBuilder module_builder;
+  module_builder.build(ann_ctx.get());
+
+  // Publish the context so SubnetManager::context can retrieve it later
+  _subnet_contexts[code->module()] = std::move(ann_ctx);
+}
+
+} // namespace
+
+namespace enco
+{
+
+// Public entry point declared in Split.h; delegates to the internal SplitPass
+void split_into_phases(enco::Code *code)
+{
+  SplitPass split;
+  split.runOnCode(code);
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Transforms/Split.h b/compiler/enco/core/src/Transforms/Split.h
new file mode 100644
index 000000000..b4e1d7baf
--- /dev/null
+++ b/compiler/enco/core/src/Transforms/Split.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SPLIT_H__
+#define __SPLIT_H__
+
+#include "Code.h"
+#include "Pass.h"
+
+namespace enco
+{
+
+/**
+ * @brief Access point for ANN contexts produced by split_into_phases
+ */
+struct SubnetManager
+{
+  // Returns the ANNContext registered for "m"; throws if none was registered
+  static const ANNContext *context(const coco::Module *m);
+};
+
+/**
+ * @brief Split instructions into a set of phases
+ */
+void split_into_phases(enco::Code *code);
+
+/**
+ * @brief Pass wrapper that runs split_into_phases over a session's code
+ */
+struct PhaseConstructionPass final : public Pass
+{
+  PASS_CTOR(PhaseConstructionPass)
+  {
+    // DO NOTHING
+  }
+
+  void run(const SessionID &sess) const override { split_into_phases(code(sess)); }
+};
+
+} // namespace enco
+
+#endif // __SPLIT_H__
diff --git a/compiler/enco/core/src/Usage.cpp b/compiler/enco/core/src/Usage.cpp
new file mode 100644
index 000000000..92ccba5a0
--- /dev/null
+++ b/compiler/enco/core/src/Usage.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Usage.h"
+
+namespace enco
+{
+
+std::set<coco::Block *> readers(const coco::Bag *bag)
+{
+ std::set<coco::Block *> res;
+
+ for (auto read : coco::readers(bag))
+ {
+ assert(read != nullptr);
+ auto instr = read->loc();
+ assert(instr != nullptr);
+ auto block = instr->parent();
+ assert(block != nullptr);
+
+ res.insert(block);
+ }
+
+ return res;
+}
+
+std::set<coco::Block *> updaters(const coco::Bag *bag)
+{
+ std::set<coco::Block *> res;
+
+ for (auto update : coco::updaters(bag))
+ {
+ assert(update != nullptr);
+ auto instr = update->loc();
+ assert(instr != nullptr);
+ auto block = instr->parent();
+ assert(block != nullptr);
+
+ res.insert(block);
+ }
+
+ return res;
+}
+
+} // namespace enco
diff --git a/compiler/enco/core/src/Usage.h b/compiler/enco/core/src/Usage.h
new file mode 100644
index 000000000..8fa05f9b9
--- /dev/null
+++ b/compiler/enco/core/src/Usage.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_USAGE_H__
+#define __ENCO_USAGE_H__
+
+#include "coco/IR.h"
+
+#include <set>
+
+namespace enco
+{
+
+/// @brief Returns the set of blocks that read a given bag
+std::set<coco::Block *> readers(const coco::Bag *bag);
+/// @brief Returns the set of blocks that update a given bag
+std::set<coco::Block *> updaters(const coco::Bag *bag);
+
+} // namespace enco
+
+#endif // __ENCO_USAGE_H__
diff --git a/compiler/enco/core/src/coex/IR.h b/compiler/enco/core/src/coex/IR.h
new file mode 100644
index 000000000..e81943f18
--- /dev/null
+++ b/compiler/enco/core/src/coex/IR.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCO_COEX_IR_H__
+#define __ENCO_COEX_IR_H__
+
+#include <coco/IR.h>
+
+/**
+ * @brief 2D Convolution through Android NN API
+ *
+ * TODO Support FusedActivation
+ */
+class ANNConv2D : public coco::Instr, public coco::Object::Producer, public coco::Object::Consumer
+{
+public:
+ ANNConv2D() : _ofm{this}, _ifm{this}, _ker{this}, _bias{this}
+ {
+ // DO NOTHING
+ }
+
+public:
+ coco::Instr *loc(void) override { return this; }
+
+public:
+ coco::Object *ofm(void) const { return _ofm.value(); }
+ void ofm(coco::Object *o) { _ofm.value(o); }
+
+ coco::Object *ifm(void) const { return _ifm.value(); }
+ void ifm(coco::Object *o) { _ifm.value(o); }
+
+ coco::Object *ker(void) const { return _ker.value(); }
+ void ker(coco::Object *o) { _ker.value(o); }
+
+ /**
+ * Currently, this "bias" is a Feature object with channel-wise layout
+ *
+ * NOTE This design is subject to change
+ */
+ coco::Object *bias(void) const { return _bias.value(); }
+ void bias(coco::Object *o) { _bias.value(o); }
+
+public:
+ coco::Padding2D *pad(void) { return &_pad; }
+ const coco::Padding2D *pad(void) const { return &_pad; }
+
+ coco::Stride2D *stride(void) { return &_stride; }
+ const coco::Stride2D *stride(void) const { return &_stride; }
+
+private:
+ coco::Def _ofm;
+
+ coco::Use _ifm;
+ coco::Use _ker;
+ coco::Use _bias;
+
+private:
+ coco::Padding2D _pad;
+ coco::Stride2D _stride;
+};
+
+/**
+ * @brief Concatenate feature maps along "depth" dimension through Android NN API
+ */
+class ANNDepthConcatF : public coco::Instr,
+ public coco::Object::Producer,
+ public coco::Object::Consumer
+{
+public:
+ ANNDepthConcatF() : _out{this}, _fst{this}, _snd{this}
+ {
+ // DO NOTHING
+ }
+
+public:
+ coco::Instr *loc(void) override { return this; }
+
+public:
+ coco::Object *out(void) const { return _out.value(); }
+ void out(coco::Object *o) { _out.value(o); }
+
+ coco::Object *fst(void) const { return _fst.value(); }
+ void fst(coco::Object *o) { _fst.value(o); }
+
+ coco::Object *snd(void) const { return _snd.value(); }
+ void snd(coco::Object *o) { _snd.value(o); }
+
+private:
+ coco::Def _out;
+
+ // TODO Support variadic-length inputs
+ coco::Use _fst;
+ coco::Use _snd;
+};
+
+#endif // __ENCO_COEX_IR_H__
diff --git a/compiler/enco/core/src/coex/IR.test.cpp b/compiler/enco/core/src/coex/IR.test.cpp
new file mode 100644
index 000000000..e20cbe4fd
--- /dev/null
+++ b/compiler/enco/core/src/coex/IR.test.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IR.h"
+
+#include <gtest/gtest.h>
+
+TEST(IRTest, ANNConv2D_default_constructor)
+{
+ ANNConv2D ins;
+
+ ASSERT_EQ(ins.ofm(), nullptr);
+ ASSERT_EQ(ins.ifm(), nullptr);
+ ASSERT_EQ(ins.ker(), nullptr);
+ ASSERT_EQ(ins.bias(), nullptr);
+}
+
+TEST(IRTest, ANNDepthConcatF_default_constructor)
+{
+ ANNDepthConcatF ins;
+
+ ASSERT_EQ(ins.out(), nullptr);
+ ASSERT_EQ(ins.fst(), nullptr);
+ ASSERT_EQ(ins.snd(), nullptr);
+}
diff --git a/compiler/enco/frontend/CMakeLists.txt b/compiler/enco/frontend/CMakeLists.txt
new file mode 100644
index 000000000..5ea6cdadd
--- /dev/null
+++ b/compiler/enco/frontend/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectories()
diff --git a/compiler/enco/frontend/caffe/CMakeLists.txt b/compiler/enco/frontend/caffe/CMakeLists.txt
new file mode 100644
index 000000000..ce43a41d3
--- /dev/null
+++ b/compiler/enco/frontend/caffe/CMakeLists.txt
@@ -0,0 +1,39 @@
+nnas_find_package(CaffeProto QUIET)
+
+if(NOT CaffeProto_FOUND)
+ return()
+endif(NOT CaffeProto_FOUND)
+
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
+
+add_library(enco_caffe_frontend SHARED ${SOURCES})
+target_include_directories(enco_caffe_frontend PRIVATE src)
+target_link_libraries(enco_caffe_frontend coco_core)
+target_link_libraries(enco_caffe_frontend coco_generic)
+target_link_libraries(enco_caffe_frontend enco_intf_frontend)
+target_link_libraries(enco_caffe_frontend enco_intf_cmdline)
+target_link_libraries(enco_caffe_frontend morph)
+target_link_libraries(enco_caffe_frontend caffeproto)
+target_link_libraries(enco_caffe_frontend stdex)
+
+nnas_find_package(GTest QUIET)
+
+if(NOT GTest_FOUND)
+ return()
+endif(NOT GTest_FOUND)
+
+nnas_find_package(Caffe QUIET)
+
+if(NOT Caffe_FOUND)
+ return()
+endif(NOT Caffe_FOUND)
+
+add_executable(enco_caffe_frontend_test ${TESTS})
+target_include_directories(enco_caffe_frontend_test PRIVATE src)
+target_link_libraries(enco_caffe_frontend_test gtest_main)
+target_link_libraries(enco_caffe_frontend_test enco_caffe_frontend)
+target_link_libraries(enco_caffe_frontend_test morph)
+target_link_libraries(enco_caffe_frontend_test caffe)
+add_test(enco_caffe_frontend_test enco_caffe_frontend_test)
diff --git a/compiler/enco/frontend/caffe/src/ConcatSpec.cpp b/compiler/enco/frontend/caffe/src/ConcatSpec.cpp
new file mode 100644
index 000000000..b83a1f902
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ConcatSpec.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConcatSpec.h"
+
+#include <cassert>
+
+using namespace nncc::core::ADT::tensor;
+
+nncc::core::ADT::tensor::Shape ConcatSpec::forward(const ShapeList &inputs) const
+{
+ assert(inputs.size() > 0);
+
+ Shape output_shape = inputs.at(0);
+
+ for (uint32_t n = 1; n < inputs.size(); ++n)
+ {
+ // The current implementation assumes that "inputs" is well-formed
+ // TODO Verify whether "inputs" is really well-formed
+ const auto &input_shape = inputs.at(n);
+ output_shape.dim(_axis) += input_shape.dim(_axis);
+ }
+
+ return output_shape;
+}
+
+ConcatSpec concat_spec(uint32_t axis) { return ConcatSpec{axis}; }
diff --git a/compiler/enco/frontend/caffe/src/ConcatSpec.h b/compiler/enco/frontend/caffe/src/ConcatSpec.h
new file mode 100644
index 000000000..cc636c778
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ConcatSpec.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONCAT_SPEC_H__
+#define __CONCAT_SPEC_H__
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <vector>
+
+using ShapeList = std::vector<nncc::core::ADT::tensor::Shape>;
+
+class ConcatSpec
+{
+public:
+ explicit ConcatSpec(uint32_t axis) : _axis{axis}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Return the output shape when inputs of given shape are
+ * concatenated along _axis
+ */
+ nncc::core::ADT::tensor::Shape forward(const ShapeList &) const;
+
+private:
+ uint32_t _axis;
+};
+
+ConcatSpec concat_spec(uint32_t axis);
+
+#endif // __CONCAT_SPEC_H__
diff --git a/compiler/enco/frontend/caffe/src/ConcatSpec.test.cpp b/compiler/enco/frontend/caffe/src/ConcatSpec.test.cpp
new file mode 100644
index 000000000..1cb2ea5af
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ConcatSpec.test.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConcatSpec.h"
+
+#include <gtest/gtest.h>
+
+using nncc::core::ADT::tensor::Shape;
+
+namespace
+{
+class ConcatSpecTest : public ::testing::Test
+{
+ // FOR FUTURE USE
+};
+} // namespace
+
+TEST_F(ConcatSpecTest, ifm_shape)
+{
+ const Shape in_1{1, 1, 4, 4};
+ const Shape in_2{1, 2, 4, 4};
+ const Shape in_3{1, 3, 4, 4};
+ const Shape in_4{1, 4, 4, 4};
+
+ auto expected = Shape{1, 10, 4, 4};
+ auto obtained = concat_spec(1).forward({in_1, in_2, in_3, in_4});
+
+ ASSERT_EQ(expected, obtained);
+}
diff --git a/compiler/enco/frontend/caffe/src/Context.cpp b/compiler/enco/frontend/caffe/src/Context.cpp
new file mode 100644
index 000000000..9f7204b25
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Context.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @note This cpp file exists to check compilation integrity
+ */
+
+#include "Context.h"
diff --git a/compiler/enco/frontend/caffe/src/Context.h b/compiler/enco/frontend/caffe/src/Context.h
new file mode 100644
index 000000000..aca57ce6f
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Context.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+#include <caffe/proto/caffe.pb.h>
+
+#include <coco/IR.h>
+#include <coco/IR/Data.h>
+
+#include <cassert>
+#include <map>
+#include <string>
+
+namespace caffeimport
+{
+
+using LayerName = std::string;
+using BlobName = std::string;
+// Note: these two may be evolved into a class
+using ShapeContext = std::map<BlobName, nncc::core::ADT::tensor::Shape>;
+using StoreContext = std::map<BlobName, coco::Bag *>;
+
+class WeightContext
+{
+public:
+ WeightContext(::caffe::NetParameter *caffemodel) : _caffemodel(caffemodel)
+ {
+ for (uint32_t n = 0; n < _caffemodel->layer_size(); ++n)
+ {
+ auto layer = _caffemodel->mutable_layer(n);
+
+ if (layer->has_name())
+ {
+ _data[layer->name()] = layer;
+ }
+ }
+ }
+
+public:
+ int blob_count(const LayerName &name)
+ {
+ if (_data.find(name) != _data.end())
+ return _data.at(name)->blobs_size();
+
+ assert(false);
+ return 0;
+ }
+
+ ::caffe::BlobProto *blob_get(const LayerName &name, uint32_t n)
+ {
+ if (_data.find(name) != _data.end())
+ return _data.at(name)->mutable_blobs(n);
+
+ assert(false);
+ return nullptr;
+ };
+
+private:
+ ::caffe::NetParameter *_caffemodel;
+ std::map<LayerName, ::caffe::LayerParameter *> _data;
+};
+
+class GraphBuilderContext
+{
+public:
+ explicit GraphBuilderContext(coco::Module *module, coco::Data *data, coco::Block *block,
+ ShapeContext &shape_ctx, StoreContext &bag_ctx,
+ WeightContext &weight_ctx)
+ : _module(module), _data(data), _block(block), _shape_ctx(shape_ctx), _bag_ctx(bag_ctx),
+ _weight_ctx(weight_ctx)
+ {
+ // DO NOTHING
+ }
+
+ GraphBuilderContext(const GraphBuilderContext &) = delete;
+ GraphBuilderContext(GraphBuilderContext &&) = delete;
+
+public:
+ coco::Module *module() { return _module; }
+ coco::Data *data() { return _data; }
+ coco::Block *block() { return _block; }
+ ShapeContext &shape_ctx() { return _shape_ctx; }
+ StoreContext &bag_ctx() { return _bag_ctx; }
+ WeightContext &weight_ctx() { return _weight_ctx; }
+
+private:
+ coco::Module *_module;
+ coco::Data *_data;
+ coco::Block *_block;
+ ShapeContext &_shape_ctx;
+ StoreContext &_bag_ctx;
+ WeightContext &_weight_ctx;
+};
+
+} // namespace caffeimport
+
+#endif // __CONTEXT_H__
diff --git a/compiler/enco/frontend/caffe/src/Convert.cpp b/compiler/enco/frontend/caffe/src/Convert.cpp
new file mode 100644
index 000000000..d697b1bd8
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Convert.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convert.h"
+
+using namespace nncc::core::ADT;
+
+namespace caffeimport
+{
+
+tensor::Shape as_tensor_shape(const ::caffe::BlobShape &blob_shape)
+{
+ const uint32_t rank = blob_shape.dim_size();
+
+ tensor::Shape res;
+
+ res.resize(rank);
+
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ res.dim(axis) = blob_shape.dim(axis);
+ }
+
+ return res;
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Convert.h b/compiler/enco/frontend/caffe/src/Convert.h
new file mode 100644
index 000000000..9f6f9f104
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Convert.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVERT_H__
+#define __CONVERT_H__
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <caffe/proto/caffe.pb.h>
+
+namespace caffeimport
+{
+
+nncc::core::ADT::tensor::Shape as_tensor_shape(const ::caffe::BlobShape &blob_shape);
+
+inline nncc::core::ADT::tensor::Shape as_tensor_shape(const ::caffe::BlobProto *blob_proto)
+{
+ return as_tensor_shape(blob_proto->shape());
+}
+
+} // namespace caffeimport
+
+#endif // __CONVERT_H__
diff --git a/compiler/enco/frontend/caffe/src/ConvolutionSpec.cpp b/compiler/enco/frontend/caffe/src/ConvolutionSpec.cpp
new file mode 100644
index 000000000..e13ada836
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ConvolutionSpec.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConvolutionSpec.h"
+#include "PaddingUtils.h"
+#include "ShapeQuery.h"
+
+#include <cassert>
+
+ConvolutionSpec::ConvolutionSpec(const ::caffe::ConvolutionParameter &param) : _param(param)
+{
+ // NOTE Dilation is not supported, yet
+ // TODO Support dilation
+ assert(param.dilation().size() == 0);
+}
+
+uint32_t ConvolutionSpec::group(void) const { return _param.group(); }
+
+uint32_t ConvolutionSpec::channel_axis(void) const
+{
+ return query_on(ifm_shape()).axis(axis_specifier(_param.axis()));
+}
+
+uint32_t ConvolutionSpec::pad(uint32_t spatial_axis) const
+{
+ assert(spatial_axis < num_spatial_axes());
+
+ auto raw_padding = build_raw_padding().with(_param);
+ auto spatial_padding = build_spatial_padding(num_spatial_axes()).with(raw_padding);
+
+ return spatial_padding.value(spatial_axis);
+}
+
+uint32_t ConvolutionSpec::stride(uint32_t spatial_axis) const
+{
+ assert(spatial_axis < num_spatial_axes());
+
+ // TODO Support stride_h/stride_w parameters
+ assert(!_param.has_stride_h());
+ assert(!_param.has_stride_w());
+
+ if (_param.stride().size() == 0)
+ {
+ // NOTE default stride is 1
+ return 1;
+ }
+
+ if (_param.stride().size() == 1)
+ {
+ return _param.stride(0);
+ }
+
+ assert(_param.stride().size() == num_spatial_axes());
+ return _param.stride(spatial_axis);
+}
+
+uint32_t ConvolutionSpec::ker_dim(uint32_t spatial_axis) const
+{
+ assert(spatial_axis < num_spatial_axes());
+ if (_param.kernel_size().size() == 0)
+ {
+ if (_param.has_kernel_h() && (spatial_axis == 0))
+ {
+ assert(num_spatial_axes() == 2);
+ return _param.kernel_h();
+ }
+
+ if (_param.has_kernel_w() && (spatial_axis == 1))
+ {
+ assert(num_spatial_axes() == 2);
+ return _param.kernel_w();
+ }
+
+ return 0;
+ }
+
+ assert(!_param.has_kernel_h());
+ assert(!_param.has_kernel_w());
+ if (_param.kernel_size().size() == 1)
+ {
+ return _param.kernel_size(0);
+ }
+ else
+ {
+ assert(_param.kernel_size().size() == num_spatial_axes());
+ return _param.kernel_size(spatial_axis);
+ }
+}
+
+nncc::core::ADT::tensor::Shape ConvolutionSpec::ker_shape(void) const
+{
+ nncc::core::ADT::tensor::Shape res;
+
+ res.resize(2 + num_spatial_axes());
+
+ res.dim(0) = ker_count();
+ assert(ifm_dim(channel_axis()) % group() == 0);
+ res.dim(1) = ifm_dim(channel_axis()) / group();
+ for (uint32_t axis = 0; axis < num_spatial_axes(); ++axis)
+ {
+ res.dim(2 + axis) = ker_dim(axis);
+ }
+
+ return res;
+}
+
+nncc::core::ADT::tensor::Shape ConvolutionSpec::ofm_shape(void) const
+{
+ nncc::core::ADT::tensor::Shape res;
+
+ res.resize(num_batch_axes() + 1 + num_spatial_axes());
+
+ for (uint32_t axis = 0; axis < num_batch_axes(); ++axis)
+ {
+ res.dim(axis) = ifm_dim(axis);
+ }
+
+ res.dim(num_batch_axes()) = ker_count();
+
+ for (uint32_t spatial_axis = 0; spatial_axis < num_spatial_axes(); ++spatial_axis)
+ {
+ const uint32_t full_axis = num_batch_axes() + 1 + spatial_axis;
+
+ uint32_t dim = 0;
+
+ dim += ifm_dim(full_axis) - ker_dim(spatial_axis) + 2 * pad(spatial_axis);
+ dim /= stride(spatial_axis);
+ dim += 1;
+
+ res.dim(full_axis) = dim;
+ }
+
+ return res;
+}
diff --git a/compiler/enco/frontend/caffe/src/ConvolutionSpec.h b/compiler/enco/frontend/caffe/src/ConvolutionSpec.h
new file mode 100644
index 000000000..c5c7c9024
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ConvolutionSpec.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVOLUTION_SPEC_H__
+#define __CONVOLUTION_SPEC_H__
+
+#include <caffe/proto/caffe.pb.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+class ConvolutionSpec
+{
+public:
+ ConvolutionSpec(const ::caffe::ConvolutionParameter &param);
+
+public:
+ uint32_t ifm_rank(void) const { return _ifm_shape.rank(); }
+ uint32_t ifm_dim(uint32_t axis) const { return _ifm_shape.dim(axis); }
+
+ uint32_t group(void) const;
+
+ uint32_t channel_axis(void) const;
+
+ uint32_t num_batch_axes(void) const { return channel_axis(); }
+ uint32_t num_spatial_axes(void) const { return ifm_rank() - channel_axis() - 1; }
+
+ uint32_t pad(uint32_t spatial_axis) const;
+ uint32_t stride(uint32_t spatial_axis) const;
+ uint32_t ker_dim(uint32_t spatial_axis) const;
+
+public:
+ const nncc::core::ADT::tensor::Shape &ifm_shape(void) const { return _ifm_shape; }
+ void ifm_shape(const nncc::core::ADT::tensor::Shape &shape) { _ifm_shape = shape; }
+
+public:
+ uint32_t ker_count(void) const { return _param.num_output(); }
+ nncc::core::ADT::tensor::Shape ker_shape(void) const;
+
+public:
+ nncc::core::ADT::tensor::Shape ofm_shape(void) const;
+
+private:
+ const ::caffe::ConvolutionParameter &_param;
+ nncc::core::ADT::tensor::Shape _ifm_shape;
+};
+#endif // __CONVOLUTION_SPEC_H__
diff --git a/compiler/enco/frontend/caffe/src/ConvolutionSpec.test.cpp b/compiler/enco/frontend/caffe/src/ConvolutionSpec.test.cpp
new file mode 100644
index 000000000..02670b0cc
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ConvolutionSpec.test.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConvolutionSpec.h"
+#include "Importer.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <caffe/net.hpp>
+
+#include <sstream>
+#include <stdexcept>
+
+#include <gtest/gtest.h>
+
+using namespace nncc::core::ADT;
+
+#define STRING(content) #content
+
+namespace
+{
+class ConvolutionSpecTest : public ::testing::Test
+{
+protected:
+ tensor::Shape as_tensor_shape(const std::vector<int> &dims)
+ {
+ const uint32_t rank = dims.size();
+
+ tensor::Shape res;
+
+ res.resize(rank);
+
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ res.dim(axis) = dims.at(axis);
+ }
+
+ return res;
+ }
+
+ bool load(const std::string &prototxt, ::caffe::NetParameter &param)
+ {
+ std::stringstream ss{prototxt};
+
+ return from_txt(ss, param);
+ }
+};
+} // namespace
+
+TEST_F(ConvolutionSpecTest, ifm_shape)
+{
+ ::caffe::ConvolutionParameter param;
+ ConvolutionSpec spec{param};
+
+ const tensor::Shape ifm_shape{1, 3, 244, 244};
+
+ spec.ifm_shape(ifm_shape);
+
+ ASSERT_EQ(spec.ifm_shape(), ifm_shape);
+ ASSERT_EQ(spec.num_batch_axes(), 1);
+ ASSERT_EQ(spec.num_spatial_axes(), 2);
+}
+
+namespace
+{
+// clang-format off
+const char *conv_0 = STRING(
+layer {
+ name: "data"
+ type : "Input"
+ top : "data"
+ input_param { shape: { dim: 1 dim : 3 dim : 244 dim : 244 } }
+}
+layer{
+ name : "conv"
+ type : "Convolution"
+ bottom : "data"
+ top : "conv"
+ convolution_param {
+ bias_term : false
+ num_output : 1
+ kernel_size : 1
+ }
+});
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, conv_0)
+{
+ ::caffe::NetParameter param;
+
+ ASSERT_TRUE(load(conv_0, param));
+
+ ::caffe::Net<float> net{param};
+
+ const tensor::Shape ifm_shape{1, 3, 244, 244};
+ ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ // Check 'ker_shape'
+ {
+ auto expected = as_tensor_shape(net.layer_by_name("conv")->blobs().at(0)->shape());
+ auto obtained = spec.ker_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+namespace
+{
+// clang-format off
+const char *conv_1 = STRING(
+layer {
+ name: "data"
+ type : "Input"
+ top : "data"
+ input_param { shape: { dim: 1 dim : 3 dim : 244 dim : 244 } }
+}
+layer{
+ name : "conv"
+ type : "Convolution"
+ bottom : "data"
+ top : "conv"
+ convolution_param {
+ bias_term : false
+ num_output : 1
+ kernel_size : 1
+ kernel_size : 3
+ }
+});
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, conv_1)
+{
+ ::caffe::NetParameter param;
+
+ ASSERT_TRUE(load(conv_1, param));
+
+ ::caffe::Net<float> net{param};
+
+ const tensor::Shape ifm_shape{1, 3, 244, 244};
+ ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ // Check 'ker_shape'
+ {
+ auto expected = as_tensor_shape(net.layer_by_name("conv")->blobs().at(0)->shape());
+ auto obtained = spec.ker_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+namespace
+{
+// NOTE This example is derived from conv1_3x3_s2 layer in reference inception v3 layer
+// clang-format off
+const char *conv_2 = STRING(
+layer {
+ name: "data"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape: { dim: 1 dim: 3 dim: 299 dim: 299 }
+ }
+}
+layer {
+ name: "conv"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv"
+ convolution_param {
+ bias_term: false
+ num_output: 2
+ stride: 2
+ kernel_size: 3
+ }
+}
+);
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, conv_2)
+{
+ ::caffe::NetParameter param;
+
+ ASSERT_TRUE(load(conv_2, param));
+
+ ::caffe::Net<float> net{param};
+
+ const tensor::Shape ifm_shape{1, 3, 299, 299};
+ ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ // Check 'stride'
+ ASSERT_EQ(spec.stride(0), 2);
+ ASSERT_EQ(spec.stride(1), 2);
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+namespace
+{
+// clang-format off
+const char *conv_pad = STRING(
+layer {
+ name: "data"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape: { dim: 1 dim: 3 dim: 16 dim: 16 }
+ }
+}
+layer {
+ name: "conv"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv"
+ convolution_param {
+ bias_term: false
+ num_output: 2
+ pad: 2
+ kernel_size: 3
+ }
+}
+);
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, conv_pad)
+{
+ ::caffe::NetParameter param;
+
+ ASSERT_TRUE(load(conv_pad, param));
+
+ ::caffe::Net<float> net{param};
+
+ const tensor::Shape ifm_shape{1, 3, 16, 16};
+ ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ // Check 'pad'
+ ASSERT_EQ(spec.pad(0), 2);
+ ASSERT_EQ(spec.pad(1), 2);
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+namespace
+{
+// clang-format off
+const char *conv_ker_hw = STRING(
+layer {
+ name: "data"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape: { dim: 1 dim: 3 dim: 16 dim: 16 }
+ }
+}
+layer {
+ name: "conv"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv"
+ convolution_param {
+ bias_term: false
+ num_output: 2
+ kernel_h: 3
+ kernel_w: 1
+ }
+}
+);
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, conv_ker_hw)
+{
+ ::caffe::NetParameter param;
+
+ ASSERT_TRUE(load(conv_ker_hw, param));
+
+ ::caffe::Net<float> net{param};
+
+ const tensor::Shape ifm_shape{1, 3, 16, 16};
+ ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ // Check 'pad'
+ ASSERT_EQ(spec.ker_dim(0), 3);
+ ASSERT_EQ(spec.ker_dim(1), 1);
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+namespace
+{
+// clang-format off
+const char *dconv = STRING(
+layer {
+ name: "data"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape: { dim: 1 dim: 3 dim: 16 dim: 16 }
+ }
+}
+layer {
+ name: "conv"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv"
+ convolution_param {
+ bias_term: false
+ num_output: 3
+ kernel_size: 3
+ group: 3
+ }
+}
+);
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, dconv)
+{
+ ::caffe::NetParameter param;
+
+ ASSERT_TRUE(load(dconv, param));
+
+ ::caffe::Net<float> net{param};
+
+ const tensor::Shape ifm_shape{1, 3, 16, 16};
+ ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ // Check 'ker_shape'
+ {
+ auto expected = as_tensor_shape(net.layer_by_name("conv")->blobs().at(0)->shape());
+ auto obtained = spec.ker_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
diff --git a/compiler/enco/frontend/caffe/src/Entry.cpp b/compiler/enco/frontend/caffe/src/Entry.cpp
new file mode 100644
index 000000000..2bdb73eac
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Entry.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+#include "Importer.h"
+
+#include <cmdline/View.h>
+
+#include <stdex/Memory.h>
+
+#include <fstream>
+#include <cassert>
+
+extern "C" std::unique_ptr<enco::Frontend> make_frontend(const cmdline::View &cmdline)
+{
+ assert(cmdline.size() == 2);
+
+ auto frontend = stdex::make_unique<Frontend>();
+
+ // Fill prototxt
+ {
+ std::ifstream ifs{cmdline.at(0)};
+ if (!ifs.is_open())
+ {
+ throw std::runtime_error("Prototxt file open fail");
+ }
+
+ if (!from_txt(ifs, *frontend->prototxt()))
+ {
+ throw std::runtime_error("Filling prototxt fail");
+ }
+ }
+
+ // Fill caffemodel
+ {
+ std::ifstream ifs{cmdline.at(1), std::ios::binary};
+ if (!ifs.is_open())
+ {
+ throw std::runtime_error("Caffemodel file open fail");
+ }
+
+ if (!from_bin(ifs, *frontend->caffemodel()))
+ {
+ throw std::runtime_error("Filling caffemodel fail");
+ }
+ }
+
+ return std::move(frontend);
+}
diff --git a/compiler/enco/frontend/caffe/src/Frontend.cpp b/compiler/enco/frontend/caffe/src/Frontend.cpp
new file mode 100644
index 000000000..7d2b3d36c
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Frontend.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+#include "Context.h"
+#include "GraphBuilderRegistry.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+
+#include <map>
+#include <set>
+#include <string>
+
+#include <cassert>
+#include <stdexcept>
+
+using namespace nncc::core::ADT;
+
+using tensor::LexicalLayout;
+
+Frontend::Frontend() : _prototxt{new ::caffe::NetParameter}, _caffemodel{new ::caffe::NetParameter}
+{
+ // DO NOTHING
+}
+
+enco::Bundle Frontend::load(void) const
+{
+ auto module = coco::Module::create();
+ auto blk = module->entity()->block()->create();
+ module->block()->append(blk);
+
+ auto data = coco::Data::create();
+
+ // For weight access
+ caffeimport::WeightContext weight_ctx(_caffemodel.get());
+
+ // For inter-layer communication
+ std::map<std::string, tensor::Shape> shape_ctx;
+ std::map<std::string, coco::Bag *> bag_ctx;
+
+ std::set<std::string> bags;
+ std::map<std::string, uint32_t> def_count;
+ std::map<std::string, uint32_t> use_count;
+
+ auto def = [&bags, &def_count, &use_count](const std::string &name) {
+ if (bags.find(name) == bags.end())
+ {
+ bags.insert(name);
+ def_count[name] = 0;
+ use_count[name] = 0;
+ }
+
+ def_count.at(name) += 1;
+ };
+
+ auto use = [&use_count](const std::string &name) { use_count.at(name) += 1; };
+
+ auto outputs = [&bags, &def_count, &use_count](void) {
+ std::set<std::string> res;
+
+ for (const auto &bag : bags)
+ {
+ if (def_count.at(bag) > use_count.at(bag))
+ {
+ res.insert(bag);
+ }
+ }
+
+ return res;
+ };
+
+ caffeimport::GraphBuilderContext opbuilder_context(module.get(), data.get(), blk, shape_ctx,
+ bag_ctx, weight_ctx);
+
+ for (const auto &layer : _prototxt->layer())
+ {
+ assert(layer.has_name());
+ assert(layer.has_type());
+
+ for (uint32_t n = 0; n < layer.top().size(); ++n)
+ {
+ def(layer.top(n));
+ }
+
+ for (uint32_t n = 0; n < layer.bottom().size(); ++n)
+ {
+ use(layer.bottom(n));
+ }
+
+ if (const auto *graph_builder = caffeimport::GraphBuilderRegistry::get().lookup(layer.type()))
+ {
+ graph_builder->build(layer, &opbuilder_context);
+ }
+ else
+ {
+ throw std::runtime_error{"Not supported: " + layer.type()};
+ }
+ }
+
+ // Finalize: Create output for each top blob
+ for (const auto &name : outputs())
+ {
+ const auto &shape = shape_ctx.at(name);
+ auto bag = bag_ctx.at(name);
+
+ auto output = module->entity()->output()->create(shape);
+
+ output->bag(bag);
+ output->name(name);
+ output->reorder<LexicalLayout>();
+
+ module->output()->insert(output);
+ }
+
+ enco::Bundle bundle;
+
+ bundle.module(std::move(module));
+ bundle.data(std::move(data));
+
+ return std::move(bundle);
+}
diff --git a/compiler/enco/frontend/caffe/src/Frontend.h b/compiler/enco/frontend/caffe/src/Frontend.h
new file mode 100644
index 000000000..34fe90eba
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Frontend.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FRONTEND_H__
+#define __FRONTEND_H__
+
+#include <enco/Frontend.h>
+
+#include <caffe/proto/caffe.pb.h>
+
+#include <memory>
+
+class Frontend final : public enco::Frontend
+{
+public:
+ Frontend();
+
+public:
+ ::caffe::NetParameter *prototxt(void) { return _prototxt.get(); }
+ ::caffe::NetParameter *caffemodel(void) { return _caffemodel.get(); }
+
+public:
+ enco::Bundle load(void) const override;
+
+private:
+ std::unique_ptr<::caffe::NetParameter> _prototxt;
+ std::unique_ptr<::caffe::NetParameter> _caffemodel;
+};
+
+#endif // __FRONTEND_H__
diff --git a/compiler/enco/frontend/caffe/src/GraphBuilder.cpp b/compiler/enco/frontend/caffe/src/GraphBuilder.cpp
new file mode 100644
index 000000000..18ba10c08
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/GraphBuilder.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @note: This cpp file exist to check compilation integrity
+ */
+
+#include "GraphBuilder.h"
diff --git a/compiler/enco/frontend/caffe/src/GraphBuilder.h b/compiler/enco/frontend/caffe/src/GraphBuilder.h
new file mode 100644
index 000000000..04adb96f4
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/GraphBuilder.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GRAPH_BUILDER_H__
+#define __GRAPH_BUILDER_H__
+
+#include "Context.h"
+
+#include <caffe/proto/caffe.pb.h>
+
+namespace caffeimport
+{
+
+class GraphBuilder
+{
+public:
+ virtual void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const = 0;
+ virtual ~GraphBuilder() {}
+};
+
+} // namespace caffeimport
+
+#endif // __GRAPH_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/GraphBuilderRegistry.cpp b/compiler/enco/frontend/caffe/src/GraphBuilderRegistry.cpp
new file mode 100644
index 000000000..e9db31177
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/GraphBuilderRegistry.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GraphBuilderRegistry.h"
+
+#include "Layer/Concatenation.h"
+#include "Layer/Convolution.h"
+#include "Layer/Eltwise.h"
+#include "Layer/Input.h"
+#include "Layer/Pooling.h"
+#include "Layer/ReLU.h"
+#include "Layer/Scale.h"
+#include "Layer/BatchNorm.h"
+
+#include <stdex/Memory.h>
+
+using stdex::make_unique;
+
+namespace caffeimport
+{
+
+GraphBuilderRegistry::GraphBuilderRegistry()
+{
+ _builder_map["Concat"] = make_unique<ConcatBuilder>();
+ _builder_map["Convolution"] = make_unique<ConvolutionBuilder>();
+ _builder_map["Eltwise"] = make_unique<EltwiseBuilder>();
+ _builder_map["Input"] = make_unique<InputBuilder>();
+ _builder_map["Pooling"] = make_unique<PoolingBuilder>();
+ _builder_map["ReLU"] = make_unique<ReLUBuilder>();
+ _builder_map["Scale"] = make_unique<ScaleBuilder>();
+ _builder_map["BatchNorm"] = make_unique<BatchNormBuilder>();
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/GraphBuilderRegistry.h b/compiler/enco/frontend/caffe/src/GraphBuilderRegistry.h
new file mode 100644
index 000000000..035d32a4b
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/GraphBuilderRegistry.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GRAPH_BUILDER_REGISTRY_H__
+#define __GRAPH_BUILDER_REGISTRY_H__
+
+#include "GraphBuilder.h"
+
+#include <map>
+#include <string>
+
+namespace caffeimport
+{
+
+class GraphBuilderRegistry
+{
+public:
+ const GraphBuilder *lookup(const std::string &layer) const
+ {
+ if (_builder_map.find(layer) == _builder_map.end())
+ return nullptr;
+
+ return _builder_map.at(layer).get();
+ }
+
+ static GraphBuilderRegistry &get()
+ {
+ static GraphBuilderRegistry me;
+ return me;
+ }
+
+private:
+ GraphBuilderRegistry();
+
+private:
+ std::map<std::string, std::unique_ptr<GraphBuilder>> _builder_map;
+};
+
+} // namespace caffeimport
+
+#endif // __GRAPH_BUILDER_REGISTRY_H__
diff --git a/compiler/enco/frontend/caffe/src/IRBuilder.h b/compiler/enco/frontend/caffe/src/IRBuilder.h
new file mode 100644
index 000000000..fe34328af
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/IRBuilder.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IR_BUILDER_H__
+#define __IR_BUILDER_H__
+
+#include "coco/IR/Module.h"
+
+#include <deque>
+
+/**
+ * coco IR builders
+ */
+
+class OpBuilder
+{
+public:
+ OpBuilder(coco::Module *module) : _module{module}
+ {
+ // module SHOULD BE valid
+ assert(_module != nullptr);
+ }
+
+public:
+ /**
+ * @brief Return true if the internal stack is empty
+ */
+ bool empty(void) const { return _stack.empty(); }
+
+ /**
+ * @brief Return the operation at the top of the internal stack
+ */
+ coco::Op *top(void) const
+ {
+ assert(_stack.size() > 0);
+ return _stack.front();
+ }
+
+ /**
+ * @brief Push op onto the internal stack
+ *
+ * BEFORE| Stack
+ * AFTER | Op; Stack
+ */
+ OpBuilder &push(coco::Op *op)
+ {
+ _stack.push_front(op);
+ return (*this);
+ }
+
+ /**
+ * @brief Create "Load" op and push it onto the internal stack
+ *
+ * BEFORE| Stack
+ * AFTER | Load(obj); Stack
+ */
+ OpBuilder &load(coco::Object *obj)
+ {
+ auto op = _module->entity()->op()->create<coco::Load>();
+ op->object(obj);
+ push(op);
+ return (*this);
+ }
+
+ /**
+ * @brief Create "Add" op and push it onto the internal stack
+ *
+ * BEFORE| Left; Right; Stack
+ * AFTER | Add(Left, Right); Stack
+ */
+ OpBuilder &add(void) { return binary<coco::Add>(); }
+
+ /**
+ * @brief Create "Sub" op and push it onto the internal stack
+ *
+ * BEFORE| Left; Right; Stack
+ * AFTER | Sub(Left, Right); Stack
+ */
+ OpBuilder &sub(void) { return binary<coco::Sub>(); }
+
+ /**
+ * @brief Create "Mul" op and push it onto the internal stack
+ *
+ * BEFORE| Left; Right; Stack
+ * AFTER | Mul(Left, Right); Stack
+ */
+ OpBuilder &mul(void) { return binary<coco::Mul>(); }
+
+ /**
+ * @brief Create "Div" op and push it onto the internal stack
+ *
+ * BEFORE| Left; Right; Stack
+ * AFTER | Div(Left, Right); Stack
+ */
+ OpBuilder &div(void) { return binary<coco::Div>(); }
+
+ /**
+ * @brief Pop op from the internal stack
+ *
+ * BEFORE| Op; Stack
+ * AFTER | Stack
+ */
+ coco::Op *pop(void)
+ {
+ assert(_stack.size() > 0);
+ auto op = _stack.front();
+ _stack.pop_front();
+ return op;
+ }
+
+private:
+ template <typename ConcreteOp> OpBuilder &binary()
+ {
+ assert(_stack.size() >= 2);
+ auto left = pop();
+ auto right = pop();
+
+ auto op = _module->entity()->op()->create<ConcreteOp>();
+ op->left(left);
+ op->right(right);
+ push(op);
+
+ return (*this);
+ }
+
+private:
+ coco::Module *_module;
+ std::deque<coco::Op *> _stack;
+};
+
+inline OpBuilder op_builder(coco::Module *m) { return OpBuilder{m}; }
+inline OpBuilder op_builder(const std::unique_ptr<coco::Module> &m) { return op_builder(m.get()); }
+
+class InstrBuilder
+{
+public:
+ InstrBuilder(coco::Module *module) : _module{module}
+ {
+ // NOTE _module SHOULD be valid
+ assert(_module != nullptr);
+ }
+
+public:
+ /**
+ * @brief Create "Eval" instruction with a given "Object" and "Op"
+ *
+ * @note "eval(out, op)" will create "%out <- Eval(op)" instruction
+ */
+ coco::Eval *eval(coco::Object *out, coco::Op *op) const
+ {
+ auto ins = _module->entity()->instr()->create<coco::Eval>();
+ ins->op(op);
+ ins->out(out);
+ return ins;
+ }
+
+private:
+ coco::Module *_module;
+};
+
+inline InstrBuilder instr_builder(coco::Module *m) { return InstrBuilder{m}; }
+inline InstrBuilder instr_builder(const std::unique_ptr<coco::Module> &m)
+{
+ return instr_builder(m.get());
+}
+
+#endif // __IR_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Importer.cpp b/compiler/enco/frontend/caffe/src/Importer.cpp
new file mode 100644
index 000000000..943a54e5d
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Importer.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Importer.h"
+
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/text_format.h>
+
+bool from_txt(std::istream &is, ::caffe::NetParameter &param)
+{
+ google::protobuf::io::IstreamInputStream iis{&is};
+
+ if (!google::protobuf::TextFormat::Parse(&iis, &param))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+bool from_bin(std::istream &is, ::caffe::NetParameter &param)
+{
+ google::protobuf::io::IstreamInputStream iis{&is};
+ google::protobuf::io::CodedInputStream cis{&iis};
+
+ if (!param.ParseFromCodedStream(&cis))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+bool from_txt(std::istream &is, ::caffe::PoolingParameter &param)
+{
+ ::google::protobuf::io::IstreamInputStream iis{&is};
+ return google::protobuf::TextFormat::Parse(&iis, &param);
+}
diff --git a/compiler/enco/frontend/caffe/src/Importer.h b/compiler/enco/frontend/caffe/src/Importer.h
new file mode 100644
index 000000000..ac83c0b27
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Importer.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IMPORTER_H__
+#define __IMPORTER_H__
+
+#include <caffe/proto/caffe.pb.h>
+
+#include <istream>
+
+bool from_txt(std::istream &is, ::caffe::NetParameter &param);
+bool from_bin(std::istream &is, ::caffe::NetParameter &param);
+
+bool from_txt(std::istream &is, ::caffe::PoolingParameter &param);
+
+#endif // __IMPORTER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/BatchNorm.cpp b/compiler/enco/frontend/caffe/src/Layer/BatchNorm.cpp
new file mode 100644
index 000000000..ff1e86570
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/BatchNorm.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchNorm.h"
+#include "IRBuilder.h"
+
+#include <morph/caffe.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::caffe;
+
+using tensor::num_elements;
+
+namespace caffeimport
+{
+
+void BatchNormBuilder::build(const ::caffe::LayerParameter &layer,
+ GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ coco::Data *data = context->data();
+ coco::Block *blk = context->block();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+ WeightContext &weight_ctx = context->weight_ctx();
+
+ assert(layer.bottom().size() == 1);
+ assert(layer.top().size() == 1);
+
+ assert(layer.has_batch_norm_param());
+ const auto &param = layer.batch_norm_param();
+
+ // TODO Support training case
+ assert(param.use_global_stats() == true);
+
+ // Create an object for an input feature map
+ const auto ifm_name = layer.bottom(0);
+ const auto ifm_shape = shape_ctx.at(ifm_name);
+ auto ifm_bag = bag_ctx.at(ifm_name);
+ auto ifm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ const auto ofm_name = layer.top(0);
+ const auto ofm_shape = ifm_shape;
+ auto ofm_bag = module->entity()->bag()->create(num_elements(ofm_shape));
+ auto ofm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ofm_shape)));
+
+ // Create an object for the scaled mean estimates data
+ auto mean_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto mean_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ mean_obj->bag(mean_bag);
+ mean_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ // Create an object for the scaled variance estimates data
+ auto variance_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto variance_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ variance_obj->bag(variance_bag);
+ variance_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ if (param.use_global_stats())
+ {
+ // Use the stored mean/variance estimates.
+ assert(weight_ctx.blob_count(layer.name()) == 3);
+
+ // Create an object for scale factor data
+ auto factor_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto factor_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ factor_obj->bag(factor_bag);
+ factor_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ // Fill "scale factor" data
+ {
+ data->f32()->allocate(factor_bag);
+
+ auto dst = data->f32()->weight(factor_bag);
+ // Calculate scale factor
+ auto blob = weight_ctx.blob_get(layer.name(), 2);
+ const auto scale_factor = blob->data(0) == 0 ? 0.f : 1 / blob->data(0);
+
+ for (uint32_t ch = 0; ch < factor_obj->shape().depth(); ++ch)
+ {
+ dst[ch] = scale_factor;
+ }
+ }
+
+ // Create an object for saved mean data
+ auto saved_mean_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto saved_mean_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ saved_mean_obj->bag(saved_mean_bag);
+ saved_mean_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ // Fill "saved mean estimates" data
+ {
+ data->f32()->allocate(saved_mean_bag);
+
+ auto dst = data->f32()->weight(saved_mean_bag);
+ auto blob = weight_ctx.blob_get(layer.name(), 0);
+
+ for (uint32_t ch = 0; ch < saved_mean_obj->shape().depth(); ++ch)
+ {
+ dst[ch] = blob->data(ch);
+ }
+ }
+
+ // Multiply scale factor to mean data
+ {
+ auto mul_op = op_builder(module).load(factor_obj).load(saved_mean_obj).mul().pop();
+ auto mul_ins = instr_builder(module).eval(mean_obj, mul_op);
+
+ blk->instr()->append(mul_ins);
+ }
+
+ // Create an object for saved variance data
+ auto saved_variance_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto saved_variance_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ saved_variance_obj->bag(saved_variance_bag);
+ saved_variance_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ // Fill "saved variance estimates" data
+ {
+ data->f32()->allocate(saved_variance_bag);
+
+ auto dst = data->f32()->weight(saved_variance_bag);
+ auto blob = weight_ctx.blob_get(layer.name(), 1);
+
+ for (uint32_t ch = 0; ch < saved_variance_obj->shape().depth(); ++ch)
+ {
+ dst[ch] = blob->data(ch);
+ }
+ }
+
+ // Multiply scale factor to variance data
+ {
+ auto mul_op = op_builder(module).load(factor_obj).load(saved_variance_obj).mul().pop();
+ auto mul_ins = instr_builder(module).eval(variance_obj, mul_op);
+
+ blk->instr()->append(mul_ins);
+ }
+ }
+ else
+ {
+ // TODO use_global_stats() == false case
+ }
+
+ // Create an object for subtraction
+ auto sub_bag = module->entity()->bag()->create(num_elements(ofm_shape));
+ auto sub_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ sub_obj->bag(sub_bag);
+ sub_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ofm_shape)));
+
+ // Subtract mean
+ {
+ auto sub_op = op_builder(module).load(mean_obj).load(ifm_obj).sub().pop();
+ auto sub_ins = instr_builder(module).eval(sub_obj, sub_op);
+
+ blk->instr()->append(sub_ins);
+ }
+
+ // Create an object for normalize variance data
+ auto norm_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto norm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ norm_obj->bag(norm_bag);
+ norm_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ // Normalize variance
+ {
+ // Create an object for epsilon data
+ auto eps_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto eps_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ eps_obj->bag(eps_bag);
+ eps_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ // Fill "epsilon" data
+ {
+ data->f32()->allocate(eps_bag);
+
+ auto dst = data->f32()->weight(eps_bag);
+ auto eps = param.eps();
+
+ for (uint32_t ch = 0; ch < eps_obj->shape().depth(); ++ch)
+ {
+ dst[ch] = eps;
+ }
+ }
+
+ // Create a temp object
+ auto temp_bag = module->entity()->bag()->create(ofm_shape.dim(1));
+ auto temp_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ temp_obj->bag(temp_bag);
+ temp_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ // Add epsilon to variance
+ {
+ auto add_op = op_builder(module).load(variance_obj).load(eps_obj).add().pop();
+ auto add_ins = instr_builder(module).eval(temp_obj, add_op);
+
+ blk->instr()->append(add_ins);
+ }
+
+ // Sqrt variance
+ {
+ auto load = op_builder(module).load(temp_obj).pop();
+ auto sqrt_op = module->entity()->op()->create<coco::Sqrt>();
+ sqrt_op->arg(load);
+ auto sqrt_ins = instr_builder(module).eval(norm_obj, sqrt_op);
+
+ blk->instr()->append(sqrt_ins);
+ }
+ }
+
+ // Replicate variance to input size
+ {
+ auto div_op = op_builder(module).load(norm_obj).load(sub_obj).div().pop();
+ auto div_ins = instr_builder(module).eval(ofm_obj, div_op);
+
+ blk->instr()->append(div_ins);
+ }
+
+ // Update bag and shape context
+ bag_ctx[ofm_name] = ofm_bag;
+ shape_ctx[ofm_name] = ofm_shape;
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/BatchNorm.h b/compiler/enco/frontend/caffe/src/Layer/BatchNorm.h
new file mode 100644
index 000000000..613b6687e
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/BatchNorm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BATCHNORM_BUILDER_H__
+#define __BATCHNORM_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+class BatchNormBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __BATCHNORM_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/Concatenation.cpp b/compiler/enco/frontend/caffe/src/Layer/Concatenation.cpp
new file mode 100644
index 000000000..f05f5908a
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Concatenation.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Concatenation.h"
+#include "IRBuilder.h"
+
+#include <coco/IR/FeatureLayouts.h>
+
+#include <morph/caffe.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::caffe;
+
+namespace caffeimport
+{
+
+void ConcatBuilder::build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ coco::Block *blk = context->block();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+
+ assert(layer.bottom().size() > 0);
+ assert(layer.top().size() == 1);
+
+ // Assume default concat axis
+ // - Please refer to http://caffe.berkeleyvision.org/tutorial/layers/concat.html for details
+ // TODO Get concat axis from concat param
+ assert(!layer.has_concat_param());
+ const uint32_t concat_axis = 1;
+
+ // Construct a vector of input objects
+ std::vector<coco::FeatureObject *> input_objects;
+
+ for (const auto &input_name : layer.bottom())
+ {
+ const auto input_shape = as_feature_shape(shape_ctx.at(input_name));
+
+ auto input_bag = bag_ctx.at(input_name);
+ auto input_feature = module->entity()->object()->create<coco::FeatureObject>();
+
+ input_feature->bag(input_bag);
+ input_feature->layout(coco::FeatureLayouts::BCHW::create(input_shape));
+
+ input_objects.emplace_back(input_feature);
+ }
+
+ coco::FeatureObject *last_feature = input_objects.at(0);
+
+ assert(last_feature != nullptr);
+ assert(last_feature->bag() != nullptr);
+
+ // Update coco IR
+ //
+ // Given a sequence of input features %in[0] / %in[1] / ... / %in[N]
+ // the below code constructs a sequence of eval instructions
+ // - Load is omitted for simplicity
+ //
+ // %out[0] = eval(ConcatF(%in[0], %in[1]))
+ // %out[1] = eval(ConcatF(%out[0], %in[2]))
+ // ...
+ // %out[N - 1] = eval(ConcatF(%out[N - 2], %in[N]))
+ //
+ for (uint32_t n = 1; n < input_objects.size(); ++n)
+ {
+ auto const left_feature = last_feature;
+ auto const left_shape = left_feature->layout()->shape();
+
+ auto right_feature = input_objects.at(n);
+ auto right_shape = right_feature->layout()->shape();
+
+ // Batch is not supported, yet
+ assert(left_feature->layout()->batch() == 1);
+ assert(right_feature->layout()->batch() == 1);
+
+ // Height and Width SHOULD BE IDENTICAL for depth concat
+ assert(left_shape.height() == right_shape.height());
+ assert(left_shape.width() == right_shape.width());
+
+ const uint32_t C = left_shape.depth() + right_shape.depth();
+ const uint32_t H = left_shape.height();
+ const uint32_t W = left_shape.width();
+
+ const nncc::core::ADT::feature::Shape out_shape{C, H, W};
+
+ auto out_bag = module->entity()->bag()->create(num_elements(out_shape));
+ auto out_feature = module->entity()->object()->create<coco::FeatureObject>();
+
+ out_feature->bag(out_bag);
+ out_feature->layout(coco::FeatureLayouts::BCHW::create(out_shape));
+
+ auto left_load = op_builder(module).load(left_feature).pop();
+ auto right_load = op_builder(module).load(right_feature).pop();
+
+ auto concat_f = module->entity()->op()->create<coco::ConcatF>();
+
+ concat_f->axis(coco::ConcatF::Axis::Depth);
+ concat_f->left(left_load);
+ concat_f->right(right_load);
+
+ auto eval = instr_builder(module).eval(out_feature, concat_f);
+
+ // Append the constructed Shuffle instruction
+ blk->instr()->append(eval);
+
+ // Update 'last_feature'
+ last_feature = out_feature;
+ }
+
+ assert(last_feature != nullptr);
+ assert(last_feature->bag() != nullptr);
+
+ // Update bag and shape context
+ auto const out_name = layer.top(0);
+ auto const out_shape = as_tensor_shape(last_feature->layout()->shape());
+ auto const out_bag = last_feature->bag();
+
+ bag_ctx[out_name] = out_bag;
+ shape_ctx[out_name] = out_shape;
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/Concatenation.h b/compiler/enco/frontend/caffe/src/Layer/Concatenation.h
new file mode 100644
index 000000000..85e04000d
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Concatenation.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONCAT_BUILDER_H__
+#define __CONCAT_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+/**
+ * @brief Builds coco IR for a Caffe "Concat" layer (see Concatenation.cpp).
+ */
+class ConcatBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __CONCAT_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/Convolution.cpp b/compiler/enco/frontend/caffe/src/Layer/Convolution.cpp
new file mode 100644
index 000000000..9fb096d49
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Convolution.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convolution.h"
+#include "ConvolutionSpec.h"
+#include "Convert.h"
+#include "IRBuilder.h"
+
+#include <nncc/core/ADT/kernel/Overlay.h>
+#include <nncc/core/ADT/kernel/NCHWLayout.h>
+
+#include <morph/caffe.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::caffe;
+
+using tensor::num_elements;
+
+namespace caffeimport
+{
+
+/**
+ * @brief Translate a Caffe "Convolution" layer into coco IR.
+ *
+ * Creates feature objects for the input/output feature maps, a kernel object
+ * initialized from the layer's first weight blob, and appends a Load + Conv2D
+ * Eval instruction to the current block. When bias_term() is set, a second
+ * Eval (channel-wise Add) is appended as well.
+ */
+void ConvolutionBuilder::build(const ::caffe::LayerParameter &layer,
+ GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ coco::Data *data = context->data();
+ coco::Block *blk = context->block();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+ WeightContext &weight_ctx = context->weight_ctx();
+
+ // Only single-input, single-output convolution layers are handled
+ assert(layer.bottom().size() == 1);
+ assert(layer.top().size() == 1);
+
+ assert(layer.has_convolution_param());
+ const auto &param = layer.convolution_param();
+
+ // ConvolutionSpec derives output/kernel shapes from the param + IFM shape
+ ConvolutionSpec spec{param};
+ {
+ const auto ifm_name = layer.bottom(0);
+ const auto ifm_shape = shape_ctx.at(ifm_name);
+ spec.ifm_shape(ifm_shape);
+ }
+
+ // NOTE The current implementation focuses on 2D convolution
+ // TODO Support general ND convolution
+ assert(spec.num_batch_axes() == 1);
+ assert(spec.num_spatial_axes() == 2);
+
+ // Create an object for an input feature map
+ const auto ifm_name = layer.bottom(0);
+ const auto ifm_shape = shape_ctx.at(ifm_name);
+ auto ifm_bag = bag_ctx.at(ifm_name);
+ auto ifm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ const auto ofm_name = layer.top(0);
+ const auto ofm_shape = spec.ofm_shape();
+ auto ofm_bag = module->entity()->bag()->create(num_elements(ofm_shape));
+ auto ofm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ofm_shape)));
+
+ // Create an object for kernel
+ using namespace coco::KernelLayouts;
+
+ const auto ker_shape = spec.ker_shape();
+ auto ker_bag = module->entity()->bag()->create(num_elements(ker_shape));
+ auto ker_obj = module->entity()->object()->create<coco::KernelObject>();
+
+ ker_obj->bag(ker_bag);
+ ker_obj->layout(NCHW::create(as_kernel_shape(ker_shape)));
+
+ // Create a kernel overlay for the kernel object
+ data->f32()->allocate(ker_bag);
+
+ // Initialize the kernel overlay from the layer's first weight blob
+ assert(weight_ctx.blob_count(layer.name()) >= 1);
+ auto ker_blob = weight_ctx.blob_get(layer.name(), 0);
+
+ assert(ker_shape == caffeimport::as_tensor_shape(ker_blob));
+
+ auto ker_dst = data->f32()->access(ker_obj);
+ auto ker_src = kernel::OverlayFactory<float, kernel::NCHWLayout>::make(
+ ker_obj->shape(), ker_blob->mutable_data()->begin());
+
+ // Element-wise copy of the Caffe weight blob (NCHW) into the coco kernel data
+ for (uint32_t n = 0; n < ker_obj->shape().count(); ++n)
+ {
+ for (uint32_t ch = 0; ch < ker_obj->shape().depth(); ++ch)
+ {
+ for (uint32_t row = 0; row < ker_obj->shape().height(); ++row)
+ {
+ for (uint32_t col = 0; col < ker_obj->shape().width(); ++col)
+ {
+ ker_dst->at(n, ch, row, col) = ker_src.at(n, ch, row, col);
+ }
+ }
+ }
+ }
+
+ // Create a Load op
+ auto load = op_builder(module).load(ifm_obj).pop();
+
+ // Create a Conv2D op
+ auto op = module->entity()->op()->create<coco::Conv2D>();
+
+ op->group(spec.group());
+
+ op->ker(ker_obj);
+ op->stride()->vertical(spec.stride(0));
+ op->stride()->horizontal(spec.stride(1));
+
+ // pad(0) is the vertical padding, pad(1) the horizontal padding;
+ // Caffe applies the same amount on both sides of each axis
+ op->pad()->top(spec.pad(0));
+ op->pad()->bottom(spec.pad(0));
+ op->pad()->left(spec.pad(1));
+ op->pad()->right(spec.pad(1));
+
+ op->arg(load);
+
+ // Create an Eval instruction
+ auto ins = instr_builder(module).eval(ofm_obj, op);
+
+ // Append the instruction to the block
+ blk->instr()->append(ins);
+
+ //
+ // coco IR allows Conv2D fused with Add, but the current implementation of enco backend
+ // is unable to process such a tree.
+ //
+ // As a workaround, caffe frontend constructs an instruction for Conv2D and Add.
+ //
+ if (param.bias_term())
+ {
+ // Bias is the layer's second weight blob
+ assert(weight_ctx.blob_count(layer.name()) >= 2);
+
+ // Create Bag & Object (one bias value per output channel = ker_shape.dim(0))
+ auto bias_bag = module->entity()->bag()->create(ker_shape.dim(0));
+ auto bias_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ bias_obj->bag(bias_bag);
+ bias_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(ofm_shape)));
+
+ auto added_bag = module->entity()->bag()->create(num_elements(ofm_shape));
+ auto added_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ added_obj->bag(added_bag);
+ added_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ofm_shape)));
+
+ // Create Op (channel-wise addition via BC layout broadcast)
+ auto bias_add = op_builder(module).load(bias_obj).load(ofm_obj).add().pop();
+
+ // Create Instr
+ auto bias_add_ins = instr_builder(module).eval(added_obj, bias_add);
+
+ // Append the instruction
+ blk->instr()->append(bias_add_ins);
+
+ // Fill bias data
+ data->f32()->allocate(bias_bag);
+
+ auto bias_span = data->f32()->weight(bias_bag);
+ auto bias_blob = weight_ctx.blob_get(layer.name(), 1);
+
+ // ker_obj->shape().count() is the number of output channels
+ for (uint32_t ch = 0; ch < ker_obj->shape().count(); ++ch)
+ {
+ bias_span[ch] = bias_blob->data(ch);
+ }
+
+ // Update output: downstream layers should read the biased result
+ ofm_bag = added_bag;
+ }
+
+ // Update bag and shape context
+ bag_ctx[ofm_name] = ofm_bag;
+ shape_ctx[ofm_name] = ofm_shape;
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/Convolution.h b/compiler/enco/frontend/caffe/src/Layer/Convolution.h
new file mode 100644
index 000000000..a944f12a3
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Convolution.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVOLUTION_BUILDER_H__
+#define __CONVOLUTION_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+/**
+ * @brief Builds coco IR for a Caffe "Convolution" layer (see Convolution.cpp).
+ */
+class ConvolutionBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __CONVOLUTION_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/Eltwise.cpp b/compiler/enco/frontend/caffe/src/Layer/Eltwise.cpp
new file mode 100644
index 000000000..6a5d4f196
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Eltwise.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Eltwise.h"
+#include "IRBuilder.h"
+
+#include <coco/IR/FeatureLayouts.h>
+
+#include <morph/caffe.h>
+
+#include <cassert>
+#include <functional>
+
+using namespace nncc::core::ADT;
+using namespace morph::caffe;
+
+namespace caffeimport
+{
+
+/**
+ * @brief Translate a Caffe "Eltwise" layer into coco IR.
+ *
+ * Loads every bottom blob as a feature object and folds them pairwise with
+ * the reducer selected by eltwise_param().operation() (SUM -> Add,
+ * PROD -> Mul), then appends a single Eval instruction for the result.
+ */
+void EltwiseBuilder::build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ coco::Block *blk = context->block();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+
+ using coco::FeatureLayouts::BCHW;
+
+ // Eltwise combines at least two inputs into one output
+ assert(layer.bottom().size() > 1);
+ assert(layer.top().size() == 1);
+
+ assert(layer.has_eltwise_param());
+ const auto &param = layer.eltwise_param();
+
+ using ::caffe::EltwiseParameter_EltwiseOp;
+ using ::caffe::EltwiseParameter_EltwiseOp_SUM;
+ using ::caffe::EltwiseParameter_EltwiseOp_PROD;
+
+ // A Reducer folds two ops into one; a null lhs acts as the identity
+ // (used for the first operand of the fold below)
+ using Reducer = std::function<coco::Op *(coco::Op * lhs, coco::Op * rhs)>;
+ using ReducerRegistry = std::map<EltwiseParameter_EltwiseOp, Reducer>;
+
+ ReducerRegistry registry;
+
+ // MAX is not supported, yet
+ registry[EltwiseParameter_EltwiseOp_SUM] = [](coco::Op *lhs, coco::Op *rhs) -> coco::Op * {
+ if (lhs == nullptr)
+ {
+ assert(rhs != nullptr);
+ return rhs;
+ }
+
+ assert(lhs != nullptr && rhs != nullptr);
+ assert(lhs->module() == rhs->module());
+ assert(lhs->module() != nullptr);
+
+ auto m = lhs->module();
+ return op_builder(m).push(rhs).push(lhs).add().pop();
+ };
+
+ registry[EltwiseParameter_EltwiseOp_PROD] = [](coco::Op *lhs, coco::Op *rhs) -> coco::Op * {
+ if (lhs == nullptr)
+ {
+ assert(rhs != nullptr);
+ return rhs;
+ }
+
+ assert(lhs != nullptr && rhs != nullptr);
+ assert(lhs->module() == rhs->module());
+ assert(lhs->module() != nullptr);
+
+ auto m = lhs->module();
+ return op_builder(m).push(rhs).push(lhs).mul().pop();
+ };
+
+ // coeff is not supported, yet
+ assert(!param.coeff().size());
+
+ // Decide appropriate reduce function (throws std::out_of_range for
+ // unsupported operations such as MAX)
+ auto reduce = registry.at(param.operation());
+
+ coco::Op *op = nullptr;
+
+ for (const auto &ifm_name : layer.bottom())
+ {
+ auto ifm_shape = shape_ctx.at(ifm_name);
+
+ // NOTE The current implementation does not work in general
+ auto ifm_bag = bag_ctx.at(ifm_name);
+ auto ifm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(BCHW::create(as_feature_shape(ifm_shape)));
+
+ auto load = op_builder(module).load(ifm_obj).pop();
+
+ op = reduce(op, load);
+ }
+
+ assert(op != nullptr);
+
+ const auto ofm_name = layer.top(0);
+ // NOTE(review): output shape is taken from the FIRST bottom — this assumes
+ // every bottom shares the same shape; verify against Caffe's Eltwise contract
+ const auto ofm_shape = shape_ctx.at(layer.bottom(0));
+
+ auto ofm_bag = module->entity()->bag()->create(num_elements(ofm_shape));
+ auto ofm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(BCHW::create(as_feature_shape(ofm_shape)));
+
+ // Create "Eval" instruction
+ auto eval = instr_builder(module).eval(ofm_obj, op);
+
+ // Append the instruction to the block
+ blk->instr()->append(eval);
+
+ // Update bag and shape context
+ bag_ctx[ofm_name] = ofm_bag;
+ shape_ctx[ofm_name] = ofm_shape;
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/Eltwise.h b/compiler/enco/frontend/caffe/src/Layer/Eltwise.h
new file mode 100644
index 000000000..e717077ec
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Eltwise.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ELTWISE_BUILDER_H__
+#define __ELTWISE_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+/**
+ * @brief Builds coco IR for a Caffe "Eltwise" layer (SUM/PROD; see Eltwise.cpp).
+ */
+class EltwiseBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __ELTWISE_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/Input.cpp b/compiler/enco/frontend/caffe/src/Layer/Input.cpp
new file mode 100644
index 000000000..39e44fa31
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Input.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Input.h"
+#include "Convert.h"
+
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+using tensor::num_elements;
+using tensor::LexicalLayout;
+
+namespace caffeimport
+{
+
+/**
+ * @brief Translate a Caffe "Input" layer into coco IR.
+ *
+ * For each top blob, creates a backing bag and a module input with the shape
+ * declared in input_param(), and registers both in the bag/shape contexts so
+ * later layers can resolve their bottoms by name.
+ */
+void InputBuilder::build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+
+ assert(layer.has_input_param());
+ const auto &param = layer.input_param();
+
+ // NOTE(review): assumes input_param provides one shape entry per top blob
+ for (uint32_t n = 0; n < layer.top_size(); ++n)
+ {
+ const auto &name = layer.top(n);
+ const auto shape = as_tensor_shape(param.shape(n));
+
+ auto bag = module->entity()->bag()->create(num_elements(shape));
+ auto input = module->entity()->input()->create(shape);
+
+ input->bag(bag);
+ input->name(name);
+ // Network inputs are laid out lexically (row-major over the tensor shape)
+ input->reorder<LexicalLayout>();
+
+ module->input()->insert(input);
+
+ bag_ctx[name] = bag;
+ shape_ctx[name] = shape;
+ }
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/Input.h b/compiler/enco/frontend/caffe/src/Layer/Input.h
new file mode 100644
index 000000000..2f464748d
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Input.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INPUT_BUILDER_H__
+#define __INPUT_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+/**
+ * @brief Builds coco IR for a Caffe "Input" layer (see Input.cpp).
+ */
+class InputBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __INPUT_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/Pooling.cpp b/compiler/enco/frontend/caffe/src/Layer/Pooling.cpp
new file mode 100644
index 000000000..36220d841
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Pooling.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pooling.h"
+#include "PoolingSpec.h"
+#include "IRBuilder.h"
+
+#include <coco/IR/FeatureLayouts.h>
+
+#include <morph/caffe.h>
+
+#include <cassert>
+#include <functional>
+
+using namespace nncc::core::ADT;
+using namespace morph::caffe;
+
+namespace caffeimport
+{
+
+/**
+ * @brief Translate a Caffe "Pooling" layer into coco IR.
+ *
+ * Creates feature objects for the input/output feature maps, selects a
+ * MaxPool2D or AvgPool2D op builder by PoolingSpec::method(), and appends a
+ * single Eval instruction to the current block.
+ */
+void PoolingBuilder::build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ coco::Block *blk = context->block();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+
+ // Only single-input, single-output pooling layers are handled
+ assert(layer.bottom().size() == 1);
+ assert(layer.top().size() == 1);
+
+ assert(layer.has_pooling_param());
+ const auto &param = layer.pooling_param();
+
+ // PoolingSpec derives the output shape from the param + IFM shape
+ PoolingSpec spec{param};
+ {
+ const auto ifm_name = layer.bottom(0);
+ const auto ifm_shape = shape_ctx.at(ifm_name);
+ spec.ifm_shape(ifm_shape);
+ }
+
+ // Create an object for an input feature map
+ const auto ifm_name = layer.bottom(0);
+ const auto ifm_shape = shape_ctx.at(ifm_name);
+ auto ifm_bag = bag_ctx.at(ifm_name);
+ auto ifm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ const auto ofm_name = layer.top(0);
+ const auto ofm_shape = spec.ofm_shape();
+ auto ofm_bag = module->entity()->bag()->create(num_elements(ofm_shape));
+ auto ofm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ofm_shape)));
+
+ // One op-builder per pooling method; both capture ifm_obj by value
+ using PoolingOpBuilder = std::function<coco::Op *(coco::Module * m, const PoolingSpec &spec)>;
+
+ std::map<PoolingMethod, PoolingOpBuilder> builders;
+
+ // MaxPool2D op builder
+ builders[PoolingMethod::Max] = [ifm_obj](coco::Module *module, const PoolingSpec &spec) {
+ auto load = op_builder(module).load(ifm_obj).pop();
+
+ auto op = module->entity()->op()->create<coco::MaxPool2D>();
+
+ op->arg(load);
+
+ op->window()->height(spec.window_height());
+ op->window()->width(spec.window_width());
+
+ op->stride()->vertical(spec.vertical_stride());
+ op->stride()->horizontal(spec.horizontal_stride());
+
+ op->pad()->top(spec.vertical_pad());
+ op->pad()->bottom(spec.vertical_pad());
+ op->pad()->left(spec.horizontal_pad());
+ op->pad()->right(spec.horizontal_pad());
+
+ return op;
+ };
+
+ // AvgPool2D op builder
+ builders[PoolingMethod::Avg] = [ifm_obj](coco::Module *module, const PoolingSpec &spec) {
+ auto load = op_builder(module).load(ifm_obj).pop();
+
+ auto op = module->entity()->op()->create<coco::AvgPool2D>();
+
+ op->arg(load);
+
+ // NOTE Caffe use static divisor on average pooling
+ op->divisor(coco::AvgPool2D::Divisor::Static);
+
+ op->window()->height(spec.window_height());
+ op->window()->width(spec.window_width());
+
+ op->stride()->vertical(spec.vertical_stride());
+ op->stride()->horizontal(spec.horizontal_stride());
+
+ op->pad()->top(spec.vertical_pad());
+ op->pad()->bottom(spec.vertical_pad());
+ op->pad()->left(spec.horizontal_pad());
+ op->pad()->right(spec.horizontal_pad());
+
+ return op;
+ };
+
+ // Create a pooling op (throws std::out_of_range for unsupported methods)
+ auto builder = builders.at(spec.method());
+ auto op = builder(module, spec);
+
+ // Create an Eval instruction
+ auto ins = instr_builder(module).eval(ofm_obj, op);
+
+ // Append the instruction to the block
+ blk->instr()->append(ins);
+
+ // Update bag and shape context
+ bag_ctx[ofm_name] = ofm_bag;
+ shape_ctx[ofm_name] = ofm_shape;
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/Pooling.h b/compiler/enco/frontend/caffe/src/Layer/Pooling.h
new file mode 100644
index 000000000..e72fd7aef
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Pooling.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __POOLING_BUILDER_H__
+#define __POOLING_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+/**
+ * @brief Builds coco IR for a Caffe "Pooling" layer (Max/Avg; see Pooling.cpp).
+ */
+class PoolingBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __POOLING_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/ReLU.cpp b/compiler/enco/frontend/caffe/src/Layer/ReLU.cpp
new file mode 100644
index 000000000..61e206dc2
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/ReLU.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU.h"
+#include "IRBuilder.h"
+
+#include <coco/IR/FeatureLayouts.h>
+
+#include <morph/caffe.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::caffe;
+
+namespace caffeimport
+{
+
+/**
+ * @brief Translate a Caffe "ReLU" layer into coco IR.
+ *
+ * Appends a Load + ReLU Eval instruction; the output feature map shares the
+ * input's shape. PReLU (relu_param) is rejected by assertion.
+ */
+void ReLUBuilder::build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ coco::Block *blk = context->block();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+
+ assert(layer.bottom().size() == 1);
+ assert(layer.top().size() == 1);
+
+ // PReLU is not supported, yet
+ // TODO Support PReLU
+ assert(!layer.has_relu_param());
+
+ // NOTE The current implementation treats ReLU as Feature op
+ // TODO Support ReLU over general tensor
+ const auto ifm_name = layer.bottom(0);
+ const auto ifm_shape = shape_ctx.at(ifm_name);
+ auto ifm_bag = bag_ctx.at(ifm_name);
+ auto ifm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ifm_shape)));
+
+ // ReLU is shape-preserving: OFM shape == IFM shape
+ const auto ofm_name = layer.top(0);
+ const auto ofm_shape = ifm_shape;
+ auto ofm_bag = module->entity()->bag()->create(num_elements(ofm_shape));
+ auto ofm_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(ofm_shape)));
+
+ // Create a Load Op
+ auto load = op_builder(module).load(ifm_obj).pop();
+
+ // Create a ReLU op
+ auto op = module->entity()->op()->create<coco::ReLU>();
+
+ op->arg(load);
+
+ // Create a Eval instruction
+ auto ins = instr_builder(module).eval(ofm_obj, op);
+
+ // Append the instruction to the block
+ blk->instr()->append(ins);
+
+ // Update bag and shape context
+ bag_ctx[ofm_name] = ofm_bag;
+ shape_ctx[ofm_name] = ofm_shape;
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/ReLU.h b/compiler/enco/frontend/caffe/src/Layer/ReLU.h
new file mode 100644
index 000000000..94836fd8e
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/ReLU.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RELU_BUILDER_H__
+#define __RELU_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+/**
+ * @brief Builds coco IR for a Caffe "ReLU" layer (see ReLU.cpp).
+ */
+class ReLUBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __RELU_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Layer/Scale.cpp b/compiler/enco/frontend/caffe/src/Layer/Scale.cpp
new file mode 100644
index 000000000..b9925978c
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Scale.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Scale.h"
+#include "IRBuilder.h"
+
+#include <coco/IR/FeatureLayouts.h>
+
+#include <morph/caffe.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::caffe;
+
+namespace caffeimport
+{
+
+/**
+ * @brief Translate a Caffe "Scale" layer into coco IR.
+ *
+ * Emits a channel-wise multiplication by the layer's first weight blob, then
+ * (when bias_term() is set) a channel-wise addition of the second blob. Both
+ * steps are expressed as Eval instructions; the output shape equals the input
+ * shape. Only the single-bottom, axis==1 form is supported.
+ */
+void ScaleBuilder::build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const
+{
+ coco::Module *module = context->module();
+ coco::Data *data = context->data();
+ coco::Block *blk = context->block();
+ std::map<std::string, tensor::Shape> &shape_ctx = context->shape_ctx();
+ std::map<std::string, coco::Bag *> &bag_ctx = context->bag_ctx();
+ WeightContext &weight_ctx = context->weight_ctx();
+
+ // TODO Support Scale layer with 2 bottoms
+ assert(layer.bottom().size() == 1);
+ assert(layer.top().size() == 1);
+
+ assert(layer.has_scale_param());
+ const auto &param = layer.scale_param();
+
+ // Only the channel axis with the default num_axes is supported
+ assert(param.axis() == 1);
+ assert(!param.has_num_axes());
+
+ assert(weight_ctx.blob_count(layer.name()) >= 1);
+
+ // NOTE The shape of "Scale" output is same as that of its input
+ // NOTE The current implementation assumes that input/output is of feature type
+ // TODO Support generic tensor arguments
+ auto shape = shape_ctx.at(layer.bottom(0));
+
+ // "last_bag" tracks the most recent intermediate result
+ coco::Bag *last_bag = bag_ctx.at(layer.bottom(0));
+
+ // Create channel-wise multiplication
+ {
+ auto in_bag = last_bag;
+ auto in_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ in_obj->bag(in_bag);
+ in_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(shape)));
+
+ // NOTE(review): the factor bag is sized for the whole feature
+ // (num_elements(shape)) although the BC layout and the fill loop below use
+ // only depth() values — confirm whether a channel-sized bag would suffice
+ auto factor_bag = module->entity()->bag()->create(num_elements(shape));
+ auto factor_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ factor_obj->bag(factor_bag);
+ factor_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(shape)));
+
+ auto out_bag = module->entity()->bag()->create(num_elements(shape));
+ auto out_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ out_obj->bag(out_bag);
+ out_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(shape)));
+
+ auto mul_op = op_builder(module).load(factor_obj).load(in_obj).mul().pop();
+ auto mul_ins = instr_builder(module).eval(out_obj, mul_op);
+
+ blk->instr()->append(mul_ins);
+
+ // Fill "factor" data (one scale value per channel, from blob #0)
+ {
+ data->f32()->allocate(factor_bag);
+
+ auto span = data->f32()->weight(factor_bag);
+ auto blob = weight_ctx.blob_get(layer.name(), 0);
+
+ for (uint32_t ch = 0; ch < factor_obj->shape().depth(); ++ch)
+ {
+ span[ch] = blob->data(ch);
+ }
+ }
+
+ // Update "last_bag"
+ last_bag = out_bag;
+ }
+
+ assert(last_bag != nullptr);
+
+ // Create bias addition (as channel-wise addition)
+ if (param.bias_term())
+ {
+ // Bias is the layer's second weight blob
+ assert(weight_ctx.blob_count(layer.name()) >= 2);
+
+ auto in_bag = last_bag; /* Use the output of the last computation as an input */
+ auto in_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ in_obj->bag(in_bag);
+ in_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(shape)));
+
+ auto bias_bag = module->entity()->bag()->create(num_elements(shape));
+ auto bias_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ bias_obj->bag(bias_bag);
+ bias_obj->layout(coco::FeatureLayouts::BC::create(as_feature_shape(shape)));
+
+ auto out_bag = module->entity()->bag()->create(num_elements(shape));
+ auto out_obj = module->entity()->object()->create<coco::FeatureObject>();
+
+ out_obj->bag(out_bag);
+ out_obj->layout(coco::FeatureLayouts::BCHW::create(as_feature_shape(shape)));
+
+ auto add_op = op_builder(module).load(bias_obj).load(in_obj).add().pop();
+ auto add_ins = instr_builder(module).eval(out_obj, add_op);
+
+ blk->instr()->append(add_ins);
+
+ // Fill bias data (one bias value per channel, from blob #1)
+ {
+ data->f32()->allocate(bias_bag);
+
+ auto bias_span = data->f32()->weight(bias_bag);
+ auto bias_blob = weight_ctx.blob_get(layer.name(), 1);
+
+ for (uint32_t ch = 0; ch < bias_obj->shape().depth(); ++ch)
+ {
+ bias_span[ch] = bias_blob->data(ch);
+ }
+ }
+
+ // Update "last_bag"
+ last_bag = out_bag;
+ }
+
+ // Update bag and shape context
+ {
+ const auto &out_name = layer.top(0);
+ const auto &out_bag = last_bag;
+ const auto &out_shape = shape;
+
+ bag_ctx[out_name] = out_bag;
+ shape_ctx[out_name] = out_shape;
+ }
+}
+
+} // namespace caffeimport
diff --git a/compiler/enco/frontend/caffe/src/Layer/Scale.h b/compiler/enco/frontend/caffe/src/Layer/Scale.h
new file mode 100644
index 000000000..491cc31cf
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Layer/Scale.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SCALE_BUILDER_H__
+#define __SCALE_BUILDER_H__
+
+#include "GraphBuilder.h"
+
+#include "Context.h"
+
+namespace caffeimport
+{
+
+class ScaleBuilder final : public GraphBuilder
+{
+public:
+ void build(const ::caffe::LayerParameter &layer, GraphBuilderContext *context) const override;
+};
+
+} // namespace caffeimport
+
+#endif // __SCALE_BUILDER_H__
diff --git a/compiler/enco/frontend/caffe/src/Padding.h b/compiler/enco/frontend/caffe/src/Padding.h
new file mode 100644
index 000000000..98b018117
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Padding.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Padding.h
+ * @brief This file declares padding-related data structures.
+ */
+#ifndef __PADDING_H__
+#define __PADDING_H__
+
+#include <cstdint>
+#include <vector>
+
+/**
+ * @brief A PaddingBase encapsulates common implementation for derived Padding classes
+ */
+template <typename Derived> class PaddingBase
+{
+public:
+ virtual ~PaddingBase() = default;
+
+public:
+ uint32_t count(void) const { return _values.size(); }
+
+public:
+ uint32_t &value(uint32_t n) { return _values.at(n); }
+ const uint32_t &value(uint32_t n) const { return _values.at(n); }
+
+public:
+ void resize(uint32_t len) { return _values.resize(len); }
+
+private:
+ std::vector<uint32_t> _values;
+};
+
+/**
+ * @brief A RawPadding denotes padding values stored in Caffe model
+ *
+ * @note There may be a mismatch between the number of values in RawPadding and spatial rank
+ */
+struct RawPadding final : public PaddingBase<RawPadding>
+{
+ // Empty
+};
+
+/**
+ * @brief A SpatialPadding denotes padding values for each "spatial" dimension
+ *
+ * @note The number of values in SpatialPadding should be matched with spatial rank
+ */
+struct SpatialPadding final : public PaddingBase<SpatialPadding>
+{
+ // Empty
+};
+
+#endif // __PADDING_H__
diff --git a/compiler/enco/frontend/caffe/src/Padding.test.cpp b/compiler/enco/frontend/caffe/src/Padding.test.cpp
new file mode 100644
index 000000000..cb2495d06
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/Padding.test.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Padding.h"
+
+#include <gtest/gtest.h>
+
+namespace
+{
+
+struct DerivedPadding : PaddingBase<DerivedPadding>
+{
+ // Empty
+};
+
+} // namespace
+
+TEST(PaddingTest, PaddingBase)
+{
+ DerivedPadding pad;
+
+ ASSERT_EQ(pad.count(), 0);
+
+ pad.resize(2);
+
+ ASSERT_EQ(pad.count(), 2);
+ ASSERT_EQ(pad.value(0), 0);
+ ASSERT_EQ(pad.value(1), 0);
+
+ pad.value(1) = 4;
+
+ ASSERT_EQ(pad.count(), 2);
+ ASSERT_EQ(pad.value(0), 0);
+ ASSERT_EQ(pad.value(1), 4);
+}
diff --git a/compiler/enco/frontend/caffe/src/PaddingUtils.cpp b/compiler/enco/frontend/caffe/src/PaddingUtils.cpp
new file mode 100644
index 000000000..ffb4bfbfd
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/PaddingUtils.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PaddingUtils.h"
+
+#include <cassert>
+
+//
+// Section: Raw Padding Builder
+//
+RawPadding RawPaddingBuilder::with(const ::caffe::ConvolutionParameter &param) const
+{
+ RawPadding res;
+
+ if (param.has_pad_h() || param.has_pad_w())
+ {
+ assert(param.pad().size() == 0);
+ assert(param.has_pad_h() && param.has_pad_w());
+
+ res.resize(2);
+ res.value(0) = param.pad_h();
+ res.value(1) = param.pad_w();
+ }
+ else
+ {
+ // NOTE pad and pad_h/pad_w cannot be specified at the same time
+ //
+ // Reference: BaseConvolutionLayer<Dtype>::LayerSetUp in base_conv_layer.cpp
+ assert(!param.has_pad_h() && !param.has_pad_w());
+
+ uint32_t rank = param.pad().size();
+
+ res.resize(rank);
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ res.value(axis) = param.pad(axis);
+ }
+ }
+
+ return res;
+}
+
+RawPadding RawPaddingBuilder::with(const ::caffe::PoolingParameter &param) const
+{
+ RawPadding res;
+
+ if (param.has_pad_h() || param.has_pad_w())
+ {
+ assert(!param.has_pad());
+ assert(param.has_pad_h() && param.has_pad_w());
+
+ res.resize(2);
+ res.value(0) = param.pad_h();
+ res.value(1) = param.pad_w();
+ }
+ else
+ {
+ // NOTE pad and pad_h/pad_w cannot be specified at the same time
+ //
+ // Reference: PoolingLayer<Dtype>::LayerSetUp in pooling_layer.cpp
+ assert(!param.has_pad_h() && !param.has_pad_w());
+
+ if (param.has_pad())
+ {
+ res.resize(1);
+ res.value(0) = param.pad();
+ }
+ }
+
+ return res;
+}
+
+RawPaddingBuilder build_raw_padding(void) { return RawPaddingBuilder{}; }
+
+//
+// Section: Spatial Padding Builder
+//
+SpatialPadding SpatialPaddingBuilder::with(const RawPadding &raw) const
+{
+ const auto spatial_rank = _spatial_rank;
+
+ SpatialPadding res;
+
+ res.resize(spatial_rank);
+
+ if (raw.count() == 0)
+ {
+ // NOTE default padding is 0
+ for (uint32_t spatial_axis = 0; spatial_axis < spatial_rank; ++spatial_axis)
+ {
+ res.value(spatial_axis) = 0;
+ }
+ }
+ else if (raw.count() == 1)
+ {
+ // NOTE One-for-all scheme
+ for (uint32_t spatial_axis = 0; spatial_axis < spatial_rank; ++spatial_axis)
+ {
+ res.value(spatial_axis) = raw.value(0);
+ }
+ }
+ else
+ {
+ // NOTE One-to-one scheme
+ assert(raw.count() == spatial_rank);
+ for (uint32_t spatial_axis = 0; spatial_axis < spatial_rank; ++spatial_axis)
+ {
+ res.value(spatial_axis) = raw.value(spatial_axis);
+ }
+ }
+
+ return res;
+}
+
+SpatialPaddingBuilder build_spatial_padding(uint32_t spatial_rank)
+{
+ return SpatialPaddingBuilder{spatial_rank};
+}
diff --git a/compiler/enco/frontend/caffe/src/PaddingUtils.h b/compiler/enco/frontend/caffe/src/PaddingUtils.h
new file mode 100644
index 000000000..81f32aaa8
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/PaddingUtils.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __PADDING_UTILS_H__
+#define __PADDING_UTILS_H__
+
+#include "Padding.h"
+
+#include <caffe/proto/caffe.pb.h>
+
+/**
+ * @brief Construct a raw padding from each Layer parameter
+ *
+ * @note This class is an auxiliary class for build_raw_padding function below
+ */
+class RawPaddingBuilder
+{
+public:
+ friend RawPaddingBuilder build_raw_padding(void);
+
+private:
+ RawPaddingBuilder() = default;
+
+public:
+ RawPadding with(const ::caffe::ConvolutionParameter &) const;
+ RawPadding with(const ::caffe::PoolingParameter &) const;
+};
+
+/**
+ * RawPaddingBuilder is introduced to support the following code pattern:
+ *
+ * auto raw_padding = build_raw_padding().with(conv_param);
+ * ...
+ */
+RawPaddingBuilder build_raw_padding(void);
+
+/**
+ * @brief Convert a raw padding to a spatial padding of a given spatial rank
+ *
+ * @note This class is an auxiliary class for build_spatial_padding function below
+ */
+class SpatialPaddingBuilder
+{
+public:
+ friend SpatialPaddingBuilder build_spatial_padding(uint32_t spatial_rank);
+
+private:
+ SpatialPaddingBuilder(uint32_t spatial_rank) : _spatial_rank{spatial_rank}
+ {
+ // DO NOTHING
+ }
+
+public:
+ SpatialPadding with(const RawPadding &raw) const;
+
+private:
+ uint32_t _spatial_rank = 0;
+};
+
+/**
+ * SpatialPaddingBuilder is introduced to support the following code pattern:
+ *
+ * auto raw_padding = build_raw_padding().with(conv_param);
+ * auto spatial_padding = build_spatial_padding(4).with(raw_padding);
+ */
+SpatialPaddingBuilder build_spatial_padding(uint32_t spatial_rank);
+
+#endif // __PADDING_UTILS_H__
diff --git a/compiler/enco/frontend/caffe/src/PoolingSpec.cpp b/compiler/enco/frontend/caffe/src/PoolingSpec.cpp
new file mode 100644
index 000000000..36216a2da
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/PoolingSpec.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoolingSpec.h"
+#include "PaddingUtils.h"
+
+#include <map>
+#include <cassert>
+
+PoolingSpec::PoolingSpec(const ::caffe::PoolingParameter &param) : _param(param)
+{
+ // DO NOTHING
+}
+
+PoolingMethod PoolingSpec::method(void) const
+{
+ if (!_param.has_pool())
+ {
+ // Default pooling method is MAX
+ // Reference: http://caffe.berkeleyvision.org/tutorial/layers/pooling.html
+ return PoolingMethod::Max;
+ }
+
+ std::map<::caffe::PoolingParameter_PoolMethod, PoolingMethod> methods;
+
+ // NOTE STOCHASTIC Pooling is not supported, yet
+ // TODO Support STOCHASTIC Pooling
+ methods[::caffe::PoolingParameter_PoolMethod_MAX] = PoolingMethod::Max;
+ methods[::caffe::PoolingParameter_PoolMethod_AVE] = PoolingMethod::Avg;
+
+ assert(_param.has_pool());
+ return methods.at(_param.pool());
+}
+
+uint32_t PoolingSpec::window_height(void) const
+{
+ // NOTE Global pooling is not supported, yet
+ // TODO Support global pooling
+ assert(!_param.global_pooling());
+
+ if (_param.has_kernel_h())
+ {
+ return _param.kernel_h();
+ }
+
+ assert(_param.has_kernel_size());
+ return _param.kernel_size();
+}
+
+uint32_t PoolingSpec::window_width(void) const
+{
+ // NOTE Global pooling is not supported, yet
+ // TODO Support global pooling
+ assert(!_param.global_pooling());
+
+ if (_param.has_kernel_w())
+ {
+ return _param.kernel_w();
+ }
+
+ assert(_param.has_kernel_size());
+ return _param.kernel_size();
+}
+
+uint32_t PoolingSpec::vertical_pad(void) const
+{
+ // NOTE The input of Pooling SHOULD BE a rank-4 tensor.
+ // Reference: PoolingLayer<Dtype>::Reshape in pooling_layer.cpp
+ auto raw_padding = build_raw_padding().with(_param);
+ auto spatial_padding = build_spatial_padding(2 /* SPATIAL RANK */).with(raw_padding);
+ return spatial_padding.value(0 /* H */);
+}
+
+uint32_t PoolingSpec::horizontal_pad(void) const
+{
+ // NOTE The input of Pooling SHOULD BE a rank-4 tensor.
+ // Reference: PoolingLayer<Dtype>::Reshape in pooling_layer.cpp
+ auto raw_padding = build_raw_padding().with(_param);
+ auto spatial_padding = build_spatial_padding(2 /* SPATIAL RANK */).with(raw_padding);
+ return spatial_padding.value(1 /* W */);
+}
+
+uint32_t PoolingSpec::vertical_stride(void) const
+{
+ if (_param.has_stride_h())
+ {
+ return _param.stride_h();
+ }
+
+ if (_param.has_stride())
+ {
+ return _param.stride();
+ }
+
+ return 1;
+}
+
+uint32_t PoolingSpec::horizontal_stride(void) const
+{
+ if (_param.has_stride_w())
+ {
+ return _param.stride_w();
+ }
+
+ if (_param.has_stride())
+ {
+ return _param.stride();
+ }
+
+ return 1;
+}
+
+nncc::core::ADT::tensor::Shape PoolingSpec::ofm_shape(void) const
+{
+ nncc::core::ADT::tensor::Shape res;
+
+ // NOTE Caffe supports only pooling over rank-4 tensor
+ assert(_ifm_shape.rank() == 4);
+ res.resize(4);
+
+  // N (= the number of batches) SHOULD be same
+ res.dim(0) = _ifm_shape.dim(0);
+  // C (= the number of channels) SHOULD be same
+ res.dim(1) = _ifm_shape.dim(1);
+
+ // H and W are derived from IFM, Window, and Padding
+ const auto effective_input_height = _ifm_shape.dim(2) + 2 * vertical_pad() - window_height();
+ const auto effective_input_width = _ifm_shape.dim(3) + 2 * horizontal_pad() - window_width();
+ // TODO Remove the following asserts
+ assert(effective_input_height % vertical_stride() == 0);
+ assert(effective_input_width % horizontal_stride() == 0);
+ res.dim(2) = effective_input_height / vertical_stride() + 1;
+ res.dim(3) = effective_input_width / horizontal_stride() + 1;
+ return res;
+}
diff --git a/compiler/enco/frontend/caffe/src/PoolingSpec.h b/compiler/enco/frontend/caffe/src/PoolingSpec.h
new file mode 100644
index 000000000..655a773ba
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/PoolingSpec.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __POOLING_SPEC_H__
+#define __POOLING_SPEC_H__
+
+#include <caffe/proto/caffe.pb.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+enum class PoolingMethod
+{
+ Max,
+ Avg
+};
+
+class PoolingSpec
+{
+public:
+ PoolingSpec(const ::caffe::PoolingParameter &param);
+
+public:
+ const nncc::core::ADT::tensor::Shape &ifm_shape(void) const { return _ifm_shape; }
+ void ifm_shape(const nncc::core::ADT::tensor::Shape &shape) { _ifm_shape = shape; }
+
+public:
+ PoolingMethod method(void) const;
+
+public:
+ uint32_t window_height(void) const;
+ uint32_t window_width(void) const;
+
+public:
+ uint32_t vertical_pad(void) const;
+ uint32_t horizontal_pad(void) const;
+
+public:
+ uint32_t vertical_stride(void) const;
+ uint32_t horizontal_stride(void) const;
+
+public:
+ nncc::core::ADT::tensor::Shape ofm_shape(void) const;
+
+private:
+ const ::caffe::PoolingParameter &_param;
+ nncc::core::ADT::tensor::Shape _ifm_shape;
+};
+
+#endif // __POOLING_SPEC_H__
diff --git a/compiler/enco/frontend/caffe/src/PoolingSpec.test.cpp b/compiler/enco/frontend/caffe/src/PoolingSpec.test.cpp
new file mode 100644
index 000000000..26bcaa09b
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/PoolingSpec.test.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoolingSpec.h"
+#include "Importer.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <caffe/net.hpp>
+
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/text_format.h>
+
+#include <sstream>
+#include <stdexcept>
+
+#include <gtest/gtest.h>
+
+using namespace nncc::core::ADT;
+
+#define STRING(content) #content
+
+bool from_txt(const std::string &txt, ::caffe::PoolingParameter &out)
+{
+ std::stringstream ss{txt};
+ return from_txt(ss, out);
+}
+
+namespace
+{
+
+class SequentialBuilder
+{
+public:
+ SequentialBuilder(::caffe::NetParameter *net) : _net{net}
+ {
+ // DO NOTHING
+ }
+
+public:
+ bool addLayer(const std::string &prototxt)
+ {
+ auto layer = _net->add_layer();
+ std::stringstream ss{prototxt};
+ ::google::protobuf::io::IstreamInputStream iis{&ss};
+ return google::protobuf::TextFormat::Parse(&iis, layer);
+ }
+
+ bool addInputLayer(const tensor::Shape &shape)
+ {
+ auto param = new ::caffe::InputParameter;
+ {
+ auto s = param->add_shape();
+ for (uint32_t n = 0; n < shape.rank(); ++n)
+ {
+ s->add_dim(shape.dim(n));
+ }
+ }
+
+ auto layer = _net->add_layer();
+
+ layer->set_name("data");
+ layer->set_type("Input");
+ layer->add_top("data");
+ layer->set_allocated_input_param(param);
+
+ return true;
+ }
+
+private:
+ ::caffe::NetParameter *_net;
+};
+
+} // namespace
+
+namespace
+{
+
+class PoolingSpecTest : public ::testing::Test
+{
+protected:
+ tensor::Shape as_tensor_shape(const std::vector<int> &dims)
+ {
+ const uint32_t rank = dims.size();
+
+ tensor::Shape res;
+
+ res.resize(rank);
+
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ res.dim(axis) = dims.at(axis);
+ }
+
+ return res;
+ }
+};
+} // namespace
+
+TEST_F(PoolingSpecTest, ifm_shape)
+{
+ ::caffe::PoolingParameter param;
+ PoolingSpec spec{param};
+
+ const tensor::Shape ifm_shape{1, 3, 244, 244};
+
+ spec.ifm_shape(ifm_shape);
+
+ ASSERT_EQ(spec.ifm_shape(), ifm_shape);
+}
+
+namespace
+{
+} // namespace
+
+TEST_F(PoolingSpecTest, kernel_size_same_for_all)
+{
+ const tensor::Shape ifm_shape{1, 3, 16, 16};
+
+ ::caffe::NetParameter param;
+ {
+ SequentialBuilder builder{&param};
+
+ builder.addInputLayer(ifm_shape);
+
+ // clang-format off
+ const char *prototxt = STRING(
+ name : "pool"
+ type : "Pooling"
+ bottom : "data"
+ top : "pool"
+ pooling_param { kernel_size : 3 }
+ );
+ // clang-format on
+
+ builder.addLayer(prototxt);
+ }
+
+ ::caffe::Net<float> net{param};
+
+ PoolingSpec spec{param.layer(1).pooling_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ ASSERT_EQ(spec.window_height(), 3);
+ ASSERT_EQ(spec.window_width(), 3);
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("pool")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+TEST_F(PoolingSpecTest, pad_for_all)
+{
+ const tensor::Shape ifm_shape{1, 3, 15, 15};
+
+ ::caffe::NetParameter param;
+ {
+ SequentialBuilder builder{&param};
+
+ builder.addInputLayer(ifm_shape);
+
+ // clang-format off
+ const char *prototxt = STRING(
+ name : "pool"
+ type : "Pooling"
+ bottom : "data"
+ top : "pool"
+ pooling_param {
+ pool: MAX
+ kernel_size : 3
+ pad: 2
+ }
+ );
+ // clang-format on
+
+ builder.addLayer(prototxt);
+ }
+
+ ::caffe::Net<float> net{param};
+
+ PoolingSpec spec{param.layer(1).pooling_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ ASSERT_EQ(spec.vertical_pad(), 2);
+ ASSERT_EQ(spec.horizontal_pad(), 2);
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("pool")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+TEST_F(PoolingSpecTest, stride_for_all)
+{
+ const tensor::Shape ifm_shape{1, 3, 15, 15};
+
+ ::caffe::NetParameter param;
+ {
+ SequentialBuilder builder{&param};
+
+ builder.addInputLayer(ifm_shape);
+
+ // clang-format off
+ const char *prototxt = STRING(
+ name : "pool"
+ type : "Pooling"
+ bottom : "data"
+ top : "pool"
+ pooling_param {
+ pool: MAX
+ kernel_size : 3
+ stride: 2
+ }
+ );
+ // clang-format on
+
+ builder.addLayer(prototxt);
+ }
+
+ ::caffe::Net<float> net{param};
+
+ PoolingSpec spec{param.layer(1).pooling_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ ASSERT_EQ(spec.vertical_stride(), 2);
+ ASSERT_EQ(spec.horizontal_stride(), 2);
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("pool")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}
+
+TEST_F(PoolingSpecTest, method_none)
+{
+ const char *prototxt = "";
+
+ ::caffe::PoolingParameter param;
+ from_txt(prototxt, param);
+
+ PoolingSpec spec{param};
+
+ ASSERT_EQ(spec.method(), PoolingMethod::Max);
+}
+
+TEST_F(PoolingSpecTest, method_max)
+{
+ const char *prototxt = "pool: MAX";
+
+ ::caffe::PoolingParameter param;
+ from_txt(prototxt, param);
+
+ PoolingSpec spec{param};
+
+ ASSERT_EQ(spec.method(), PoolingMethod::Max);
+}
+
+TEST_F(PoolingSpecTest, method_avg)
+{
+ const char *prototxt = "pool: AVE";
+
+ ::caffe::PoolingParameter param;
+ from_txt(prototxt, param);
+
+ PoolingSpec spec{param};
+
+ ASSERT_EQ(spec.method(), PoolingMethod::Avg);
+}
diff --git a/compiler/enco/frontend/caffe/src/ShapeQuery.cpp b/compiler/enco/frontend/caffe/src/ShapeQuery.cpp
new file mode 100644
index 000000000..1166453b6
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ShapeQuery.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShapeQuery.h"
+
+#include <cassert>
+
+//
+// AxisSpecifier
+//
+AxisSpecifier axis_specifier(int32_t value) { return AxisSpecifier{value}; }
+
+//
+// ShapeQuery
+//
+uint32_t ShapeQuery::axis(const AxisSpecifier &specifier) const
+{
+ if (specifier.value() > 0)
+ {
+ return static_cast<uint32_t>(specifier.value());
+ }
+
+ assert(_shape->rank() >= static_cast<uint32_t>(-specifier.value()));
+ return static_cast<uint32_t>(_shape->rank() + specifier.value());
+}
+
+ShapeQuery query_on(const nncc::core::ADT::tensor::Shape &shape) { return ShapeQuery{&shape}; }
diff --git a/compiler/enco/frontend/caffe/src/ShapeQuery.h b/compiler/enco/frontend/caffe/src/ShapeQuery.h
new file mode 100644
index 000000000..260b6ad4d
--- /dev/null
+++ b/compiler/enco/frontend/caffe/src/ShapeQuery.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SHAPE_QUERY_H__
+#define __SHAPE_QUERY_H__
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+/**
+ * @brief A wrapper class for an integer number that specifies axis
+ *
+ * Several Caffe layers includes 'axis' parameter (which may be negative) which specifies
+ * some axis required for operation.
+ *
+ * Here are several examples:
+ * - Convolution layer uses 'axis' parameter to specify "channel" axis
+ * (http://caffe.berkeleyvision.org/tutorial/layers/convolution.html)
+ * - Concat layer uses 'axis' parameter to specify axis to be concatenated
+ * (http://caffe.berkeleyvision.org/tutorial/layers/concat.html)
+ *
+ * AxisSpecifier class is introduced to distinguish this 'axis' parameter from other integers
+ * (to prevent possible mistake).
+ */
+class AxisSpecifier
+{
+public:
+ explicit AxisSpecifier(int32_t value) : _value{value}
+ {
+ // DO NOTHING
+ }
+
+public:
+ int32_t value(void) const { return _value; }
+
+private:
+ int32_t _value = 1;
+};
+
+AxisSpecifier axis_specifier(int32_t value);
+
+/**
+ * @brief A wrapper class that allows additional queries over tensor shape.
+ */
+class ShapeQuery
+{
+public:
+ explicit ShapeQuery(const nncc::core::ADT::tensor::Shape *shape) : _shape{shape}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /// @brief Return the dimension number (axis) specified by a given axis specifier
+ uint32_t axis(const AxisSpecifier &) const;
+
+private:
+ const nncc::core::ADT::tensor::Shape *_shape;
+};
+
+ShapeQuery query_on(const nncc::core::ADT::tensor::Shape &);
+
+#endif // __SHAPE_QUERY_H__
diff --git a/compiler/enco/frontend/tflite/CMakeLists.txt b/compiler/enco/frontend/tflite/CMakeLists.txt
new file mode 100644
index 000000000..77159879e
--- /dev/null
+++ b/compiler/enco/frontend/tflite/CMakeLists.txt
@@ -0,0 +1,36 @@
+nnas_find_package(FlatBuffers QUIET)
+
+if(NOT FlatBuffers_FOUND)
+ return()
+endif(NOT FlatBuffers_FOUND)
+
+FlatBuffers_Target(enco_tflite_schema
+ OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated"
+ SCHEMA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/schema"
+ SCHEMA_FILES schema.fbs)
+
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
+
+add_library(enco_tflite_frontend SHARED ${SOURCES})
+target_include_directories(enco_tflite_frontend PRIVATE src)
+target_link_libraries(enco_tflite_frontend enco_intf_frontend)
+target_link_libraries(enco_tflite_frontend enco_intf_cmdline)
+target_link_libraries(enco_tflite_frontend flatbuffers)
+target_link_libraries(enco_tflite_frontend enco_tflite_schema)
+target_link_libraries(enco_tflite_frontend stdex)
+target_link_libraries(enco_tflite_frontend morph)
+target_link_libraries(enco_tflite_frontend cwrap)
+
+nnas_find_package(GTest QUIET)
+
+if(NOT GTest_FOUND)
+ return()
+endif(NOT GTest_FOUND)
+
+add_executable(enco_tflite_frontend_test ${TESTS})
+target_include_directories(enco_tflite_frontend_test PRIVATE src)
+target_link_libraries(enco_tflite_frontend_test gtest_main)
+target_link_libraries(enco_tflite_frontend_test enco_tflite_frontend)
+add_test(enco_tflite_frontend_test enco_tflite_frontend_test)
diff --git a/compiler/enco/frontend/tflite/schema/schema.fbs b/compiler/enco/frontend/tflite/schema/schema.fbs
new file mode 100644
index 000000000..3045351f2
--- /dev/null
+++ b/compiler/enco/frontend/tflite/schema/schema.fbs
@@ -0,0 +1,734 @@
+// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Revision History
+// Version 0: Initial version.
+// Version 1: Add subgraphs to schema.
+// Version 2: Rename operators to conform to NN API.
+// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
+
+namespace tflite;
+
+// This corresponds to the version.
+file_identifier "TFL3";
+// File extension of any written files.
+file_extension "tflite";
+
+// IMPORTANT: All new members of tables, enums and unions must be added at the
+// end to ensure backwards compatibility.
+
+// The type of data stored in a tensor.
+enum TensorType : byte {
+ FLOAT32 = 0,
+ FLOAT16 = 1,
+ INT32 = 2,
+ UINT8 = 3,
+ INT64 = 4,
+ STRING = 5,
+ BOOL = 6,
+ INT16 = 7,
+ COMPLEX64 = 8,
+}
+
+// Parameters for converting a quantized tensor back to float. Given a
+// quantized value q, the corresponding float value f should be:
+// f = scale * (q - zero_point)
+table QuantizationParameters {
+ min:[float]; // For importing back into tensorflow.
+ max:[float]; // For importing back into tensorflow.
+ scale:[float]; // For dequantizing the tensor's values.
+ zero_point:[long];
+}
+
+table Tensor {
+ // The tensor shape. The meaning of each entry is operator-specific but
+ // builtin ops use: [batch size, height, width, number of channels] (That's
+ // Tensorflow's NHWC).
+ shape:[int];
+ type:TensorType;
+ // An index that refers to the buffers table at the root of the model. Or,
+ // if there is no data buffer associated (i.e. intermediate results), then
+ // this is 0 (which refers to an always existent empty buffer).
+ //
+ // The data_buffer itself is an opaque container, with the assumption that the
+ // target device is little-endian. In addition, all builtin operators assume
+ // the memory is ordered such that if `shape` is [4, 3, 2], then index
+ // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
+ buffer:uint;
+ name:string; // For debugging and importing back into tensorflow.
+ quantization:QuantizationParameters; // Optional.
+
+ is_variable:bool = false;
+}
+
+// A list of builtin operators. Builtin operators are slightly faster than custom
+// ones, but not by much. Moreover, while custom operators accept an opaque
+// object containing configuration parameters, builtins have a predetermined
+// set of acceptable options.
+enum BuiltinOperator : byte {
+ ADD = 0,
+ AVERAGE_POOL_2D = 1,
+ CONCATENATION = 2,
+ CONV_2D = 3,
+ DEPTHWISE_CONV_2D = 4,
+ // DEPTH_TO_SPACE = 5,
+ DEQUANTIZE = 6,
+ EMBEDDING_LOOKUP = 7,
+ FLOOR = 8,
+ FULLY_CONNECTED = 9,
+ HASHTABLE_LOOKUP = 10,
+ L2_NORMALIZATION = 11,
+ L2_POOL_2D = 12,
+ LOCAL_RESPONSE_NORMALIZATION = 13,
+ LOGISTIC = 14,
+ LSH_PROJECTION = 15,
+ LSTM = 16,
+ MAX_POOL_2D = 17,
+ MUL = 18,
+ RELU = 19,
+ // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
+ // since different model developers use RELU1 in different ways. Never
+ // create another op called RELU1.
+ RELU_N1_TO_1 = 20,
+ RELU6 = 21,
+ RESHAPE = 22,
+ RESIZE_BILINEAR = 23,
+ RNN = 24,
+ SOFTMAX = 25,
+ SPACE_TO_DEPTH = 26,
+ SVDF = 27,
+ TANH = 28,
+ // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
+ CONCAT_EMBEDDINGS = 29,
+ SKIP_GRAM = 30,
+ CALL = 31,
+ CUSTOM = 32,
+ EMBEDDING_LOOKUP_SPARSE = 33,
+ PAD = 34,
+ UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+ GATHER = 36,
+ BATCH_TO_SPACE_ND = 37,
+ SPACE_TO_BATCH_ND = 38,
+ TRANSPOSE = 39,
+ MEAN = 40,
+ SUB = 41,
+ DIV = 42,
+ SQUEEZE = 43,
+ UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+ STRIDED_SLICE = 45,
+ BIDIRECTIONAL_SEQUENCE_RNN = 46,
+ EXP = 47,
+ TOPK_V2 = 48,
+ SPLIT = 49,
+ LOG_SOFTMAX = 50,
+ // DELEGATE is a special op type for the operations which are delegated to
+ // other backends.
+ // WARNING: Experimental interface, subject to change
+ DELEGATE = 51,
+ BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+ CAST = 53,
+ PRELU = 54,
+ MAXIMUM = 55,
+ ARG_MAX = 56,
+ MINIMUM = 57,
+ LESS = 58,
+ NEG = 59,
+ PADV2 = 60,
+ GREATER = 61,
+ GREATER_EQUAL = 62,
+ LESS_EQUAL = 63,
+ SELECT = 64,
+ SLICE = 65,
+ SIN = 66,
+ TRANSPOSE_CONV = 67,
+ SPARSE_TO_DENSE = 68,
+ TILE = 69,
+ EXPAND_DIMS = 70,
+ EQUAL = 71,
+ NOT_EQUAL = 72,
+ LOG = 73,
+ SUM = 74,
+ SQRT = 75,
+ RSQRT = 76,
+ SHAPE = 77,
+ POW = 78,
+ ARG_MIN = 79,
+ FAKE_QUANT = 80,
+ REDUCE_PROD = 81,
+ REDUCE_MAX = 82,
+ PACK = 83,
+ LOGICAL_OR = 84,
+ ONE_HOT = 85,
+ LOGICAL_AND = 86,
+ LOGICAL_NOT = 87,
+ UNPACK = 88,
+ REDUCE_MIN = 89,
+ FLOOR_DIV = 90,
+ REDUCE_ANY = 91,
+ SQUARE = 92,
+ ZEROS_LIKE = 93,
+ FILL = 94,
+ FLOOR_MOD = 95,
+ RANGE = 96,
+}
+
+// Options for the builtin operators.
+union BuiltinOptions {
+ Conv2DOptions,
+ DepthwiseConv2DOptions,
+ ConcatEmbeddingsOptions,
+ LSHProjectionOptions,
+ Pool2DOptions,
+ SVDFOptions,
+ RNNOptions,
+ FullyConnectedOptions,
+ SoftmaxOptions,
+ ConcatenationOptions,
+ AddOptions,
+ L2NormOptions,
+ LocalResponseNormalizationOptions,
+ LSTMOptions,
+ ResizeBilinearOptions,
+ CallOptions,
+ ReshapeOptions,
+ SkipGramOptions,
+ SpaceToDepthOptions,
+ EmbeddingLookupSparseOptions,
+ MulOptions,
+ PadOptions,
+ GatherOptions,
+ BatchToSpaceNDOptions,
+ SpaceToBatchNDOptions,
+ TransposeOptions,
+ ReducerOptions,
+ SubOptions,
+ DivOptions,
+ SqueezeOptions,
+ SequenceRNNOptions,
+ StridedSliceOptions,
+ ExpOptions,
+ TopKV2Options,
+ SplitOptions,
+ LogSoftmaxOptions,
+ CastOptions,
+ DequantizeOptions,
+ MaximumMinimumOptions,
+ ArgMaxOptions,
+ LessOptions,
+ NegOptions,
+ PadV2Options,
+ GreaterOptions,
+ GreaterEqualOptions,
+ LessEqualOptions,
+ SelectOptions,
+ SliceOptions,
+ TransposeConvOptions,
+ SparseToDenseOptions,
+ TileOptions,
+ ExpandDimsOptions,
+ EqualOptions,
+ NotEqualOptions,
+ ShapeOptions,
+ PowOptions,
+ ArgMinOptions,
+ FakeQuantOptions,
+ PackOptions,
+ LogicalOrOptions,
+ OneHotOptions,
+ LogicalAndOptions,
+ LogicalNotOptions,
+ UnpackOptions,
+ FloorDivOptions,
+ SquareOptions,
+ ZerosLikeOptions,
+ FillOptions,
+ BidirectionalSequenceLSTMOptions,
+ BidirectionalSequenceRNNOptions,
+ UnidirectionalSequenceLSTMOptions,
+ FloorModOptions,
+ RangeOptions,
+}
+
+enum Padding : byte { SAME, VALID }
+
+enum ActivationFunctionType : byte {
+ NONE = 0,
+ RELU = 1,
+ RELU_N1_TO_1 = 2,
+ RELU6 = 3,
+ TANH = 4,
+ SIGN_BIT = 5,
+}
+
+table Conv2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ fused_activation_function:ActivationFunctionType;
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table Pool2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ filter_width:int;
+ filter_height:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DepthwiseConv2DOptions {
+ // Parameters for DepthwiseConv version 1 or above.
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ depth_multiplier:int;
+ fused_activation_function:ActivationFunctionType;
+ // Parameters for DepthwiseConv version 2 or above.
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table ConcatEmbeddingsOptions {
+ num_channels:int;
+ num_columns_per_channel:[int];
+ embedding_dim_per_channel:[int]; // This could be inferred from parameters.
+}
+
+enum LSHProjectionType: byte {
+ UNKNOWN = 0,
+ SPARSE = 1,
+ DENSE = 2,
+}
+
+table LSHProjectionOptions {
+ type: LSHProjectionType;
+}
+
+table SVDFOptions {
+ rank:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow RNNCell.
+table RNNOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow dynamic_rnn with RNNCell.
+table SequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
+table BidirectionalSequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+ merge_outputs: bool;
+}
+
+enum FullyConnectedOptionsWeightsFormat: byte {
+ DEFAULT = 0,
+ SHUFFLED4x16INT8 = 1,
+}
+
+// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
+table FullyConnectedOptions {
+ // Parameters for FullyConnected version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+
+ // Parameters for FullyConnected version 2 or above.
+ weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
+}
+
+table SoftmaxOptions {
+ beta: float;
+}
+
+// An implementation of TensorFlow concat.
+table ConcatenationOptions {
+ axis:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table AddOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table MulOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table L2NormOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table LocalResponseNormalizationOptions {
+ radius:int;
+ bias:float;
+ alpha:float;
+ beta:float;
+}
+
+enum LSTMKernelType : byte {
+ // Full LSTM kernel which supports peephole and projection.
+ FULL = 0,
+ // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
+ BASIC = 1,
+}
+
+// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
+table LSTMOptions {
+ // Parameters for LSTM version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // Parameters for LSTM version 2 or above.
+ // Basic kernel is only supported in version 2 or above.
+ kernel_type: LSTMKernelType = FULL;
+}
+
+// An implementation of TensorFlow dynamic_rnn with LSTMCell.
+table UnidirectionalSequenceLSTMOptions {
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true then first dimension is sequence, otherwise batch.
+ time_major:bool;
+}
+
+table BidirectionalSequenceLSTMOptions {
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true, store the outputs of both directions into the first output.
+ merge_outputs: bool;
+}
+
+table ResizeBilinearOptions {
+ new_height: int (deprecated);
+ new_width: int (deprecated);
+ align_corners: bool;
+}
+
+// A call operation options
+table CallOptions {
+ // The subgraph index that needs to be called.
+ subgraph:uint;
+}
+
+table PadOptions {
+}
+
+table PadV2Options {
+}
+
+table ReshapeOptions {
+ new_shape:[int];
+}
+
+table SpaceToBatchNDOptions {
+}
+
+table BatchToSpaceNDOptions {
+}
+
+table SkipGramOptions {
+ ngram_size: int;
+ max_skip_size: int;
+ include_all_ngrams: bool;
+}
+
+table SpaceToDepthOptions {
+ block_size: int;
+}
+
+table SubOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DivOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table TopKV2Options {
+}
+
+enum CombinerType : byte {
+ SUM = 0,
+ MEAN = 1,
+ SQRTN = 2,
+}
+
+table EmbeddingLookupSparseOptions {
+ combiner:CombinerType;
+}
+
+table GatherOptions {
+ axis: int;
+}
+
+table TransposeOptions {
+}
+
+table ExpOptions {
+}
+
+table ReducerOptions {
+ keep_dims: bool;
+}
+
+table SqueezeOptions {
+ squeeze_dims:[int];
+}
+
+table SplitOptions {
+ num_splits: int;
+}
+
+table StridedSliceOptions {
+ begin_mask: int;
+ end_mask: int;
+ ellipsis_mask: int;
+ new_axis_mask: int;
+ shrink_axis_mask: int;
+}
+
+table LogSoftmaxOptions {
+}
+
+table CastOptions {
+ in_data_type: TensorType;
+ out_data_type: TensorType;
+}
+
+table DequantizeOptions {
+}
+
+table MaximumMinimumOptions {
+}
+
+table TileOptions {
+}
+
+table ArgMaxOptions {
+ output_type : TensorType;
+}
+
+table ArgMinOptions {
+ output_type : TensorType;
+}
+
+table GreaterOptions {
+}
+
+table GreaterEqualOptions {
+}
+
+table LessOptions {
+}
+
+table LessEqualOptions {
+}
+
+table NegOptions {
+}
+
+table SelectOptions {
+}
+
+table SliceOptions {
+}
+
+table TransposeConvOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+}
+
+table ExpandDimsOptions {
+}
+
+table SparseToDenseOptions {
+ validate_indices:bool;
+}
+
+table EqualOptions {
+}
+
+table NotEqualOptions {
+}
+
+table ShapeOptions {
+ // Optional output type of the operation (int32 or int64). Defaults to int32.
+ out_type : TensorType;
+}
+
+table PowOptions {
+}
+
+table FakeQuantOptions {
+ // Parameters supported by version 1:
+ min:float;
+ max:float;
+ num_bits:int;
+
+ // Parameters supported by version 2:
+ narrow_range:bool;
+}
+
+table PackOptions {
+ values_count:int;
+ axis:int;
+}
+
+table LogicalOrOptions {
+}
+
+table OneHotOptions {
+ axis:int;
+}
+
+table LogicalAndOptions {
+}
+
+table LogicalNotOptions {
+}
+
+table UnpackOptions {
+ num:int;
+ axis:int;
+}
+
+table FloorDivOptions {
+}
+
+table SquareOptions {
+}
+
+table ZerosLikeOptions {
+}
+
+table FillOptions {
+}
+
+table FloorModOptions {
+}
+
+table RangeOptions {
+}
+
+// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
+// builtin, or a string if the operator is custom.
+table OperatorCode {
+ builtin_code:BuiltinOperator;
+ custom_code:string;
+
+ // The version of the operator. The version need to be bumped whenever new
+ // parameters are introduced into an op.
+ version:int = 1;
+}
+
+enum CustomOptionsFormat : byte {
+ FLEXBUFFERS = 0,
+}
+
+// An operator takes tensors as inputs and outputs. The type of operation being
+// performed is determined by an index into the list of valid OperatorCodes,
+// while the specifics of each operation are configured using builtin_options
+// or custom_options.
+table Operator {
+ // Index into the operator_codes array. Using an integer here avoids
+ // complicated map lookups.
+ opcode_index:uint;
+
+ // Optional input and output tensors are indicated by -1.
+ inputs:[int];
+ outputs:[int];
+
+ builtin_options:BuiltinOptions;
+ custom_options:[ubyte];
+ custom_options_format:CustomOptionsFormat;
+
+ // A list of booleans indicating the input tensors which are being mutated by
+ // this operator.(e.g. used by RNN and LSTM).
+ // For example, if the "inputs" array refers to 5 tensors and the second and
+ // fifth are mutable variables, then this list will contain
+ // [false, true, false, false, true].
+ //
+ // If the list is empty, no variable is mutated in this operator.
+ // The list either has the same length as `inputs`, or is empty.
+ mutating_variable_inputs:[bool];
+}
+
+// The root type, defining a subgraph, which typically represents an entire
+// model.
+table SubGraph {
+ // A list of all tensors used in this subgraph.
+ tensors:[Tensor];
+
+ // Indices of the tensors that are inputs into this subgraph. Note this is
+ // the list of non-static tensors that feed into the subgraph for inference.
+ inputs:[int];
+
+ // Indices of the tensors that are outputs out of this subgraph. Note this is
+ // the list of output tensors that are considered the product of the
+ // subgraph's inference.
+ outputs:[int];
+
+ // All operators, in execution order.
+ operators:[Operator];
+
+ // Name of this subgraph (used for debugging).
+ name:string;
+}
+
+// Table of raw data buffers (used for constant tensors). Referenced by tensors
+// by index. The generous alignment accommodates mmap-friendly data structures.
+table Buffer {
+ data:[ubyte] (force_align: 16);
+}
+
+table Model {
+ // Version of the schema.
+ version:uint;
+
+ // A list of all operator codes used in this model. This is
+ // kept in order because operators carry an index into this
+ // vector.
+ operator_codes:[OperatorCode];
+
+ // All the subgraphs of the model. The 0th is assumed to be the main
+ // model.
+ subgraphs:[SubGraph];
+
+ // A description of the model.
+ description:string;
+
+ // Buffers of the model.
+ // Note the 0th entry of this array must be an empty buffer (sentinel).
+ // This is a convention so that tensors without a buffer can provide 0 as
+ // their buffer.
+ buffers:[Buffer];
+
+ // Metadata about the model. Indirects into the existing buffers list.
+ metadata_buffer:[int];
+}
+
+root_type Model;
diff --git a/compiler/enco/frontend/tflite/schema/schema.meta b/compiler/enco/frontend/tflite/schema/schema.meta
new file mode 100644
index 000000000..8cc1f4e62
--- /dev/null
+++ b/compiler/enco/frontend/tflite/schema/schema.meta
@@ -0,0 +1,2 @@
+Commit: 24963954a84a3e85dc8dfe79a15a01dc33fedab4
+URL: https://github.com/tensorflow/tensorflow/blob/2496395/tensorflow/contrib/lite/schema/schema.fbs
diff --git a/compiler/enco/frontend/tflite/src/Context.cpp b/compiler/enco/frontend/tflite/src/Context.cpp
new file mode 100644
index 000000000..ef030dc5d
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Context.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Context.h"
+
+#include "Convert.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <map>
+#include <sstream>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+void TensorContext::prepare(const tflite::SubGraph *graph)
+{
+ for (uint32_t tensor_id = 0; tensor_id < graph->tensors()->size(); ++tensor_id)
+ {
+ auto const tensor_info = graph->tensors()->Get(tensor_id);
+ auto const tensor_name = tensor_info->name()->str();
+ auto const tensor_shape = as_tensor_shape(tensor_info->shape());
+ auto const tensor_type = tensor_info->type();
+
+ _name_ctx[tensor_id] = tensor_name;
+ _shape_ctx[tensor_id] = tensor_shape;
+ _type_ctx[tensor_id] = tensor_type;
+ }
+}
+
+TflOpCodeContext::TflOpCodeContext(
+ const flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>> *opcodes)
+{
+ for (const tflite::OperatorCode *opcode : *opcodes)
+ {
+ _opcodes.push_back(opcode);
+ }
+}
+
+tflite::BuiltinOperator TflOpCodeContext::builtin_code(const tflite::Operator *op) const
+{
+ uint32_t index = op->opcode_index();
+ assert(index < _opcodes.size());
+ const tflite::OperatorCode *opcode = _opcodes.at(index);
+ return opcode->builtin_code();
+}
+
+std::string TflOpCodeContext::opcode_name(const tflite::Operator *op) const
+{
+ uint32_t index = op->opcode_index();
+ assert(index < _opcodes.size());
+ const tflite::OperatorCode *opcode = _opcodes.at(index);
+
+ if (!is_valid(opcode))
+ {
+ std::ostringstream oss;
+ oss << "(invalid: " << index << ")";
+ return oss.str();
+ }
+
+ if (is_custom(opcode))
+ {
+ if (!opcode->custom_code())
+ return "(invalid custom)";
+
+ return opcode->custom_code()->c_str();
+ }
+
+ tflite::BuiltinOperator code = opcode->builtin_code();
+ return EnumNameBuiltinOperator(code);
+}
+
+bool TflOpCodeContext::is_valid(const tflite::OperatorCode *opcode)
+{
+ tflite::BuiltinOperator code = opcode->builtin_code();
+ return (tflite::BuiltinOperator_MIN <= code && code <= tflite::BuiltinOperator_MAX);
+}
+
+bool TflOpCodeContext::is_custom(const tflite::OperatorCode *opcode)
+{
+ tflite::BuiltinOperator code = opcode->builtin_code();
+ return (code == tflite::BuiltinOperator_CUSTOM);
+}
+
+TflBufferContext::TflBufferContext(const tflite::Model *tfl_model)
+{
+ const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>> *tfl_buffers;
+
+ tfl_buffers = tfl_model->buffers();
+
+ for (uint32_t buffer_id = 0; buffer_id < tfl_buffers->size(); ++buffer_id)
+ {
+ _buffer_ctx[buffer_id] = (*tfl_buffers)[buffer_id];
+ }
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Context.h b/compiler/enco/frontend/tflite/src/Context.h
new file mode 100644
index 000000000..f72385f9a
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Context.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+#include "Convert.h"
+#include "TensorBags.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <schema_generated.h>
+
+#include <map>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+/**
+ * @brief Extracts and holds operand(tensor) information such as name, shape, and type
+ */
+class TensorContext
+{
+public:
+ void prepare(const tflite::SubGraph *graph);
+
+ const std::string &name(uint32_t tensor_id) { return _name_ctx[tensor_id]; }
+ const tensor::Shape &shape(uint32_t tensor_id) { return _shape_ctx[tensor_id]; }
+ const tflite::TensorType &type(uint32_t tensor_id) { return _type_ctx[tensor_id]; }
+
+private:
+ std::map<uint32_t, std::string> _name_ctx;
+ std::map<uint32_t, tensor::Shape> _shape_ctx;
+ std::map<uint32_t, tflite::TensorType> _type_ctx;
+};
+
+/**
+ * @brief Class that holds operator codes and related methods
+ */
+class TflOpCodeContext
+{
+public:
+ TflOpCodeContext(const flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>> *opcodes);
+
+ /**
+ * @brief Returns BuiltinOperator value of the operator
+ */
+ tflite::BuiltinOperator builtin_code(const tflite::Operator *op) const;
+
+ /**
+ * @brief Returns human readable name of the operator code of the operator
+ *
+ * @note TF lite InterpreterBuilder sets an error state and returns error code
+ * for invalid opcode. Here we just return human readable message as
+ * this method returns a name for the operator code.
+ */
+ std::string opcode_name(const tflite::Operator *op) const;
+
+public:
+ static bool is_valid(const tflite::OperatorCode *opcode);
+ static bool is_custom(const tflite::OperatorCode *opcode);
+
+private:
+ std::vector<const tflite::OperatorCode *> _opcodes;
+};
+
+/**
+ * @brief Class to read and provide buffer information of tflite
+ */
+class TflBufferContext
+{
+public:
+ template <typename T> struct TflBuffer
+ {
+ TflBuffer(const T *p, size_t s) : ptr{p}, len{s} {};
+ const T *ptr;
+ size_t len;
+ };
+
+public:
+ explicit TflBufferContext(const tflite::Model *tfl_model);
+
+public:
+ template <typename T>
+ TflBuffer<T> tensor_buffer(const tflite::SubGraph *graph, uint32_t tensor_idx) const
+ {
+ TflBufferContext::TflBuffer<T> res{nullptr, 0};
+ const auto *tensor = graph->tensors()->Get(tensor_idx);
+ uint32_t tfl_buf_id = tensor->buffer();
+
+ assert(_buffer_ctx.size() > tfl_buf_id);
+
+ const tflite::Buffer *tfl_buffer = _buffer_ctx.at(tfl_buf_id);
+
+ if (auto *array = tfl_buffer->data())
+ {
+ if (size_t size = array->size())
+ {
+ assert(size % sizeof(T) == 0);
+
+ res.len = size / sizeof(T);
+ res.ptr = reinterpret_cast<const T *>(array->data());
+ }
+ }
+
+ return res;
+ }
+
+private:
+ std::map<uint32_t /* Buffer ID */, const tflite::Buffer *> _buffer_ctx;
+};
+
+/**
+ * @brief Class to store context to build IR from tflite
+ */
+class GraphBuilderContext
+{
+public:
+ explicit GraphBuilderContext(coco::Module *m, coco::Data *d, coco::Block *block,
+ TensorBags &tensor_bags, TensorContext &tensor_context,
+ TflBufferContext &buffer_context, const tflite::SubGraph *graph)
+ : _m(m), _d(d), _block(block), _tensor_bags(tensor_bags), _tensor_context(tensor_context),
+ _buffer_context(buffer_context), _graph(graph)
+ {
+ // DO NOTHING
+ }
+
+ GraphBuilderContext() = delete;
+ GraphBuilderContext(const GraphBuilderContext &) = delete;
+ GraphBuilderContext(GraphBuilderContext &&) = delete;
+
+public:
+ coco::Module *m() { return _m; }
+ coco::Data *d() { return _d; }
+ coco::Block *block() { return _block; }
+ TensorContext &tensor() { return _tensor_context; }
+ TensorBags &bags() { return _tensor_bags; }
+ TflBufferContext &buffer() { return _buffer_context; }
+ const tflite::SubGraph *graph() { return _graph; }
+
+private:
+ coco::Module *_m;
+ coco::Data *_d;
+ coco::Block *_block;
+ TensorContext &_tensor_context;
+ TensorBags &_tensor_bags;
+ TflBufferContext &_buffer_context;
+ const tflite::SubGraph *_graph;
+};
+
+} // namespace tflimport
+
+#endif // __CONTEXT_H__
diff --git a/compiler/enco/frontend/tflite/src/Convert.cpp b/compiler/enco/frontend/tflite/src/Convert.cpp
new file mode 100644
index 000000000..ffae95d01
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Convert.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convert.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <schema_generated.h>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+IndexVector as_index_vector(const flatbuffers::Vector<int32_t> *array)
+{
+ const uint32_t size = array->size();
+
+ std::vector<int32_t> res(size);
+
+ for (uint32_t i = 0; i < size; i++)
+ {
+ res[i] = array->Get(i);
+ }
+
+ return res;
+}
+
+tensor::Shape as_tensor_shape(const flatbuffers::Vector<int32_t> *shape)
+{
+ const uint32_t rank = shape->size();
+
+ tensor::Shape res;
+
+ res.resize(rank);
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ res.dim(axis) = shape->Get(axis);
+ }
+
+ return res;
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Convert.h b/compiler/enco/frontend/tflite/src/Convert.h
new file mode 100644
index 000000000..fb4c248bf
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Convert.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVERT_H__
+#define __CONVERT_H__
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <schema_generated.h>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+using IndexVector = std::vector<int32_t>;
+
+/**
+ * @brief Converts flatbuffers::Vector to IndexVector
+ */
+IndexVector as_index_vector(const flatbuffers::Vector<int32_t> *array);
+
+/**
+ * @brief Converts flatbuffers::Vector to nncc::core::ADT::tensor::Shape
+ */
+tensor::Shape as_tensor_shape(const flatbuffers::Vector<int32_t> *shape);
+
+} // namespace tflimport
+
+#endif // __CONVERT_H__
diff --git a/compiler/enco/frontend/tflite/src/Entry.cpp b/compiler/enco/frontend/tflite/src/Entry.cpp
new file mode 100644
index 000000000..c69e18074
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Entry.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+#include "RawModelLoader.h"
+
+#include <cmdline/View.h>
+
+#include <stdex/Memory.h>
+
+#include <fstream>
+#include <cassert>
+
+using stdex::make_unique;
+
+extern "C" std::unique_ptr<enco::Frontend> make_frontend(const cmdline::View &cmdline)
+{
+ assert(cmdline.size() == 1); // tflite file name
+
+ auto model = load_from(cmdline.at(0));
+
+ return make_unique<Frontend>(std::move(model));
+}
diff --git a/compiler/enco/frontend/tflite/src/Frontend.cpp b/compiler/enco/frontend/tflite/src/Frontend.cpp
new file mode 100644
index 000000000..c64f181f4
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Frontend.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+#include "Context.h"
+#include "Convert.h"
+#include "TensorBags.h"
+#include "GraphBuilderRegistry.h"
+
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <iostream>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+/**
+ * @brief Set module input operands and its information
+ */
+void set_module_inputs(coco::Module *m, TensorContext &ctx, TensorBags &bags,
+ const IndexVector &inputs)
+{
+ for (uint32_t n = 0; n < inputs.size(); ++n)
+ {
+ auto const tensor_id = inputs.at(n);
+
+ auto const tensor_name = ctx.name(tensor_id);
+ auto const tensor_shape = ctx.shape(tensor_id);
+ auto const tensor_bag = bags.bag(tensor_id);
+
+ auto input = m->entity()->input()->create(tensor_shape);
+
+ input->name(tensor_name);
+ input->bag(tensor_bag);
+ input->reorder<tensor::LexicalLayout>();
+
+ m->input()->insert(input);
+ }
+}
+
+/**
+ * @brief Set module output operands and its information
+ */
+void set_module_outputs(coco::Module *m, TensorContext &ctx, TensorBags &bags,
+ const IndexVector &outputs)
+{
+ for (uint32_t n = 0; n < outputs.size(); ++n)
+ {
+ auto const tensor_id = outputs.at(n);
+
+ auto const tensor_name = ctx.name(tensor_id);
+ auto const tensor_shape = ctx.shape(tensor_id);
+ auto const tensor_bag = bags.bag(tensor_id);
+
+ auto output = m->entity()->output()->create(tensor_shape);
+
+ output->name(tensor_name);
+ output->bag(tensor_bag);
+ output->reorder<tensor::LexicalLayout>();
+
+ m->output()->insert(output);
+ }
+}
+
+/**
+ * @brief Copy values of tfl tensors into coco::Data if the data was not copied
+ */
+void copy_tensors(GraphBuilderContext *ctx)
+{
+ auto d = ctx->d();
+
+ // for each bag, check if bag is not allocated but tflite tensor has values
+ for (auto &iter : ctx->bags())
+ {
+ auto tfl_tensor_id = iter.first;
+ auto bag = iter.second;
+
+ auto tfl_buffer = ctx->buffer().tensor_buffer<float>(ctx->graph(), tfl_tensor_id);
+
+    // TODO remove this line when int32 support is ready
+ if (ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_INT32)
+ {
+ std::cout << "*** INT32 COPYING IS NOT SUPPORTED ***" << std::endl;
+ continue;
+ }
+
+ assert(ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_FLOAT32);
+
+ auto span = d->f32()->weight(bag); // TODO support other type
+
+ if (!(span.data() == nullptr && span.size() == 0)) // already allocated
+ continue;
+
+ if (tfl_buffer.ptr == nullptr || tfl_buffer.len == 0) // no data to copy
+ continue;
+
+ d->f32()->allocate(bag);
+
+ auto ifm_span = d->f32()->weight(bag);
+ for (uint32_t idx = 0; idx < tfl_buffer.len; ++idx)
+ {
+ ifm_span[idx] = tfl_buffer.ptr[idx];
+ }
+ }
+}
+
+} // namespace tflimport
+
+Frontend::Frontend(std::unique_ptr<RawModel> &&raw) : _raw{std::move(raw)}
+{
+ // DO NOTHING
+}
+
+enco::Bundle Frontend::load(void) const
+{
+ auto model = _raw->model();
+
+ assert(model->version() == 3);
+ assert(model->subgraphs()->size() == 1);
+
+ auto graph = model->subgraphs()->Get(0);
+
+ auto m = coco::Module::create();
+ auto d = coco::Data::create();
+
+ tflimport::TensorContext tensor_context;
+ tflimport::TensorBags tensor_bags;
+
+ tensor_context.prepare(graph);
+ tensor_bags.prepare(graph, m);
+
+ auto inputs = tflimport::as_index_vector(graph->inputs());
+ auto outputs = tflimport::as_index_vector(graph->outputs());
+
+ tflimport::set_module_inputs(m.get(), tensor_context, tensor_bags, inputs);
+ tflimport::set_module_outputs(m.get(), tensor_context, tensor_bags, outputs);
+
+ auto blk = m->entity()->block()->create();
+ m->block()->append(blk);
+
+ auto opcodes = model->operator_codes();
+
+ tflimport::TflBufferContext buffer_context(model);
+ tflimport::TflOpCodeContext opcode_context(opcodes);
+
+ auto operators = graph->operators();
+
+ tflimport::GraphBuilderContext opbuilder_context(m.get(), d.get(), blk, tensor_bags,
+ tensor_context, buffer_context, graph);
+
+ for (int i = 0; i < operators->Length(); ++i)
+ {
+ const auto *op = operators->Get(i);
+ tflite::BuiltinOperator builtincode = opcode_context.builtin_code(op);
+
+ if (const auto *graph_builder = tflimport::GraphBuilderRegistry::get().lookup(builtincode))
+ {
+ if (!graph_builder->validate(op))
+ {
+ throw std::runtime_error{"Invalid operator"};
+ }
+
+ graph_builder->build(op, &opbuilder_context);
+ }
+ else
+ {
+ std::string opcodename = opcode_context.opcode_name(op);
+ throw std::runtime_error{"Not supported: " + opcodename};
+ }
+
+ // copying unfilled tensor value
+ copy_tensors(&opbuilder_context);
+ }
+
+ // Create "Bundle"
+ enco::Bundle bundle;
+
+ bundle.module(std::move(m));
+ bundle.data(std::move(d));
+
+ return std::move(bundle);
+}
diff --git a/compiler/enco/frontend/tflite/src/Frontend.h b/compiler/enco/frontend/tflite/src/Frontend.h
new file mode 100644
index 000000000..bb0c9cd2c
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Frontend.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FRONTEND_H__
+#define __FRONTEND_H__
+
+#include "RawModel.h"
+
+#include <enco/Frontend.h>
+
+#include <flatbuffers/flatbuffers.h>
+
+#include <memory>
+
+class Frontend final : public enco::Frontend
+{
+public:
+ Frontend(std::unique_ptr<RawModel> &&raw);
+
+public:
+ enco::Bundle load(void) const override;
+
+private:
+ std::unique_ptr<RawModel> _raw;
+};
+
+#endif // __FRONTEND_H__
diff --git a/compiler/enco/frontend/tflite/src/Frontend.test.cpp b/compiler/enco/frontend/tflite/src/Frontend.test.cpp
new file mode 100644
index 000000000..aee6099e7
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Frontend.test.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+
+#include <stdex/Memory.h>
+
+#include <gtest/gtest.h>
+
+using stdex::make_unique;
+
+namespace
+{
+
+struct MockRawModel final : public RawModel
+{
+ const tflite::Model *model(void) const override { return nullptr; }
+};
+
+} // namespace
+
+TEST(FrontendTest, constructor)
+{
+ // Let's test whether Frontend is actually constructible.
+ auto frontend = make_unique<Frontend>(make_unique<MockRawModel>());
+
+ ASSERT_NE(frontend, nullptr);
+}
diff --git a/compiler/enco/frontend/tflite/src/GraphBuilder.h b/compiler/enco/frontend/tflite/src/GraphBuilder.h
new file mode 100644
index 000000000..f2cb57848
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/GraphBuilder.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GRAPH_BUILDER_H__
+#define __GRAPH_BUILDER_H__
+
+#include "Context.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief Parent class of tflite operation graph builders (e.g., Conv2DGraphBuilder)
+ */
+class GraphBuilder
+{
+public:
+ /**
+ * TODO Declare "validate" method as a pure virtual method
+ *
+ * Q: Is it possible to validate T/F Lite model only with this interface?
+ */
+ virtual bool validate(const tflite::Operator *) const { return true; }
+
+ virtual void build(const tflite::Operator *op, GraphBuilderContext *context) const = 0;
+ virtual ~GraphBuilder() {}
+};
+
+} // namespace tflimport
+
+#endif // __GRAPH_BUILDER_H__
diff --git a/compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h b/compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h
new file mode 100644
index 000000000..1ae882e89
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GRAPH_BUILDER_REGISTRY_H__
+#define __GRAPH_BUILDER_REGISTRY_H__
+
+#include "Op/Conv2D.h"
+#include "Op/DepthwiseConv2D.h"
+#include "Op/AveragePool2D.h"
+#include "Op/MaxPool2D.h"
+#include "Op/Concatenation.h"
+#include "Op/ReLU.h"
+#include "Op/ReLU6.h"
+#include "Op/Reshape.h"
+#include "Op/Sub.h"
+#include "Op/Div.h"
+
+#include <schema_generated.h>
+#include <stdex/Memory.h>
+
+#include <map>
+
+using stdex::make_unique;
+
+namespace tflimport
+{
+
+/**
+ * @brief Class to return a graph builder for a given tflite::BuiltinOperator
+ */
+class GraphBuilderRegistry
+{
+public:
+ /**
+ * @brief Returns registered GraphBuilder pointer for BuiltinOperator or
+ * nullptr if not registered
+ */
+ const GraphBuilder *lookup(tflite::BuiltinOperator op) const
+ {
+ if (_builder_map.find(op) == _builder_map.end())
+ return nullptr;
+
+ return _builder_map.at(op).get();
+ }
+
+ static GraphBuilderRegistry &get()
+ {
+ static GraphBuilderRegistry me;
+ return me;
+ }
+
+private:
+ GraphBuilderRegistry()
+ {
+ // add GraphBuilder for each tflite operation.
+ _builder_map[tflite::BuiltinOperator_CONV_2D] = make_unique<Conv2DGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] =
+ make_unique<DepthwiseConv2DGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_AVERAGE_POOL_2D] = make_unique<AvgPool2DGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_MAX_POOL_2D] = make_unique<MaxPool2DGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_CONCATENATION] = make_unique<ConcatenationGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_RELU] = make_unique<ReLUGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_RELU6] = make_unique<ReLU6GraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_RESHAPE] = make_unique<ReshapeGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_SUB] = make_unique<SubGraphBuilder>();
+ _builder_map[tflite::BuiltinOperator_DIV] = make_unique<DivGraphBuilder>();
+ }
+
+private:
+ std::map<tflite::BuiltinOperator, std::unique_ptr<GraphBuilder>> _builder_map;
+};
+
+} // namespace tflimport
+
+#endif // __GRAPH_BUILDER_REGISTRY_H__
diff --git a/compiler/enco/frontend/tflite/src/IRBuilder.h b/compiler/enco/frontend/tflite/src/IRBuilder.h
new file mode 100644
index 000000000..edfe247e1
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/IRBuilder.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file IRBuilder.h
+ * @brief coco IR builders. This code is copied from the enco caffe frontend.
+ */
+#ifndef __IR_BUILDER_H__
+#define __IR_BUILDER_H__
+
+#include "coco/IR/Module.h"
+
+#include <deque>
+
+using namespace nncc::core::ADT;
+
+class OpBuilder
+{
+public:
+ OpBuilder(coco::Module *module) : _module{module}
+ {
+ // module SHOULD BE valid
+ assert(_module != nullptr);
+ }
+
+public:
+ /**
+ * @brief Return true if the internal stack is empty
+ */
+ bool empty(void) const { return _stack.empty(); }
+
+ /**
+ * @brief Return the operation at the top of the internal stack
+ */
+ coco::Op *top(void) const
+ {
+ assert(_stack.size() > 0);
+ return _stack.front();
+ }
+
+ /**
+ * @brief Push op onto the internal stack
+ *
+ * BEFORE| Stack
+ * AFTER | Op; Stack
+ */
+ OpBuilder &push(coco::Op *op)
+ {
+ _stack.push_front(op);
+ return (*this);
+ }
+
+ /**
+ * @brief Create "Load" op and push it onto the internal stack
+ *
+ * BEFORE| Stack
+ * AFTER | Load(obj); Stack
+ */
+ OpBuilder &load(coco::Object *obj)
+ {
+ auto op = _module->entity()->op()->create<coco::Load>();
+ op->object(obj);
+ push(op);
+ return (*this);
+ }
+
+ /**
+ * @brief Create "Add" op and push it onto the internal stack
+ *
+ * BEFORE| Left; Right; Stack
+ * AFTER | Add(Left, Right); Stack
+ */
+ OpBuilder &add(void) { return binary<coco::Add>(); }
+
+ /**
+ * @brief Create "Mul" op and push it onto the internal stack
+ *
+ * BEFORE| Left; Right; Stack
+ * AFTER | Mul(Left, Right); Stack
+ */
+ OpBuilder &mul(void) { return binary<coco::Mul>(); }
+
+ /**
+ * @brief Pop op from the internal stack
+ *
+ * BEFORE| Op; Stack
+ * AFTER | Stack
+ */
+ coco::Op *pop(void)
+ {
+ assert(_stack.size() > 0);
+ auto op = _stack.front();
+ _stack.pop_front();
+ return op;
+ }
+
+private:
+ template <typename ConcreteOp> OpBuilder &binary()
+ {
+ assert(_stack.size() >= 2);
+ auto left = pop();
+ auto right = pop();
+
+ auto op = _module->entity()->op()->create<ConcreteOp>();
+ op->left(left);
+ op->right(right);
+ push(op);
+
+ return (*this);
+ }
+
+private:
+ coco::Module *_module;
+ std::deque<coco::Op *> _stack;
+};
+
+inline OpBuilder op_builder(coco::Module *m) { return OpBuilder{m}; }
+inline OpBuilder op_builder(const std::unique_ptr<coco::Module> &m) { return op_builder(m.get()); }
+
+class InstrBuilder
+{
+public:
+ InstrBuilder(coco::Module *module) : _module{module}
+ {
+ // NOTE _module SHOULD be valid
+ assert(_module != nullptr);
+ }
+
+public:
+ /**
+ * @brief Create "Eval" instruction with a given "Object" and "Op"
+ *
+ * @note "eval(out, op)" will create "%out <- Eval(op)" instruction
+ */
+ coco::Eval *eval(coco::Object *out, coco::Op *op) const
+ {
+ auto ins = _module->entity()->instr()->create<coco::Eval>();
+ ins->op(op);
+ ins->out(out);
+ return ins;
+ }
+
+ /**
+ * @brief Create "Copy" instruction with given two "Object"
+ *
+ * @note "copy(into, from)" will create "%into <- Copy(%from)" instruction
+ */
+ coco::Copy *copy(coco::Object *into, coco::Object *from) const
+ {
+ auto ins = _module->entity()->instr()->create<coco::Copy>();
+ ins->from(from);
+ ins->into(into);
+ return ins;
+ }
+
+private:
+ coco::Module *_module;
+};
+
+using ModuleHandle = std::unique_ptr<coco::Module>;
+
+inline InstrBuilder instr_builder(coco::Module *m) { return InstrBuilder{m}; }
+inline InstrBuilder instr_builder(const ModuleHandle &m) { return instr_builder(m.get()); }
+
+#endif // __IR_BUILDER_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Activation.cpp b/compiler/enco/frontend/tflite/src/Op/Activation.cpp
new file mode 100644
index 000000000..d6215ba34
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Activation.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Activation.h"
+
+#include <IRBuilder.h>
+
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+coco::FeatureObject *build_activation(tflite::ActivationFunctionType act, coco::Block *block,
+ coco::FeatureObject *ifm)
+{
+ assert(ifm != nullptr && ifm->asFeature() != nullptr); // support feature only in this version
+
+ coco::Module *m = block->module();
+
+ auto shape = ifm->asFeature()->shape();
+
+ // creates output object
+ auto output_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto output_bag = m->entity()->bag()->create(num_elements(shape));
+ output_obj->bag(output_bag);
+ output_obj->layout(coco::FeatureLayouts::BHWC::create(shape));
+
+ switch (act)
+ {
+ case tflite::ActivationFunctionType::ActivationFunctionType_NONE:
+ {
+ // Create Copy Instr (copying from ifm to output_obj),
+ // redundant layer but optimized by backend
+ auto copy_ins = instr_builder(m).copy(output_obj, ifm);
+
+ // Append the instruction to the block
+ block->instr()->append(copy_ins);
+ break;
+ }
+ case tflite::ActivationFunctionType::ActivationFunctionType_RELU:
+ {
+ // Create Eval(output_obj, ReLU(load(ifm)))
+ auto load_op = op_builder(m).load(ifm).pop();
+ auto relu_op = m->entity()->op()->create<coco::ReLU>();
+ relu_op->arg(load_op);
+
+ auto eval_ins = instr_builder(m).eval(output_obj, relu_op);
+
+ // Append the instruction to the block
+ block->instr()->append(eval_ins);
+ break;
+ }
+ case tflite::ActivationFunctionType::ActivationFunctionType_RELU6:
+ {
+ // Create Eval(output_obj, ReLU6(load(ifm)))
+ auto load_op = op_builder(m).load(ifm).pop();
+ auto relu6_op = m->entity()->op()->create<coco::ReLU6>();
+ relu6_op->arg(load_op);
+
+ auto eval_ins = instr_builder(m).eval(output_obj, relu6_op);
+
+ // Append the instruction to the block
+ block->instr()->append(eval_ins);
+ break;
+ }
+ default:
+ // TODO support other fused activations
+ assert(false);
+ break;
+ }
+
+ return output_obj;
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Activation.h b/compiler/enco/frontend/tflite/src/Op/Activation.h
new file mode 100644
index 000000000..05306dd41
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Activation.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ACTIVATION_H__
+#define __OP_ACTIVATION_H__
+
+#include <coco/IR/Block.h>
+#include <coco/IR/FeatureObject.h>
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief Add coco::Eval for fused activation.
+ * This method creates an ofm object, appends Eval(ofm object, RELU(...)) into block,
+ * and returns ofm object.
+ */
+coco::FeatureObject *build_activation(tflite::ActivationFunctionType act, coco::Block *block,
+ coco::FeatureObject *ifm);
+} // namespace tflimport
+
+#endif // __OP_ACTIVATION_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp
new file mode 100644
index 000000000..16f68fcdb
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AveragePool2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool AvgPool2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_Pool2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void AvgPool2DGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr); // check if init(..) is called
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // output index 0 : output feature
+ assert(opinputs.size() == 1);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+ // Create an object for an input feature map
+ coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Load op
+ auto coco_load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a AvgPool2D
+ auto coco_avgpool2d = m->entity()->op()->create<coco::AvgPool2D>();
+ auto *params = op->builtin_options_as_Pool2DOptions();
+
+ // NOTE For Tensorflow lite, PaddingExcluded is needed
+ coco_avgpool2d->divisor(coco::AvgPool2D::Divisor::PaddingExcluded);
+
+ coco_avgpool2d->window()->height(params->filter_height());
+ coco_avgpool2d->window()->width(params->filter_width());
+
+ coco_avgpool2d->stride()->vertical(params->stride_h());
+ coco_avgpool2d->stride()->horizontal(params->stride_w());
+
+ coco::Padding2D padding =
+ pool2D_padding(params, ifm_shape, params->filter_width(), params->filter_height());
+
+ coco_avgpool2d->pad()->top(padding.top());
+ coco_avgpool2d->pad()->bottom(padding.bottom());
+ coco_avgpool2d->pad()->left(padding.left());
+ coco_avgpool2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_avgpool2d->arg(coco_load);
+
+ // Create an Eval instruction
+ auto ins = instr_builder(m).eval(ofm_obj, coco_avgpool2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(ins);
+
+ // TODO activation, e.g., relu
+ assert(params->fused_activation_function() ==
+ tflite::ActivationFunctionType::ActivationFunctionType_NONE);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/AveragePool2D.h b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.h
new file mode 100644
index 000000000..3e37e3cad
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_AVERAGEPOOL2D_H__
+#define __OP_AVERAGEPOOL2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for AvgPool2D operator
+ */
+class AvgPool2DGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const tflite::Operator *op) const override;
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_AVERAGEPOOL2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp b/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp
new file mode 100644
index 000000000..ce0f47b21
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Concatenation.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <array>
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+namespace
+{
+
+/**
+ * @brief Convert a numeric tensor axis into a coco::ConcatF::Axis value
+ */
+coco::ConcatF::Axis as_ConcatF_axis(uint32_t axis)
+{
+ // NOTE The feature map (in TensorFlow) is a rank-4 (NHWC) tensor
+ assert(axis < 4);
+
+ coco::ConcatF::Axis res = coco::ConcatF::Axis::Unknown;
+
+ switch (axis)
+ {
+ case 0:
+ res = coco::ConcatF::Axis::Batch;
+ break;
+ case 1:
+ res = coco::ConcatF::Axis::Height;
+ break;
+ case 2:
+ res = coco::ConcatF::Axis::Width;
+ break;
+ case 3:
+ res = coco::ConcatF::Axis::Depth;
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
+
+/**
+ * @brief Convert a coco FeatureShape as an array of 'uint32_t' values
+ */
+std::array<uint32_t, 4> as_dims(const coco::FeatureShape &shape)
+{
+ std::array<uint32_t, 4> res;
+
+ res[0] = shape.batch();
+ res[1] = shape.height();
+ res[2] = shape.width();
+ res[3] = shape.depth();
+
+ return res;
+}
+
+/**
+ * @brief Convert a tensor shape as a coco FeatureShape
+ */
+coco::FeatureShape as_feature_shape(const tensor::Shape &shape)
+{
+ assert(shape.rank() == 4);
+
+ auto const B = shape.dim(0);
+ auto const C = shape.dim(3);
+ auto const H = shape.dim(1);
+ auto const W = shape.dim(2);
+
+ return coco::FeatureShape{B, C, H, W};
+}
+
+} // namespace
+
+namespace tflimport
+{
+
+void ConcatenationGraphBuilder::build(const tflite::Operator *op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ coco::Module *m = context->m();
+ coco::Data *d = context->d();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 ~ N : any number of input features
+ // output index 0 : one output feature
+ assert(opinputs.size() > 0);
+ assert(opoutputs.size() == 1);
+
+ // Default parameter values are referenced from schema_generated.h
+ int32_t concat_axis = 0;
+ tflite::ActivationFunctionType activation = tflite::ActivationFunctionType_NONE;
+
+ if (auto *concatenation_params = op->builtin_options_as_ConcatenationOptions())
+ {
+ activation = concatenation_params->fused_activation_function();
+ concat_axis = concatenation_params->axis();
+
+ const int32_t rank = static_cast<int32_t>(tensor_context.shape(opinputs.at(0)).rank());
+ if (concat_axis < 0)
+ {
+ concat_axis += rank;
+ }
+ assert(concat_axis >= 0);
+ assert(concat_axis < rank);
+ }
+ assert(as_ConcatF_axis(concat_axis) != coco::ConcatF::Axis::Unknown);
+ assert(activation == tflite::ActivationFunctionType_NONE);
+
+ // Construct a vector of input objects
+ std::vector<coco::FeatureObject *> input_objects;
+
+ for (auto &input_index : opinputs)
+ {
+ const tensor::Shape &input_shape = tensor_context.shape(input_index);
+ coco::FeatureObject *input_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *input_bag = bags.bag(input_index);
+ input_obj->bag(input_bag);
+ input_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(input_shape)));
+
+ input_objects.emplace_back(input_obj);
+ }
+
+ coco::FeatureObject *last_feature = input_objects.at(0);
+
+ assert(last_feature != nullptr);
+ assert(last_feature->bag() != nullptr);
+
+ // Update coco IR
+ //
+ // Given a sequence of input features %in[0] / %in[1] / ... / %in[N]
+ // the below code constructs a sequence of eval instructions
+ // - Load is omitted for simplicity
+ //
+ // %tmp = eval(ConcatF(%in[0], %in[1]))
+ // %tmp = eval(ConcatF(%tmp, %in[2]))
+ // ...
+ // %tmp = eval(ConcatF(%tmp, %in[N]))
+ // %out[0] = copy(%tmp)
+ //
+ for (uint32_t n = 1; n < input_objects.size(); ++n)
+ {
+ auto const left_feature = last_feature;
+ auto const left_shape = left_feature->layout()->shape();
+
+ auto right_feature = input_objects.at(n);
+ auto right_shape = right_feature->layout()->shape();
+
+ // Compute output dimensionalities
+ auto compute_out_dims = [&left_shape, &right_shape, concat_axis](void) {
+ std::array<uint32_t, 4> out_dims;
+
+ const auto left_dims = as_dims(left_shape);
+ const auto right_dims = as_dims(right_shape);
+
+ for (uint32_t axis = 0; axis < 4 /* FEATURE MAP RANK */; ++axis)
+ {
+        // The dimensionality of all the axes except the 'concat' axis SHOULD BE IDENTICAL
+ assert((concat_axis == axis) || (left_dims[axis] == right_dims[axis]));
+
+ out_dims[axis] = left_dims[axis];
+ if (axis == concat_axis)
+ {
+ out_dims[axis] += right_dims[axis];
+ }
+ }
+
+ return out_dims;
+ };
+
+ const auto out_dims = compute_out_dims();
+
+ const uint32_t B = out_dims[0 /* BATCH */];
+ const uint32_t C = out_dims[3 /* DEPTH */];
+ const uint32_t H = out_dims[1 /* HEIGHT */];
+ const uint32_t W = out_dims[2 /* WIDTH */];
+
+ const coco::FeatureShape out_shape{B, C, H, W};
+
+ auto out_bag = m->entity()->bag()->create(B * num_elements(out_shape));
+ auto out_feature = m->entity()->object()->create<coco::FeatureObject>();
+
+ out_feature->bag(out_bag);
+ out_feature->layout(coco::FeatureLayouts::BHWC::create(out_shape));
+
+ auto left_load = op_builder(m).load(left_feature).pop();
+ auto right_load = op_builder(m).load(right_feature).pop();
+
+ auto concat_f = m->entity()->op()->create<coco::ConcatF>();
+
+ concat_f->axis(as_ConcatF_axis(concat_axis));
+ concat_f->left(left_load);
+ concat_f->right(right_load);
+
+ auto eval = instr_builder(m).eval(out_feature, concat_f);
+
+    // Append the constructed Eval instruction
+ blk->instr()->append(eval);
+
+ // Update 'last_feature'
+ last_feature = out_feature;
+ }
+
+ // Insert copy instruction from last_feature to output operand
+ int const ofm_idx = opoutputs.at(0);
+ auto const ofm_shape = tensor_context.shape(ofm_idx);
+
+ auto ofm_bag = bags.bag(ofm_idx);
+ auto ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Copy instruction from last into ofm
+ auto copy_ins = instr_builder(m).copy(ofm_obj, last_feature);
+
+ // Append the instruction
+ blk->instr()->append(copy_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Concatenation.h b/compiler/enco/frontend/tflite/src/Op/Concatenation.h
new file mode 100644
index 000000000..eb7625a85
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Concatenation.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CONCATENATION_H__
+#define __OP_CONCATENATION_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Concatenation operator
+ */
+class ConcatenationGraphBuilder : public GraphBuilder
+{
+public:
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_CONCATENATION_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Conv2D.cpp b/compiler/enco/frontend/tflite/src/Op/Conv2D.cpp
new file mode 100644
index 000000000..e9516c0e9
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Conv2D.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Conv2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/KernelLayouts.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool Conv2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_Conv2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void Conv2DGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ // preparation
+ coco::Module *m = context->m();
+ coco::Data *d = context->d();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+ TflBufferContext &buffer_context = context->buffer();
+ const tflite::SubGraph *graph = context->graph();
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // input index 1 : kernel
+ // input index 2 : bias (optional)
+ bool hasBias = (opinputs.size() == 3);
+ assert(opinputs.size() == 2 || hasBias);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ker_idx = opinputs.at(1);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+ const tensor::Shape &ker_shape = tensor_context.shape(ker_idx);
+
+ // Create an input feature map object
+ auto *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+  // Create an output feature map object
+ auto *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+  // Create a kernel object
+ auto *ker_obj = m->entity()->object()->create<coco::KernelObject>();
+ auto *ker_bag = bags.bag(ker_idx);
+ ker_obj->bag(ker_bag);
+ ker_obj->layout(coco::KernelLayouts::NHWC::create(as_kernel_shape(ker_shape)));
+
+ // Create a Load op
+ auto load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a Conv2D op
+ auto coco_conv2d = m->entity()->op()->create<coco::Conv2D>();
+
+ // populating Conv2D objects and options such as stride and padding
+ coco_conv2d->ker(ker_obj);
+
+ auto *conv_params = op->builtin_options_as_Conv2DOptions();
+
+ coco_conv2d->stride()->vertical(conv_params->stride_h());
+ coco_conv2d->stride()->horizontal(conv_params->stride_w());
+
+ // conv_params->padding() to left, top, right, bottom
+ coco::Padding2D padding = conv2D_padding(conv_params, ifm_shape, ker_shape);
+
+ coco_conv2d->pad()->top(padding.top());
+ coco_conv2d->pad()->bottom(padding.bottom());
+ coco_conv2d->pad()->left(padding.left());
+ coco_conv2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_conv2d->arg(load);
+
+ // Object to store Conv2D output
+ auto *conv2d_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *conv2d_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ conv2d_obj->bag(conv2d_bag);
+ conv2d_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create an Eval instruction for Conv2D
+ auto conv2d_ins = instr_builder(m).eval(conv2d_obj, coco_conv2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(conv2d_ins);
+
+ // Last Object to make a copy to Output Object
+ coco::FeatureObject *last_obj = conv2d_obj;
+
+ if (hasBias)
+ {
+ // When there is a bias, use btmp_obj as bias add output
+ // Bias is adding last_obj with bias weight values
+ auto *btmp_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *btmp_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ btmp_obj->bag(btmp_bag);
+ btmp_obj->layout(coco::FeatureLayouts::BHWC::create(ofm_obj->shape()));
+
+ int bias_idx = opinputs.at(2);
+
+ // Create an object for bias
+ auto bias_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *bias_bag = bags.bag(bias_idx);
+ bias_obj->bag(bias_bag);
+ bias_obj->layout(coco::FeatureLayouts::BC::create(ofm_obj->shape()));
+
+ // Create Op of conv2d output (last_obj) + bias values(bias_obj)
+ auto bias_add = op_builder(m).load(last_obj).load(bias_obj).add().pop();
+
+ // Create Instr as bias add result write to btmp_obj
+ auto bias_add_ins = instr_builder(m).eval(btmp_obj, bias_add);
+
+ // Append the instruction
+ blk->instr()->append(bias_add_ins);
+
+ // Update last_obj to btmp_obj
+ last_obj = btmp_obj;
+ }
+
+ // fused activation
+ coco::FeatureObject *act_output =
+ build_activation(conv_params->fused_activation_function(), blk, last_obj);
+
+  // Create Copy Instr of act_output to Output Object
+ auto copy_ins = instr_builder(m).copy(ofm_obj, act_output);
+ blk->instr()->append(copy_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Conv2D.h b/compiler/enco/frontend/tflite/src/Op/Conv2D.h
new file mode 100644
index 000000000..018815bd4
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Conv2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CONV2D_H__
+#define __OP_CONV2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Conv2D operator
+ */
+class Conv2DGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const tflite::Operator *op) const override;
+ void build(const tflite::Operator *op, GraphBuilderContext *context) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_CONV2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp
new file mode 100644
index 000000000..e3d7b263e
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DepthwiseConv2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+
+#include <coco/IR/Module.h>
+#include <coco/IR/KernelLayouts.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool DepthwiseConv2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_DepthwiseConv2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void DepthwiseConv2DGraphBuilder::build(const tflite::Operator *op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ // preparation
+ coco::Module *m = context->m();
+ coco::Data *d = context->d();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+ TflBufferContext &buffer_context = context->buffer();
+ const tflite::SubGraph *graph = context->graph();
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // input index 1 : kernel
+ // input index 2 : bias (optional)
+ bool hasBias = (opinputs.size() == 3);
+ assert(opinputs.size() == 2 || hasBias);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ker_idx = opinputs.at(1);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+ tensor::Shape &ker_shape = const_cast<tensor::Shape &>(tensor_context.shape(ker_idx));
+
+ assert(ifm_shape.rank() == 4);
+ assert(ofm_shape.rank() == 4);
+ assert(ker_shape.rank() == 4);
+
+ assert(ker_shape.dim(0) == 1); // value > 1 was not tested. This value seems 1 in DepthwiseConv2D
+ assert(ifm_shape.dim(3) == ofm_shape.dim(3));
+ assert(ofm_shape.dim(3) == ker_shape.dim(3));
+
+ // Create an input feature map object
+ auto *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+  // Create an output feature map object
+ auto *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+  // Create a kernel object
+ auto *ker_obj = m->entity()->object()->create<coco::KernelObject>();
+ auto *ker_bag = bags.bag(ker_idx);
+ ker_obj->bag(ker_bag);
+
+ // Adjust tflite kernel shape [1, h, w, channel_out] for coco::Kernel.
+ // coco::Kernel will have kernel.count = channel_out, kernel.depth = 1 ( == ker_shape.dim(0))
+ kernel::Shape new_shape{ker_shape.dim(3), 1, ker_shape.dim(1), ker_shape.dim(2)};
+ ker_obj->layout(coco::KernelLayouts::NHWC::create(new_shape));
+
+ // Create a kernel overlay for the kernel object
+ // TODO : support for other types
+ d->f32()->allocate(ker_bag);
+
+ TflBufferContext::TflBuffer<float> buffer = buffer_context.tensor_buffer<float>(graph, ker_idx);
+
+ auto ker_spn = d->f32()->weight(ker_bag);
+
+ // Copy data from tflBuffer of [1, h, w, channel_out] shape to coco::Data, which will be accessed
+ // by coco::KernelLayouts::NHWC
+ for (auto n = 0; n < new_shape.count(); n++)
+ {
+ auto tfl_c = n;
+ for (auto h = 0; h < new_shape.height(); h++)
+ {
+ for (auto w = 0; w < new_shape.width(); w++)
+ {
+ auto hw = new_shape.height() * new_shape.width();
+ for (auto c = 0; c < new_shape.depth(); c++)
+ {
+ auto tfl_n = c;
+ auto hwc = hw * new_shape.depth();
+ auto wc = new_shape.width() * new_shape.depth();
+
+ ker_spn[n * hwc + h * wc + w * new_shape.depth() + c] =
+ buffer.ptr[tfl_n * hw * new_shape.count() + /* new_shape.count() is old c */
+ h * new_shape.width() * new_shape.count() + w * new_shape.count() + tfl_c];
+ }
+ }
+ }
+ }
+
+ // Create a Load op
+ auto load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a coco::Conv2D op for DepthwiseConv2D
+ auto coco_dconv2d = m->entity()->op()->create<coco::Conv2D>();
+
+ // populating objects and options such as stride and padding for DepthwiseConv2D
+ coco_dconv2d->ker(ker_obj);
+
+ // setting params passed from TFLITE DepthwiseConv2DOptions
+ auto dconv_params = op->builtin_options_as_DepthwiseConv2DOptions();
+
+ assert(dconv_params->depth_multiplier() == 1); // other depth_multiplier was not tested
+
+ coco_dconv2d->group(ifm_obj->asFeature()->shape().depth());
+
+ coco_dconv2d->stride()->vertical(dconv_params->stride_h());
+ coco_dconv2d->stride()->horizontal(dconv_params->stride_w());
+
+ coco::Padding2D padding = depthwiseConv2D_padding(dconv_params, ifm_shape, ker_shape);
+ coco_dconv2d->pad()->top(padding.top());
+ coco_dconv2d->pad()->bottom(padding.bottom());
+ coco_dconv2d->pad()->left(padding.left());
+ coco_dconv2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_dconv2d->arg(load);
+
+ // Object to store output for DepthwiseConv2D
+ auto *dconv2d_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *dconv2d_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ dconv2d_obj->bag(dconv2d_bag);
+ dconv2d_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create an Eval instruction for DepthwiseConv2D
+ auto dconv2d_ins = instr_builder(m).eval(dconv2d_obj, coco_dconv2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(dconv2d_ins);
+
+ // Last Object to make a copy to Output Object
+ coco::FeatureObject *last_obj = dconv2d_obj;
+
+ if (hasBias)
+ {
+ // When there is a bias, use btmp_obj as bias add output
+ // Bias is adding last_obj with bias weight values
+ auto *btmp_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *btmp_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ btmp_obj->bag(btmp_bag);
+ btmp_obj->layout(coco::FeatureLayouts::BHWC::create(ofm_obj->shape()));
+
+ int bias_idx = opinputs.at(2);
+
+ // Create an object for bias
+ auto bias_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *bias_bag = bags.bag(bias_idx);
+ bias_obj->bag(bias_bag);
+ bias_obj->layout(coco::FeatureLayouts::BC::create(ofm_obj->shape()));
+
+ // Create Op of conv2d output (last_obj) + bias values(bias_obj)
+ auto bias_add = op_builder(m).load(last_obj).load(bias_obj).add().pop();
+
+ // Create Instr as bias add result write to btmp_obj
+ auto bias_add_ins = instr_builder(m).eval(btmp_obj, bias_add);
+
+ // Append the instruction
+ blk->instr()->append(bias_add_ins);
+
+ // Update last_obj to btmp_obj
+ last_obj = btmp_obj;
+ }
+
+ // fused activation
+ coco::FeatureObject *act_output =
+ build_activation(dconv_params->fused_activation_function(), blk, last_obj);
+
+  // Create Copy Instr of act_output to Output Object
+ auto copy_ins = instr_builder(m).copy(ofm_obj, act_output);
+ blk->instr()->append(copy_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h
new file mode 100644
index 000000000..b36b36b8f
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_DEPTHWISECONV2D_H__
+#define __OP_DEPTHWISECONV2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for DepthwiseConv2D operator
+ */
+class DepthwiseConv2DGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const tflite::Operator *op) const override;
+ void build(const tflite::Operator *op, GraphBuilderContext *context) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_DEPTHWISECONV2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Div.cpp b/compiler/enco/frontend/tflite/src/Op/Div.cpp
new file mode 100644
index 000000000..6b71be2e6
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Div.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Div.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void DivGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : numerator
+ // input index 1 : denominator
+ // output index 0 : result
+ assert(opinputs.size() == 2);
+ assert(opoutputs.size() == 1);
+
+ tflite::ActivationFunctionType activation;
+ if (auto *options = op->builtin_options_as_DivOptions())
+ {
+ activation = options->fused_activation_function();
+ }
+ else
+ {
+ activation = tflite::ActivationFunctionType_NONE;
+ }
+
+ // TODO activation, e.g. ReLU
+ assert(activation == tflite::ActivationFunctionType_NONE);
+
+ auto num_idx = opinputs.at(0);
+ auto denom_idx = opinputs.at(1);
+ auto out_idx = opoutputs.at(0);
+
+ const tensor::Shape &num_shape = tensor_context.shape(num_idx);
+ const tensor::Shape &denom_shape = tensor_context.shape(denom_idx);
+ const tensor::Shape &out_shape = tensor_context.shape(out_idx);
+
+ // TODO Now input/output assumes Feature map, but Div should support generic object type
+ // Create an object for an input
+ auto *num_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *num_bag = bags.bag(num_idx);
+ num_obj->bag(num_bag);
+ num_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(num_shape)));
+
+ auto *denom_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *denom_bag = bags.bag(denom_idx);
+ denom_obj->bag(denom_bag);
+ denom_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(denom_shape)));
+
+ // Create an object for an output
+ auto *out_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *out_bag = bags.bag(out_idx);
+ out_obj->bag(out_bag);
+ out_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(out_shape)));
+
+  // Create Load ops for each input
+ auto coco_load_num = op_builder(m).load(num_obj).pop();
+ auto coco_load_denom = op_builder(m).load(denom_obj).pop();
+
+ // Create a Div op
+ auto coco_div = m->entity()->op()->create<coco::Div>();
+
+ // Link ops
+ coco_div->left(coco_load_num);
+ coco_div->right(coco_load_denom);
+
+ // Create an Eval instruction
+ auto eval_ins = instr_builder(m).eval(out_obj, coco_div);
+
+ // Append the instruction to the block
+ blk->instr()->append(eval_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Div.h b/compiler/enco/frontend/tflite/src/Op/Div.h
new file mode 100644
index 000000000..053d1a441
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Div.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_DIV_H__
+#define __OP_DIV_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Div operator
+ */
+class DivGraphBuilder : public GraphBuilder
+{
+public:
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_DIV_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp
new file mode 100644
index 000000000..ee4406425
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MaxPool2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool MaxPool2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_Pool2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void MaxPool2DGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr); // check if init(..) is called
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // output index 0 : output feature
+ assert(opinputs.size() == 1);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+ // Create an object for an input feature map
+ coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Load op
+ coco::Op *coco_load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a MaxPool2D
+ coco::MaxPool2D *coco_maxpool2d = m->entity()->op()->create<coco::MaxPool2D>();
+ const tflite::Pool2DOptions *params = op->builtin_options_as_Pool2DOptions();
+
+ coco_maxpool2d->window()->height(params->filter_height());
+ coco_maxpool2d->window()->width(params->filter_width());
+
+ coco_maxpool2d->stride()->vertical(params->stride_h());
+ coco_maxpool2d->stride()->horizontal(params->stride_w());
+
+ coco::Padding2D padding =
+ pool2D_padding(params, ifm_shape, params->filter_width(), params->filter_height());
+
+ coco_maxpool2d->pad()->top(padding.top());
+ coco_maxpool2d->pad()->bottom(padding.bottom());
+ coco_maxpool2d->pad()->left(padding.left());
+ coco_maxpool2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_maxpool2d->arg(coco_load);
+
+ // Create an Eval instruction
+ coco::Eval *ins = instr_builder(m).eval(ofm_obj, coco_maxpool2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(ins);
+
+ // TODO activation, e.g., relu
+ assert(params->fused_activation_function() ==
+ tflite::ActivationFunctionType::ActivationFunctionType_NONE);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/MaxPool2D.h b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.h
new file mode 100644
index 000000000..06a828528
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MAXPOOL2D_H__
+#define __OP_MAXPOOL2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for MaxPool2D operator
+ */
+class MaxPool2DGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const tflite::Operator *op) const override;
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_MAXPOOL2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Padding.cpp b/compiler/enco/frontend/tflite/src/Op/Padding.cpp
new file mode 100644
index 000000000..9a0e4ef41
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Padding.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Padding.h"
+
+#include "Convert.h"
+#include "TensorBags.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <map>
+#include <sstream>
+#include <algorithm>
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+coco::Padding2D get_padding(const tensor::Shape &ifm_shape, const int kernel_w, const int kernel_h,
+ tflite::Padding padding, int stride_w, int stride_h,
+ int dilation_w_factor, int dilation_h_factor)
+{
+ assert(stride_w != 0);
+ assert(stride_h != 0);
+ assert(ifm_shape.rank() == 4);
+
+ /**
+ * Compute [top padding + bottom padding] (or [left padding + right padding]).
+ * If this returns an even number, top = return value / 2 and bottom = return value - top
+ * If this returns an odd number, top = return value / 2 and bottom = return value - top (so,
+ * bottom = top + 1)
+ *
+ * Code based on https://www.tensorflow.org/api_guides/python/nn#Convolution
+ */
+ auto compute_padding = [](tflite::Padding padding, int stride, int dilation_rate, int in_size,
+ int filter_size) {
+ int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
+ if (padding == tflite::Padding_SAME)
+ {
+ if (in_size % stride == 0)
+ return std::max(effective_filter_size - stride, 0);
+ else
+ return std::max(effective_filter_size - (in_size % stride), 0);
+ }
+ else // padding == VALID
+ {
+ return 0;
+ }
+ };
+
+ // ifm shape is from order of NHWC. ifm W = dim(2), ifm H = dim(1)
+ int padding_w = compute_padding(padding, stride_w, dilation_w_factor, ifm_shape.dim(2), kernel_w);
+ int padding_h = compute_padding(padding, stride_h, dilation_h_factor, ifm_shape.dim(1), kernel_h);
+
+ coco::Padding2D coco_padding;
+ coco_padding.top(padding_h / 2).bottom(padding_h - padding_h / 2);
+ coco_padding.left(padding_w / 2).right(padding_w - padding_w / 2);
+
+ return coco_padding;
+}
+
+coco::Padding2D pool2D_padding(const tflite::Pool2DOptions *options, const tensor::Shape &ifm_shape,
+ const int filter_w, const int filter_h)
+{
+ return get_padding(ifm_shape, filter_w, filter_h, options->padding(), options->stride_w(),
+ options->stride_h(), 1, 1);
+}
+
+coco::Padding2D conv2D_padding(const tflite::Conv2DOptions *options, const tensor::Shape &ifm_shape,
+ const tensor::Shape &kernel_shape)
+{
+ return get_padding(ifm_shape, kernel_shape.dim(2), kernel_shape.dim(1), /* kernel layout: NHWC */
+ options->padding(), options->stride_w(), options->stride_h(),
+ options->dilation_w_factor(), options->dilation_h_factor());
+}
+
+coco::Padding2D depthwiseConv2D_padding(const tflite::DepthwiseConv2DOptions *options,
+ const tensor::Shape &ifm_shape,
+ const tensor::Shape &kernel_shape)
+{
+ return get_padding(ifm_shape, kernel_shape.dim(2), kernel_shape.dim(1), /* kernel layout: NHWC */
+ options->padding(), options->stride_w(), options->stride_h(),
+ options->dilation_w_factor(), options->dilation_h_factor());
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Padding.h b/compiler/enco/frontend/tflite/src/Op/Padding.h
new file mode 100644
index 000000000..ac84adeb7
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Padding.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_PADDING_H__
+#define __OP_PADDING_H__
+
+#include <coco/IR/Padding2D.h>
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <schema_generated.h>
+
+using namespace nncc::core::ADT; // NOTE(review): using-directive in a public header leaks into every includer; prefer qualifying tensor::Shape
+
+namespace tflimport
+{
+
+coco::Padding2D pool2D_padding(const tflite::Pool2DOptions *options, const tensor::Shape &ifm_shape, // padding for pooling (no dilation)
+                               const int filter_w, const int filter_h);
+
+coco::Padding2D conv2D_padding(const tflite::Conv2DOptions *options, const tensor::Shape &ifm_shape, // padding for Conv2D (kernel shape: NHWC)
+                               const tensor::Shape &kernel_shape);
+
+coco::Padding2D depthwiseConv2D_padding(const tflite::DepthwiseConv2DOptions *options, // padding for DepthwiseConv2D (kernel shape: NHWC)
+                                        const tensor::Shape &ifm_shape,
+                                        const tensor::Shape &kernel_shape);
+
+} // namespace tflimport
+
+#endif // __OP_PADDING_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU.cpp b/compiler/enco/frontend/tflite/src/Op/ReLU.cpp
new file mode 100644
index 000000000..4922f4d1f
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void ReLUGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+  assert(context != nullptr); // check if init(..) is called
+
+  coco::Module *m = context->m();
+  coco::Block *blk = context->block();
+  TensorContext &tensor_context = context->tensor();
+  TensorBags &bags = context->bags();
+
+  IndexVector opinputs = as_index_vector(op->inputs());
+  IndexVector opoutputs = as_index_vector(op->outputs());
+
+  // these are fixed in tflite
+  // input index 0 : input feature
+  // output index 0 : output feature
+  assert(opinputs.size() == 1);
+  assert(opoutputs.size() == 1);
+
+  auto ifm_idx = opinputs.at(0); // tensor id of the input feature map
+  auto ofm_idx = opoutputs.at(0); // tensor id of the output feature map
+
+  const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+  const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+  // Create an object for an input feature map
+  coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+  coco::Bag *ifm_bag = bags.bag(ifm_idx);
+  ifm_obj->bag(ifm_bag);
+  ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape))); // feature data is interpreted as BHWC
+
+  // Create an object for an output feature map
+  coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+  coco::Bag *ofm_bag = bags.bag(ofm_idx);
+  ofm_obj->bag(ofm_bag);
+  ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+  // Create a Load op
+  auto coco_load = op_builder(m).load(ifm_obj).pop();
+
+  // Create a ReLU
+  auto coco_relu = m->entity()->op()->create<coco::ReLU>();
+
+  // Link ops
+  coco_relu->arg(coco_load); // ofm = ReLU(Load(ifm))
+
+  // Create an Eval instruction
+  auto eval_ins = instr_builder(m).eval(ofm_obj, coco_relu);
+
+  // Append the instruction to the block
+  blk->instr()->append(eval_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU.h b/compiler/enco/frontend/tflite/src/Op/ReLU.h
new file mode 100644
index 000000000..c78400d7e
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RELU_H__
+#define __OP_RELU_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for ReLU operator
+ */
+class ReLUGraphBuilder : public GraphBuilder
+{
+public:
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_RELU_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU6.cpp b/compiler/enco/frontend/tflite/src/Op/ReLU6.cpp
new file mode 100644
index 000000000..936fda3e2
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU6.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU6.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void ReLU6GraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+  assert(context != nullptr); // check if init(..) is called
+
+  coco::Module *m = context->m();
+  coco::Block *blk = context->block();
+  TensorContext &tensor_context = context->tensor();
+  TensorBags &bags = context->bags();
+
+  IndexVector opinputs = as_index_vector(op->inputs());
+  IndexVector opoutputs = as_index_vector(op->outputs());
+
+  // these are fixed in tflite
+  // input index 0 : input feature
+  // output index 0 : output feature
+  assert(opinputs.size() == 1);
+  assert(opoutputs.size() == 1);
+
+  int ifm_idx = opinputs.at(0); // NOTE(review): ReLU.cpp uses 'auto' here — consider matching index types
+  int ofm_idx = opoutputs.at(0); // tensor id of the output feature map
+
+  const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+  const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+  // Create an object for an input feature map
+  coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+  coco::Bag *ifm_bag = bags.bag(ifm_idx);
+  ifm_obj->bag(ifm_bag);
+  ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape))); // feature data is interpreted as BHWC
+
+  // Create an object for an output feature map
+  coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+  coco::Bag *ofm_bag = bags.bag(ofm_idx);
+  ofm_obj->bag(ofm_bag);
+  ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+  // Create a Load op
+  auto coco_load = op_builder(m).load(ifm_obj).pop();
+
+  // Create a ReLU6
+  auto coco_relu6 = m->entity()->op()->create<coco::ReLU6>();
+
+  // Link ops
+  coco_relu6->arg(coco_load); // ofm = ReLU6(Load(ifm))
+
+  // Create an Eval instruction
+  auto eval_ins = instr_builder(m).eval(ofm_obj, coco_relu6);
+
+  // Append the instruction to the block
+  blk->instr()->append(eval_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU6.h b/compiler/enco/frontend/tflite/src/Op/ReLU6.h
new file mode 100644
index 000000000..10bcd4f71
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU6.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RELU6_H__
+#define __OP_RELU6_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for ReLU6 operator
+ */
+class ReLU6GraphBuilder : public GraphBuilder
+{
+public:
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_RELU6_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Reshape.cpp b/compiler/enco/frontend/tflite/src/Op/Reshape.cpp
new file mode 100644
index 000000000..9bd473fa9
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Reshape.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Reshape.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void ReshapeGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+  assert(context != nullptr); // check if init(..) is called
+
+  coco::Module *m = context->m();
+  coco::Block *blk = context->block();
+  TensorBags &bags = context->bags();
+
+  IndexVector opinputs = as_index_vector(op->inputs());
+  IndexVector opoutputs = as_index_vector(op->outputs());
+
+  // these are fixed in tflite
+  // input index 0 : input feature
+  // input index 1 : output shape (int32_t), (optional or not, is not clear)
+  // output index 0 : output feature
+  assert(opinputs.size() == 1 || opinputs.size() == 2); // optional second input (target shape) is ignored below
+  assert(opoutputs.size() == 1);
+
+  // Note: there are actually 3 places where we can get output shape from
+  // current TF lite implementation. From output operand shape, second input,
+  // and ReshapeOption (new_shape). Here we use output operand shape
+  int ifm_idx = opinputs.at(0);
+  int ofm_idx = opoutputs.at(0);
+
+  auto ifm_bag = bags.bag(ifm_idx);
+  auto ofm_bag = bags.bag(ofm_idx);
+
+  // TODO: move to InstrBuilder as 'shuffle_elements()'
+  // Create a 1:1 shuffle instruction from ifm into ofm
+  // Note: Reshape is change of shape information and there is no value change
+  // in the bag itself. We implement this as just make a element wise copy of
+  // the bag from input to output. So there is no need of 'reshape' operator
+  auto shuffle_ins = m->entity()->instr()->create<coco::Shuffle>();
+  auto num_elem = ifm_bag->size(); // total element count to copy
+
+  assert(num_elem == ofm_bag->size()); // debug-only: both bags must hold the same number of elements
+
+  shuffle_ins->from(ifm_bag);
+  shuffle_ins->into(ofm_bag);
+
+  for (uint32_t n = 0; n < num_elem; ++n)
+  {
+    const auto from = coco::ElemID(n);
+    const auto into = coco::ElemID(n);
+
+    shuffle_ins->insert(from, into); // identity mapping: element n -> element n
+  }
+
+  // Append the instruction
+  blk->instr()->append(shuffle_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Reshape.h b/compiler/enco/frontend/tflite/src/Op/Reshape.h
new file mode 100644
index 000000000..7447b56c8
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Reshape.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RESHAPE_H__
+#define __OP_RESHAPE_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Reshape operator
+ */
+class ReshapeGraphBuilder : public GraphBuilder
+{
+public:
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_RESHAPE_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Sub.cpp b/compiler/enco/frontend/tflite/src/Op/Sub.cpp
new file mode 100644
index 000000000..62973bb22
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Sub.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sub.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void SubGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+  assert(context != nullptr); // check if init(..) is called
+
+  coco::Module *m = context->m();
+  coco::Block *blk = context->block();
+  TensorContext &tensor_context = context->tensor();
+  TensorBags &bags = context->bags();
+
+  IndexVector opinputs = as_index_vector(op->inputs());
+  IndexVector opoutputs = as_index_vector(op->outputs());
+
+  // these are fixed in tflite
+  // input index 0 : left input feature
+  // input index 1 : right input feature
+  // output index 0 : output feature
+  assert(opinputs.size() == 2);
+  assert(opoutputs.size() == 1);
+
+  // Default parameter values are referenced from schema_generated.h
+  // NOTE builtin_options_as_SubOptions() returns nullptr when the model carries
+  //      no SubOptions; fall back to the default (no fused activation) then.
+  const auto *params = op->builtin_options_as_SubOptions();
+  tflite::ActivationFunctionType activation = tflite::ActivationFunctionType_NONE;
+
+  if (params != nullptr)
+  {
+    activation = params->fused_activation_function();
+  }
+  // TODO Support fused activation functions (e.g. ReLU); only NONE is handled for now
+  assert(activation == tflite::ActivationFunctionType_NONE);
+  (void)activation; // avoid an 'unused variable' warning when NDEBUG strips the assert
+
+  // Construct a vector of input objects
+  std::vector<coco::FeatureObject *> input_objects;
+
+  for (auto &input_index : opinputs)
+  {
+    // Add objects for input feature map
+    const tensor::Shape &input_shape = tensor_context.shape(input_index);
+    coco::FeatureObject *input_obj = m->entity()->object()->create<coco::FeatureObject>();
+    coco::Bag *input_bag = bags.bag(input_index);
+    input_obj->bag(input_bag);
+    input_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(input_shape)));
+
+    input_objects.emplace_back(input_obj);
+  }
+
+  // Create an object for an output feature map
+  int const output_index = opoutputs.at(0);
+  const tensor::Shape &output_shape = tensor_context.shape(output_index);
+  coco::FeatureObject *output_obj = m->entity()->object()->create<coco::FeatureObject>();
+  coco::Bag *output_bag = bags.bag(output_index);
+  output_obj->bag(output_bag);
+  output_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(output_shape)));
+
+  // Create Load ops
+  auto left_load = op_builder(m).load(input_objects[0]).pop();
+  auto right_load = op_builder(m).load(input_objects[1]).pop();
+
+  // Create a Sub
+  auto coco_sub = m->entity()->op()->create<coco::Sub>();
+
+  coco_sub->left(left_load);
+  coco_sub->right(right_load);
+
+  // Create an Eval instruction
+  auto eval = instr_builder(m).eval(output_obj, coco_sub);
+
+  // Append the instruction to the block
+  blk->instr()->append(eval);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Sub.h b/compiler/enco/frontend/tflite/src/Op/Sub.h
new file mode 100644
index 000000000..580d8baa3
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Sub.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SUB_H__
+#define __OP_SUB_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Sub operator
+ */
+class SubGraphBuilder : public GraphBuilder
+{
+public:
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_SUB_H__
diff --git a/compiler/enco/frontend/tflite/src/RawModel.h b/compiler/enco/frontend/tflite/src/RawModel.h
new file mode 100644
index 000000000..02946f1d7
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/RawModel.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RAW_MODEL_H__
+#define __RAW_MODEL_H__
+
+#include "schema_generated.h"
+
+struct RawModel // interface over a loaded .tflite file's raw storage
+{
+  virtual ~RawModel() = default;
+
+  virtual const tflite::Model *model(void) const = 0; // root flatbuffer view; valid only while this RawModel is alive
+};
+
+#endif // __RAW_MODEL_H__
diff --git a/compiler/enco/frontend/tflite/src/RawModelLoader.cpp b/compiler/enco/frontend/tflite/src/RawModelLoader.cpp
new file mode 100644
index 000000000..5c127f37c
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/RawModelLoader.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RawModelLoader.h"
+
+#include "cwrap/Fildes.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+namespace
+{
+
+class MemoryMappedRawModel final : public RawModel
+{
+public:
+  // @require fd and data SHOULD be valid (open descriptor + mmap'ed region of 'size' bytes)
+  explicit MemoryMappedRawModel(int fd, void *data, size_t size) : _fd{fd}, _data{data}, _size{size}
+  {
+    // DO NOTHING
+  }
+
+  // Releases the mapping and the descriptor; this class owns both
+  ~MemoryMappedRawModel()
+  {
+    munmap(_data, _size);
+    close(_fd);
+  }
+
+public:
+  // Delete the assignment operators as well: deleting only the constructors leaves
+  // copy assignment implicitly generated, which would risk double munmap()/close().
+  MemoryMappedRawModel(const MemoryMappedRawModel &) = delete;
+  MemoryMappedRawModel(MemoryMappedRawModel &&) = delete;
+  MemoryMappedRawModel &operator=(const MemoryMappedRawModel &) = delete;
+  MemoryMappedRawModel &operator=(MemoryMappedRawModel &&) = delete;
+
+public:
+  const tflite::Model *model(void) const override { return tflite::GetModel(_data); }
+
+private:
+  int _fd = -1;
+  void *_data = nullptr; size_t _size = 0;
+};
+
+} // namespace
+
+std::unique_ptr<RawModel> load_from(const std::string &path)
+{
+  cwrap::Fildes fildes{open(path.c_str(), O_RDONLY)}; // NOTE(review): presumably Fildes closes the fd on scope exit (covers the early returns) — confirm
+
+  if (fildes.get() == -1)
+  {
+    // Return nullptr on open failure
+    return nullptr;
+  }
+
+  struct stat st;
+  if (fstat(fildes.get(), &st) == -1)
+  {
+    // Return nullptr on fstat failure
+    return nullptr;
+  }
+
+  auto size = st.st_size; // off_t; a zero-byte file makes mmap below fail (EINVAL) -> nullptr
+  auto data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fildes.get(), 0);
+
+  if (data == MAP_FAILED)
+  {
+    // Return nullptr on mmap failure
+    return nullptr;
+  }
+
+  return std::unique_ptr<RawModel>{new MemoryMappedRawModel(fildes.release(), data, size)}; // fd ownership transfers; MemoryMappedRawModel closes it
+}
diff --git a/compiler/enco/frontend/tflite/src/RawModelLoader.h b/compiler/enco/frontend/tflite/src/RawModelLoader.h
new file mode 100644
index 000000000..5d93528de
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/RawModelLoader.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RAW_MODEL_LOADER_H__
+#define __RAW_MODEL_LOADER_H__
+
+#include "RawModel.h"
+
+/**
+ * @brief Load TensorFlow Lite model (as a RawModel) from a given path
+ *
+ * @note May return a nullptr
+ */
+std::unique_ptr<RawModel> load_from(const std::string &path);
+
+#endif // __RAW_MODEL_LOADER_H__
diff --git a/compiler/enco/frontend/tflite/src/TensorBags.h b/compiler/enco/frontend/tflite/src/TensorBags.h
new file mode 100644
index 000000000..29558b85e
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/TensorBags.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TENSOR_BAGS_H__
+#define __TENSOR_BAGS_H__
+
+#include "Convert.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <schema_generated.h>
+
+#include <map>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+/**
+ * @brief Pre-creates coco:Bags for each operands(tensors)
+ */
+class TensorBags
+{
+public:
+  void prepare(const tflite::SubGraph *graph, std::unique_ptr<coco::Module> &m) // one Bag per tensor; bag size = tensor element count
+  {
+    for (uint32_t tensor_id = 0; tensor_id < graph->tensors()->size(); ++tensor_id)
+    {
+      auto const tensor_info = graph->tensors()->Get(tensor_id);
+      auto const tensor_shape = as_tensor_shape(tensor_info->shape());
+      auto const tensor_bag = m->entity()->bag()->create(num_elements(tensor_shape));
+
+      _bag_ctx[tensor_id] = tensor_bag;
+    }
+  }
+
+  coco::Bag *bag(int32_t tensor_id) { return _bag_ctx[tensor_id]; } // NOTE(review): map operator[] default-inserts nullptr for an unknown id — callers must pass prepared ids
+
+public:
+  std::map<uint32_t, coco::Bag *>::iterator begin() { return _bag_ctx.begin(); } // iterate (tensor_id, Bag*) pairs
+
+  std::map<uint32_t, coco::Bag *>::iterator end() { return _bag_ctx.end(); }
+
+private:
+  std::map<uint32_t, coco::Bag *> _bag_ctx; // tensor id -> pre-created Bag
+};
+
+} // namespace tflimport
+
+#endif // __TENSOR_BAGS_H__
diff --git a/compiler/enco/requires.cmake b/compiler/enco/requires.cmake
new file mode 100644
index 000000000..fee0e18e5
--- /dev/null
+++ b/compiler/enco/requires.cmake
@@ -0,0 +1,8 @@
+require("coco")
+require("caffegen")
+require("tflchef")
+require("ann-api")
+require("ann-ref")
+require("nnkit")
+require("cwrap")
+require("enco-intf")
diff --git a/compiler/enco/test/CMakeLists.txt b/compiler/enco/test/CMakeLists.txt
new file mode 100644
index 000000000..5ea6cdadd
--- /dev/null
+++ b/compiler/enco/test/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectories()
diff --git a/compiler/enco/test/basic/000/CMakeLists.txt b/compiler/enco/test/basic/000/CMakeLists.txt
new file mode 100644
index 000000000..20ba3c571
--- /dev/null
+++ b/compiler/enco/test/basic/000/CMakeLists.txt
@@ -0,0 +1,26 @@
+###
+### This test first generates C++ code from an empty model, and check whether is has compile error
+###
+set(PREFIX enco-basic-test-000)
+set(GENERATED_CPP ${PREFIX}.cpp)
+set(GENERATED_ASM ${PREFIX}.embed.S)
+set(GENERATED_BIN ${PREFIX}.bin)
+set(SOURCE_TARGET ${PREFIX}-src)
+set(LIB_TARGET ${PREFIX}-lib)
+
+add_library(${PREFIX}-frontend SHARED enco.test.cpp)
+target_link_libraries(${PREFIX}-frontend enco_intf_cmdline)
+target_link_libraries(${PREFIX}-frontend enco_intf_frontend)
+target_link_libraries(${PREFIX}-frontend stdex)
+
+# NOTE BYPRODUCTS are not specified in order to enforce source code generation
+add_custom_command(OUTPUT ${GENERATED_CPP} ${GENERATED_ASM} ${GENERATED_BIN}
+ COMMAND $<TARGET_FILE:enco-cli>
+ --frontend $<TARGET_FILE:${PREFIX}-frontend>
+ --backend-arg ${PREFIX}
+ DEPENDS enco-cli ${PREFIX}-frontend)
+set_source_files_properties(${GENERATED_ASM} PROPERTIES GENERATED TRUE LANGUAGE C)
+add_library(${LIB_TARGET} SHARED ${GENERATED_CPP} ${GENERATED_ASM})
+# NOTE This line is necessary to compile the generated assembly (it includes the generated bin file)
+target_include_directories(${LIB_TARGET} PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
+target_link_libraries(${LIB_TARGET} PRIVATE ann_api)
diff --git a/compiler/enco/test/basic/000/enco.test.cpp b/compiler/enco/test/basic/000/enco.test.cpp
new file mode 100644
index 000000000..3dbf96613
--- /dev/null
+++ b/compiler/enco/test/basic/000/enco.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <enco/Frontend.h>
+#include <cmdline/View.h>
+
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+
+#include <stdex/Memory.h>
+
+using namespace nncc::core::ADT;
+
+namespace
+{
+
+//
+// Dummy frontend for testing
+//
+struct Frontend final : public enco::Frontend
+{
+  enco::Bundle load(void) const override
+  {
+    auto m = coco::Module::create();
+    auto d = coco::Data::create();
+
+    // Create an input
+    {
+      const tensor::Shape shape{1, 3, 3, 1};
+
+      auto bag = m->entity()->bag()->create(9); // 9 = num_elements of {1, 3, 3, 1}
+      auto input = m->entity()->input()->create(shape);
+
+      input->bag(bag);
+      input->name("input");
+      input->reorder<tensor::LexicalLayout>();
+
+      m->input()->insert(input);
+    }
+
+    // Create an output
+    {
+      const tensor::Shape shape{1, 3, 3, 1};
+
+      auto bag = m->entity()->bag()->create(9); // 9 = num_elements of {1, 3, 3, 1}
+      auto output = m->entity()->output()->create(shape);
+
+      output->bag(bag);
+      output->name("output");
+      output->reorder<tensor::LexicalLayout>();
+
+      m->output()->insert(output);
+    }
+
+    enco::Bundle bundle;
+
+    bundle.module(std::move(m));
+    bundle.data(std::move(d));
+
+    return std::move(bundle); // NOTE(review): may inhibit NRVO; 'return bundle;' is enough unless Bundle is move-only
+  }
+};
+
+} // namespace
+
+extern "C" std::unique_ptr<enco::Frontend> make_frontend(const cmdline::View &cmdline) // NOTE(review): extern "C" suggests this is resolved via dlsym by enco-cli (--frontend) — confirm
+{
+  return stdex::make_unique<Frontend>(); // 'cmdline' is intentionally unused by this dummy frontend
+}
diff --git a/compiler/enco/test/basic/CMakeLists.txt b/compiler/enco/test/basic/CMakeLists.txt
new file mode 100644
index 000000000..5ea6cdadd
--- /dev/null
+++ b/compiler/enco/test/basic/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectories()
diff --git a/compiler/enco/test/binder.cpp b/compiler/enco/test/binder.cpp
new file mode 100644
index 000000000..c8c72fc8b
--- /dev/null
+++ b/compiler/enco/test/binder.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Generated API
+//
+struct Network;
+
+Network *Network_construct();
+void Network_destruct(Network *net);
+
+unsigned Network_input_count(const Network *);
+const char *Network_input_name(const Network *, unsigned n);
+unsigned Network_input_rank(const Network *, unsigned n);
+unsigned Network_input_dim(const Network *, unsigned n, unsigned axis);
+void Network_input_bind(Network *net, unsigned n, const void *ptr, unsigned len);
+
+unsigned Network_output_count(const Network *net);
+const char *Network_output_name(const Network *, unsigned n);
+unsigned Network_output_rank(const Network *, unsigned n);
+unsigned Network_output_dim(const Network *, unsigned n, unsigned axis);
+void Network_output_bind(Network *net, unsigned n, void *ptr, unsigned len);
+
+void Network_invoke(Network *net);
+
+//
+// nnkit backend
+//
+#include <nnkit/Backend.h>
+#include <nnkit/TensorContext.h>
+#include <nnkit/CmdlineArguments.h>
+
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <nncc/core/ADT/tensor/Overlay.h>
+
+#include <stdex/Memory.h>
+
+using stdex::make_unique;
+using namespace nncc::core::ADT;
+
+namespace
+{
+
+class TensorContext final : public nnkit::TensorContext
+{
+public:
+ TensorContext() = default;
+
+public:
+ void allocate(const std::string &name, const tensor::Shape &shape)
+ {
+ using nncc::core::ADT::tensor::num_elements;
+
+ auto blob = make_unique<std::vector<uint8_t>>();
+ blob->resize(num_elements(shape) * sizeof(float));
+
+ _names.emplace_back(name);
+ _shapes.emplace_back(shape);
+ _blobs.emplace_back(std::move(blob));
+ }
+
+public:
+ uint8_t *base(uint32_t n) const { return _blobs.at(n)->data(); }
+
+public:
+ uint32_t size(void) const override { return _blobs.size(); }
+
+public:
+ std::string name(uint32_t n) const override { return _names.at(n); }
+
+public:
+ tensor::Shape shape(uint32_t n) const override { return _shapes.at(n); }
+
+public:
+ uint32_t size(uint32_t n) const { return _blobs.at(n)->size(); }
+
+public:
+ // Float (fp32) tensor support
+ bool isFloatTensor(uint32_t n) const override { return true; }
+ void getMutableFloatTensor(uint32_t n, const TensorContext::TypedAccessor<float> &f) override
+ {
+ using nncc::core::ADT::tensor::LexicalLayout;
+ using nncc::core::ADT::tensor::make_overlay;
+
+ auto base = reinterpret_cast<float *>(this->base(n));
+ auto view = make_overlay<float, LexicalLayout>(shape(n), base);
+
+ f(*this, n, view);
+ }
+
+ void getConstFloatTensor(uint32_t n, const TensorContext::TypedReader<float> &f) const override
+ {
+ using nncc::core::ADT::tensor::LexicalLayout;
+ using nncc::core::ADT::tensor::make_overlay;
+
+ auto base = reinterpret_cast<float *>(this->base(n));
+ auto view = make_overlay<float, LexicalLayout>(shape(n), base);
+
+ f(*this, n, view);
+ }
+
+private:
+ std::vector<std::string> _names;
+ std::vector<tensor::Shape> _shapes;
+ std::vector<std::unique_ptr<std::vector<uint8_t>>> _blobs;
+};
+
+class Backend final : public nnkit::Backend
+{
+public:
+ Backend()
+ {
+ _net = Network_construct();
+
+ // Allocate and bind inputs
+ for (uint32_t n = 0; n < Network_input_count(_net); ++n)
+ {
+ const uint32_t rank = Network_input_rank(_net, n);
+ const std::string name = Network_input_name(_net, n);
+
+ tensor::Shape shape;
+
+ shape.resize(rank);
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ shape.dim(axis) = Network_input_dim(_net, n, axis);
+ }
+
+ _inputs.allocate(name, shape);
+
+ Network_input_bind(_net, n, reinterpret_cast<const void *>(_inputs.base(n)), _inputs.size(n));
+ }
+
+ // Allocate and bind outputs
+ for (uint32_t n = 0; n < Network_output_count(_net); ++n)
+ {
+ const uint32_t rank = Network_output_rank(_net, n);
+ const std::string name = Network_output_name(_net, n);
+
+ tensor::Shape shape;
+
+ shape.resize(rank);
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ shape.dim(axis) = Network_output_dim(_net, n, axis);
+ }
+
+ _outputs.allocate(name, shape);
+
+ Network_output_bind(_net, n, reinterpret_cast<void *>(_outputs.base(n)), _outputs.size(n));
+ }
+ }
+
+public:
+ ~Backend() { Network_destruct(_net); }
+
+public:
+ void prepare(const std::function<void(nnkit::TensorContext &)> &f) override { f(_inputs); }
+ void run(void) override { Network_invoke(_net); }
+ void teardown(const std::function<void(nnkit::TensorContext &)> &f) override { f(_outputs); }
+
+private:
+ Network *_net;
+
+private:
+ TensorContext _inputs;
+ TensorContext _outputs;
+};
+
+} // namespace
+
+extern "C" std::unique_ptr<nnkit::Backend> make_backend(const nnkit::CmdlineArguments &args)
+{
+ return make_unique<::Backend>();
+}
diff --git a/compiler/enco/test/caffe/CMakeLists.txt b/compiler/enco/test/caffe/CMakeLists.txt
new file mode 100644
index 000000000..ee49b6b28
--- /dev/null
+++ b/compiler/enco/test/caffe/CMakeLists.txt
@@ -0,0 +1,141 @@
+option(ENCO_CAFFE_TEST "Enable enco test for caffe" ON)
+
+if(NOT ENCO_CAFFE_TEST)
+ return()
+endif(NOT ENCO_CAFFE_TEST)
+
+# TODO Use REQUIRED if supported
+nncc_find_resource(BVLCCaffeTests)
+
+if(NOT BVLCCaffeTests_FOUND)
+ message(FATAL_ERROR "Fail to find BVLCCaffeTests")
+endif(NOT BVLCCaffeTests_FOUND)
+
+# TESTCASE_BASE_DIR indicates where all the testcases are located
+set(TESTCASE_BASE_DIR "${BVLCCaffeTests_DIR}")
+
+###
+### Common function(s)
+###
+function(get_test_configuration PREFIX)
+ set(PROTOTXT_FILE "${PREFIX}.prototxt")
+ set(PROTOTXT_FILE "${PROTOTXT_FILE}" PARENT_SCOPE)
+ set(PROTOTXT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${PROTOTXT_FILE}" PARENT_SCOPE)
+ set(CAFFEMODEL_FILE "${PREFIX}.caffemodel")
+ set(CAFFEMODEL_FILE "${CAFFEMODEL_FILE}" PARENT_SCOPE)
+ set(CAFFEMODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/${CAFFEMODEL_FILE}" PARENT_SCOPE)
+ set(SOURCE_FILE ${PREFIX}.cpp)
+ set(SOURCE_FILE "${SOURCE_FILE}" PARENT_SCOPE)
+ set(SOURCE_PATH "${CMAKE_CURRENT_BINARY_DIR}/${SOURCE_FILE}" PARENT_SCOPE)
+ set(ASM_FILE ${PREFIX}.embed.S)
+ set(ASM_FILE "${ASM_FILE}" PARENT_SCOPE)
+ set(ASM_PATH "${CMAKE_CURRENT_BINARY_DIR}/${ASM_FILE}" PARENT_SCOPE)
+ set(BIN_FILE ${PREFIX}.bin)
+ set(BIN_FILE "${BIN_FILE}" PARENT_SCOPE)
+ set(BIN_PATH "${CMAKE_CURRENT_BINARY_DIR}/${BIN_FILE}" PARENT_SCOPE)
+endfunction(get_test_configuration)
+
+###
+### Prepare test(s)
+###
+if(NOT TARGET caffegen)
+ return()
+endif(NOT TARGET caffegen)
+
+if(NOT TARGET enco_caffe_frontend)
+ return()
+endif(NOT TARGET enco_caffe_frontend)
+
+# TODO Use "whitelist" instead
+#
+# WHY?
+#
+# Tests are now shared by multiple frameworks (not private), and thus
+# some tests may be unsupported.
+#
+file(GLOB MODELS RELATIVE "${TESTCASE_BASE_DIR}" "${TESTCASE_BASE_DIR}/*/test.prototxt")
+
+foreach(MODEL IN ITEMS ${MODELS})
+ get_filename_component(PREFIX ${MODEL} DIRECTORY)
+ get_test_configuration(${PREFIX})
+
+ set(MODEL_FILE ${TESTCASE_BASE_DIR}/${MODEL})
+
+ # Copy prototxt
+ # TODO Fix indentation
+ add_custom_command(OUTPUT ${PROTOTXT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy "${MODEL_FILE}" "${PROTOTXT_PATH}"
+ DEPENDS "${MODEL_FILE}"
+ COMMENT "Generating ${PROTOTXT_FILE}")
+
+ # Generate caffemodel
+ # TODO Fix indentation
+ add_custom_command(OUTPUT ${CAFFEMODEL_PATH}
+ COMMAND cat ${PROTOTXT_PATH}
+ | GLOG_minloglevel=2 $<TARGET_FILE:caffegen> init
+ | GLOG_minloglevel=2 $<TARGET_FILE:caffegen> encode
+ > ${CAFFEMODEL_PATH}
+ DEPENDS caffegen ${PROTOTXT_PATH}
+ COMMENT "Generating ${CAFFEMODEL_FILE}")
+
+ # Generate C++ code
+ # TODO Fix indentation
+ add_custom_command(OUTPUT ${SOURCE_PATH} ${ASM_PATH} ${BIN_PATH}
+ COMMAND $<TARGET_FILE:enco-cli>
+ --frontend $<TARGET_FILE:enco_caffe_frontend>
+ --frontend-arg ${PROTOTXT_FILE}
+ --frontend-arg ${CAFFEMODEL_FILE}
+ --backend-arg ${PREFIX}
+ DEPENDS enco-cli enco_caffe_frontend ${CAFFEMODEL_PATH}
+ COMMENT "Generating ${SOURCE_FILE}")
+ set_source_files_properties(${ASM_PATH} PROPERTIES GENERATED TRUE LANGUAGE C)
+
+ list(APPEND CANDIDATES ${PREFIX})
+endforeach(MODEL)
+
+###
+### Inference test
+###
+if(NOT TARGET ann_ref_static)
+ return()
+endif(NOT TARGET ann_ref_static)
+
+find_program(H5DIFF h5diff)
+
+if (NOT H5DIFF)
+ return()
+endif(NOT H5DIFF)
+
+message(STATUS "Enable enco(caffe) inference test")
+
+foreach(PREFIX IN ITEMS ${CANDIDATES})
+ if(NOT EXISTS "${TESTCASE_BASE_DIR}/${PREFIX}/INFERENCE")
+ continue()
+ endif()
+
+ get_test_configuration(${PREFIX})
+
+ set(BINDER_TARGET enco_caffe_test_${PREFIX}_binder)
+
+ # Compile nnkit binder (from generated C++ code)
+ add_library(${BINDER_TARGET} SHARED ${CMAKE_CURRENT_SOURCE_DIR}/../binder.cpp ${SOURCE_PATH} ${ASM_PATH})
+ target_include_directories(${BINDER_TARGET} PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
+ target_link_libraries(${BINDER_TARGET} nnkit_intf_backend)
+ target_link_libraries(${BINDER_TARGET} ann_api)
+ target_link_libraries(${BINDER_TARGET} ann_ref_static)
+ target_link_libraries(${BINDER_TARGET} stdex)
+ set_target_properties(${BINDER_TARGET} PROPERTIES OUTPUT_NAME ${PREFIX})
+
+ list(APPEND TESTS ${PREFIX})
+endforeach(PREFIX)
+
+# Run tests
+add_test(NAME enco_test_caffe
+ COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/runall.sh"
+ $<TARGET_FILE:nnkit-run>
+ $<TARGET_FILE:nnkit_caffe_backend>
+ $<TARGET_FILE:nnkit_randomize_action>
+ $<TARGET_FILE:nnkit_HDF5_export_action>
+ $<TARGET_FILE:nnkit_HDF5_import_action>
+ "${CMAKE_CURRENT_BINARY_DIR}"
+ ${TESTS})
diff --git a/compiler/enco/test/caffe/runall.sh b/compiler/enco/test/caffe/runall.sh
new file mode 100755
index 000000000..3b18f1c6b
--- /dev/null
+++ b/compiler/enco/test/caffe/runall.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+if [[ $# -le 6 ]]; then
+ echo "USAGE: $0 [nnkit-run path] [reference backend path] [randomize action path] [HDF5 export action path] [HDF5 import action path] [WORKDIR] [Prefix1] [Prefix2]..."
+ exit 255
+fi
+
+NNKIT_RUN_PATH="$1"; shift
+REFERENCE_BACKEND_PATH="$1"; shift
+RANDOMIZE_ACTION_PATH="$1"; shift
+HDF5_EXPORT_ACTION_PATH="$1"; shift
+HDF5_IMPORT_ACTION_PATH="$1"; shift
+WORKDIR="$1"; shift
+
+echo "-- Found nnkit-run: ${NNKIT_RUN_PATH}"
+echo "-- Found reference backend: ${REFERENCE_BACKEND_PATH}"
+echo "-- Found randomize action: ${RANDOMIZE_ACTION_PATH}"
+echo "-- Found HDF5 export action: ${HDF5_EXPORT_ACTION_PATH}"
+echo "-- Found HDF5 import action: ${HDF5_IMPORT_ACTION_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+TESTED=()
+PASSED=()
+FAILED=()
+
+pushd "${WORKDIR}"
+while [[ $# -ne 0 ]]; do
+ PREFIX="$1"; shift
+
+ TESTED+=("${PREFIX}")
+
+ PASSED_TAG="${PREFIX}.passed"
+
+ rm -f "${PASSED_TAG}"
+
+ cat > "${PREFIX}.log" <(
+ exec 2>&1
+
+ echo "-- Found prototxt: ${PREFIX}.prototxt"
+ echo "-- Found caffemodel: ${PREFIX}.caffemodel"
+ echo "-- Found backend: lib${PREFIX}.so"
+
+ "${NNKIT_RUN_PATH}" \
+ --backend "${REFERENCE_BACKEND_PATH}" \
+ --backend-arg "${WORKDIR}/${PREFIX}.prototxt" \
+ --backend-arg "${WORKDIR}/${PREFIX}.caffemodel" \
+ --pre "${RANDOMIZE_ACTION_PATH}" \
+ --pre "${HDF5_EXPORT_ACTION_PATH}" \
+ --pre-arg "${PREFIX}.input.h5" \
+ --post "${HDF5_EXPORT_ACTION_PATH}" \
+ --post-arg "${PREFIX}.expected.h5"
+
+ "${NNKIT_RUN_PATH}" \
+ --backend "./lib${PREFIX}.so" \
+ --pre "${HDF5_IMPORT_ACTION_PATH}" \
+ --pre-arg "${PREFIX}.input.h5" \
+ --post "${HDF5_EXPORT_ACTION_PATH}" \
+ --post-arg "${PREFIX}.obtained.h5"
+
+ h5diff -d 0.001 "${PREFIX}.expected.h5" "${PREFIX}.obtained.h5"
+
+ if [[ $? -eq 0 ]]; then
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then
+ PASSED+=("$PREFIX")
+ else
+ FAILED+=("$PREFIX")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255
+fi
+
+echo "PASSED"
+exit 0
diff --git a/compiler/enco/test/tflite/AveragePool2D_000/INFERENCE b/compiler/enco/test/tflite/AveragePool2D_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/AveragePool2D_000/INFERENCE
diff --git a/compiler/enco/test/tflite/AveragePool2D_000/test.recipe b/compiler/enco/test/tflite/AveragePool2D_000/test.recipe
new file mode 100644
index 000000000..746c34334
--- /dev/null
+++ b/compiler/enco/test/tflite/AveragePool2D_000/test.recipe
@@ -0,0 +1,24 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 7 dim: 7 dim: 1 }
+}
+operation {
+ type: "AveragePool2D"
+ averagepool2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ filter_width: 2
+ filter_height: 2
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/AveragePool2D_001/INFERENCE b/compiler/enco/test/tflite/AveragePool2D_001/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/AveragePool2D_001/INFERENCE
diff --git a/compiler/enco/test/tflite/AveragePool2D_001/test.recipe b/compiler/enco/test/tflite/AveragePool2D_001/test.recipe
new file mode 100644
index 000000000..36bbda78c
--- /dev/null
+++ b/compiler/enco/test/tflite/AveragePool2D_001/test.recipe
@@ -0,0 +1,24 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 5 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 5 }
+}
+operation {
+ type: "AveragePool2D"
+ averagepool2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ filter_width: 3
+ filter_height: 3
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/CMakeLists.txt b/compiler/enco/test/tflite/CMakeLists.txt
new file mode 100644
index 000000000..d5a96a6da
--- /dev/null
+++ b/compiler/enco/test/tflite/CMakeLists.txt
@@ -0,0 +1,108 @@
+option(ENCO_TFLITE_TEST "Enable enco test for TFLite" ON)
+
+if(NOT ENCO_TFLITE_TEST)
+ return()
+endif(NOT ENCO_TFLITE_TEST)
+
+###
+### Common function(s)
+###
+function(get_test_configuration PREFIX)
+ set(RECIPE_FILE "${PREFIX}.recipe" PARENT_SCOPE)
+ set(TFLITEMODEL_FILE "${PREFIX}.tflite" PARENT_SCOPE)
+ set(SOURCE_FILE ${PREFIX}.cpp PARENT_SCOPE)
+ set(ASM_FILE ${PREFIX}.embed.S PARENT_SCOPE)
+ set(BIN_FILE ${PREFIX}.bin PARENT_SCOPE)
+endfunction(get_test_configuration)
+
+###
+### Prepare test(s)
+###
+if(NOT TARGET tflchef-file)
+ return()
+endif(NOT TARGET tflchef-file)
+
+if(NOT TARGET enco_tflite_frontend)
+ return()
+endif(NOT TARGET enco_tflite_frontend)
+
+file(GLOB MODELS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*/test.recipe")
+
+foreach(MODEL IN ITEMS ${MODELS})
+ get_filename_component(PREFIX ${MODEL} DIRECTORY)
+ get_test_configuration(${PREFIX})
+
+ set(MODEL_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${MODEL})
+
+ # Copy recipe
+ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${RECIPE_FILE}
+ COMMAND ${CMAKE_COMMAND} -E copy "${MODEL_FILE}"
+ "${CMAKE_CURRENT_BINARY_DIR}/${RECIPE_FILE}"
+ DEPENDS "${MODEL_FILE}"
+ COMMENT "Copying ${RECIPE_FILE}")
+
+ # Generate tflitemodel
+ add_custom_command(OUTPUT ${TFLITEMODEL_FILE}
+ COMMAND $<TARGET_FILE:tflchef-file> ${RECIPE_FILE} ${TFLITEMODEL_FILE}
+                     DEPENDS tflchef-file ${CMAKE_CURRENT_BINARY_DIR}/${RECIPE_FILE}
+ COMMENT "Generating ${TFLITEMODEL_FILE}")
+
+ # Generate C++ code
+ add_custom_command(OUTPUT ${SOURCE_FILE} ${ASM_FILE} ${BIN_FILE}
+ COMMAND $<TARGET_FILE:enco-cli>
+ --frontend $<TARGET_FILE:enco_tflite_frontend>
+ --frontend-arg ${TFLITEMODEL_FILE}
+ --backend-arg ${PREFIX}
+                     DEPENDS enco-cli enco_tflite_frontend ${TFLITEMODEL_FILE}
+ COMMENT "Generating ${SOURCE_FILE}")
+ set_source_files_properties(${ASM_FILE} PROPERTIES GENERATED TRUE LANGUAGE C)
+
+ list(APPEND CANDIDATES ${PREFIX})
+endforeach(MODEL)
+
+###
+### Inference test
+###
+if(NOT TARGET ann_ref_static)
+ return()
+endif(NOT TARGET ann_ref_static)
+
+find_program(H5DIFF h5diff)
+
+if (NOT H5DIFF)
+ return()
+endif(NOT H5DIFF)
+
+message(STATUS "Enable enco(tflite) inference test")
+
+foreach(PREFIX IN ITEMS ${CANDIDATES})
+ if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/${PREFIX}/INFERENCE")
+ continue()
+ endif()
+
+ get_test_configuration(${PREFIX})
+
+ set(BINDER_TARGET enco_tflite_test_${PREFIX}_binder)
+
+ # Compile nnkit binder (from generated C++ code)
+ add_library(${BINDER_TARGET} SHARED ${CMAKE_CURRENT_SOURCE_DIR}/../binder.cpp ${SOURCE_FILE} ${ASM_FILE})
+ target_include_directories(${BINDER_TARGET} PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
+ target_link_libraries(${BINDER_TARGET} nnkit_intf_backend)
+ target_link_libraries(${BINDER_TARGET} ann_api)
+ target_link_libraries(${BINDER_TARGET} ann_ref_static)
+ target_link_libraries(${BINDER_TARGET} stdex)
+ set_target_properties(${BINDER_TARGET} PROPERTIES OUTPUT_NAME ${PREFIX})
+
+ list(APPEND TESTS ${PREFIX})
+endforeach(PREFIX)
+
+# Run tests
+add_test(NAME enco_test_tflite
+ COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/runall.sh"
+ $<TARGET_FILE:nnkit-run>
+ $<TARGET_FILE:nnkit_tflite_backend>
+ $<TARGET_FILE:nnkit_randomize_action>
+ $<TARGET_FILE:nnkit_HDF5_export_action>
+ $<TARGET_FILE:nnkit_HDF5_import_action>
+ "${CMAKE_CURRENT_BINARY_DIR}"
+ ${TESTS})
diff --git a/compiler/enco/test/tflite/Concat_000/INFERENCE b/compiler/enco/test/tflite/Concat_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_000/INFERENCE
diff --git a/compiler/enco/test/tflite/Concat_000/test.recipe b/compiler/enco/test/tflite/Concat_000/test.recipe
new file mode 100644
index 000000000..35641bd07
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_000/test.recipe
@@ -0,0 +1,28 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 1 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Concatenation"
+ concatenation_options {
+ axis: 3
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Concat_001/INFERENCE b/compiler/enco/test/tflite/Concat_001/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_001/INFERENCE
diff --git a/compiler/enco/test/tflite/Concat_001/test.recipe b/compiler/enco/test/tflite/Concat_001/test.recipe
new file mode 100644
index 000000000..7adaf1645
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_001/test.recipe
@@ -0,0 +1,29 @@
+# Concatenate two feature maps along "width" dimension
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 1 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 2 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 3 dim: 1 }
+}
+operation {
+ type: "Concatenation"
+ concatenation_options {
+ axis: 2
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Concat_002/INFERENCE b/compiler/enco/test/tflite/Concat_002/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_002/INFERENCE
diff --git a/compiler/enco/test/tflite/Concat_002/test.recipe b/compiler/enco/test/tflite/Concat_002/test.recipe
new file mode 100644
index 000000000..918cb13d3
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_002/test.recipe
@@ -0,0 +1,29 @@
+# Concatenate two feature maps along "height" dimension
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 1 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 1 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 1 dim: 1 }
+}
+operation {
+ type: "Concatenation"
+ concatenation_options {
+ axis: 1
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Concat_003/INFERENCE b/compiler/enco/test/tflite/Concat_003/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_003/INFERENCE
diff --git a/compiler/enco/test/tflite/Concat_003/test.recipe b/compiler/enco/test/tflite/Concat_003/test.recipe
new file mode 100644
index 000000000..8f1b64ea6
--- /dev/null
+++ b/compiler/enco/test/tflite/Concat_003/test.recipe
@@ -0,0 +1,29 @@
+# Concatenate two feature maps along "batch" dimension
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 1 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 2 dim: 1 dim: 1 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 1 dim: 1 }
+}
+operation {
+ type: "Concatenation"
+ concatenation_options {
+ axis: 0
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Conv2D_000/INFERENCE b/compiler/enco/test/tflite/Conv2D_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_000/INFERENCE
diff --git a/compiler/enco/test/tflite/Conv2D_000/test.recipe b/compiler/enco/test/tflite/Conv2D_000/test.recipe
new file mode 100644
index 000000000..9f0841819
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_000/test.recipe
@@ -0,0 +1,45 @@
+# Test for basic case: VALID padding, no activation layer, stride=[1,1]
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Conv2D_001/INFERENCE b/compiler/enco/test/tflite/Conv2D_001/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_001/INFERENCE
diff --git a/compiler/enco/test/tflite/Conv2D_001/test.recipe b/compiler/enco/test/tflite/Conv2D_001/test.recipe
new file mode 100644
index 000000000..d9d4904da
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_001/test.recipe
@@ -0,0 +1,45 @@
+# Test for SAME padding
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 5 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 5 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Conv2D_002/INFERENCE b/compiler/enco/test/tflite/Conv2D_002/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_002/INFERENCE
diff --git a/compiler/enco/test/tflite/Conv2D_002/test.recipe b/compiler/enco/test/tflite/Conv2D_002/test.recipe
new file mode 100644
index 000000000..55976c9b9
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_002/test.recipe
@@ -0,0 +1,46 @@
+# Test for RELU activation layer
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ activation: RELU
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Conv2D_003/INFERENCE b/compiler/enco/test/tflite/Conv2D_003/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_003/INFERENCE
diff --git a/compiler/enco/test/tflite/Conv2D_003/test.recipe b/compiler/enco/test/tflite/Conv2D_003/test.recipe
new file mode 100644
index 000000000..30c9473b7
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_003/test.recipe
@@ -0,0 +1,45 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ activation: RELU6
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Conv2D_004/INFERENCE b/compiler/enco/test/tflite/Conv2D_004/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_004/INFERENCE
diff --git a/compiler/enco/test/tflite/Conv2D_004/test.recipe b/compiler/enco/test/tflite/Conv2D_004/test.recipe
new file mode 100644
index 000000000..20f4a9908
--- /dev/null
+++ b/compiler/enco/test/tflite/Conv2D_004/test.recipe
@@ -0,0 +1,45 @@
+# Conv2D with ifm w, h = 14, 14 && ofm w, h = 7, 7 && stride = 2, 2 && padding = SAME (similar case from MobileNet)
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 14 dim: 14 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 7 dim: 7 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: SAME
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/DepthwiseConv2D_000/INFERENCE b/compiler/enco/test/tflite/DepthwiseConv2D_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/DepthwiseConv2D_000/INFERENCE
diff --git a/compiler/enco/test/tflite/DepthwiseConv2D_000/test.recipe b/compiler/enco/test/tflite/DepthwiseConv2D_000/test.recipe
new file mode 100644
index 000000000..27bc767fc
--- /dev/null
+++ b/compiler/enco/test/tflite/DepthwiseConv2D_000/test.recipe
@@ -0,0 +1,48 @@
+# SAME padding, stride = [1,1], activation=RELU6.
+# In mobilenet, there are two cases using depthwiseConv2D : A case like this one, and another case with stride=[2,2]
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 5 dim: 4 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 4 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 4 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 5 dim: 4 }
+}
+operation {
+ type: "DepthwiseConv2D"
+ depthwiseconv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ depth_multiplier: 1
+ activation: RELU6
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/DepthwiseConv2D_001/INFERENCE b/compiler/enco/test/tflite/DepthwiseConv2D_001/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/DepthwiseConv2D_001/INFERENCE
diff --git a/compiler/enco/test/tflite/DepthwiseConv2D_001/test.recipe b/compiler/enco/test/tflite/DepthwiseConv2D_001/test.recipe
new file mode 100644
index 000000000..0166474d8
--- /dev/null
+++ b/compiler/enco/test/tflite/DepthwiseConv2D_001/test.recipe
@@ -0,0 +1,46 @@
+# depthwiseConv2D with ifm w, h = 14, 14 && ofm w, h = 7, 7 && stride = 2, 2 && padding = SAME (similar case from MobileNet)
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 14 dim: 14 dim: 5 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 5 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 5 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 7 dim: 7 dim: 5 }
+}
+operation {
+ type: "DepthwiseConv2D"
+ depthwiseconv2d_options {
+ padding: SAME
+ stride_w: 2
+ stride_h: 2
+ activation: RELU6
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Div_000/INFERENCE b/compiler/enco/test/tflite/Div_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Div_000/INFERENCE
diff --git a/compiler/enco/test/tflite/Div_000/test.recipe b/compiler/enco/test/tflite/Div_000/test.recipe
new file mode 100644
index 000000000..a6335de46
--- /dev/null
+++ b/compiler/enco/test/tflite/Div_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm0"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Div"
+ input: "ifm0"
+ input: "ifm1"
+ output: "ofm"
+ div_options {
+ activation: NONE
+ }
+}
+input: "ifm0"
+input: "ifm1"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/MaxPool2D_000/INFERENCE b/compiler/enco/test/tflite/MaxPool2D_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/MaxPool2D_000/INFERENCE
diff --git a/compiler/enco/test/tflite/MaxPool2D_000/test.recipe b/compiler/enco/test/tflite/MaxPool2D_000/test.recipe
new file mode 100644
index 000000000..718630f08
--- /dev/null
+++ b/compiler/enco/test/tflite/MaxPool2D_000/test.recipe
@@ -0,0 +1,24 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 7 dim: 7 dim: 1 }
+}
+operation {
+ type: "MaxPool2D"
+ maxpool2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ filter_width: 2
+ filter_height: 2
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/ReLU6_000/INFERENCE b/compiler/enco/test/tflite/ReLU6_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/ReLU6_000/INFERENCE
diff --git a/compiler/enco/test/tflite/ReLU6_000/test.recipe b/compiler/enco/test/tflite/ReLU6_000/test.recipe
new file mode 100644
index 000000000..226593593
--- /dev/null
+++ b/compiler/enco/test/tflite/ReLU6_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "ReLU6"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/ReLU_000/INFERENCE b/compiler/enco/test/tflite/ReLU_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/ReLU_000/INFERENCE
diff --git a/compiler/enco/test/tflite/ReLU_000/test.recipe b/compiler/enco/test/tflite/ReLU_000/test.recipe
new file mode 100644
index 000000000..8eaa3602f
--- /dev/null
+++ b/compiler/enco/test/tflite/ReLU_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "ReLU"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Regression_0000/INFERENCE b/compiler/enco/test/tflite/Regression_0000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0000/INFERENCE
diff --git a/compiler/enco/test/tflite/Regression_0000/test.recipe b/compiler/enco/test/tflite/Regression_0000/test.recipe
new file mode 100644
index 000000000..2f3c03670
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0000/test.recipe
@@ -0,0 +1,84 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 2 }
+}
+operand {
+ name: "ker_0"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias_0"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "0.1"
+ }
+}
+operand {
+ name: "ofm_0"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ activation: NONE
+ }
+ input: "ifm"
+ input: "ker_0"
+ input: "bias_0"
+ output: "ofm_0"
+}
+operand {
+ name: "ker_1"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias_1"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "0.1"
+ }
+}
+operand {
+ name: "ofm_1"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ activation: NONE
+ }
+ input: "ifm"
+ input: "ker_1"
+ input: "bias_1"
+ output: "ofm_1"
+}
+input: "ifm"
+output: "ofm_0"
+output: "ofm_1"
diff --git a/compiler/enco/test/tflite/Regression_0001/INFERENCE b/compiler/enco/test/tflite/Regression_0001/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0001/INFERENCE
diff --git a/compiler/enco/test/tflite/Regression_0001/test.recipe b/compiler/enco/test/tflite/Regression_0001/test.recipe
new file mode 100644
index 000000000..e6f4eca8f
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0001/test.recipe
@@ -0,0 +1,50 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler { tag: "gaussian" arg: "0.0" arg: "1.0" }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler { tag: "gaussian" arg: "0.0" arg: "1.0" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operand {
+ name: "arr"
+ type: FLOAT32
+ shape { dim: 1 dim: 9 }
+}
+operand {
+ name: "shape"
+ type: INT32
+ shape { dim: 2 }
+ filler { tag: "explicit" arg: "-1" arg: "9" }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options { padding: VALID stride_w: 1 stride_h: 1 activation: RELU6 }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+operation {
+ type: "Reshape"
+ input: "ofm"
+ input: "shape"
+ output: "arr"
+ reshape_options { new_shape: [-1, 9] }
+}
+input: "ifm"
+output: "arr"
diff --git a/compiler/enco/test/tflite/Regression_0002/INFERENCE b/compiler/enco/test/tflite/Regression_0002/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0002/INFERENCE
diff --git a/compiler/enco/test/tflite/Regression_0002/test.recipe b/compiler/enco/test/tflite/Regression_0002/test.recipe
new file mode 100644
index 000000000..8234c7996
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0002/test.recipe
@@ -0,0 +1,45 @@
+# Compilation SHOULD NOT fail even when there is no effective calculation
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "0.1"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ activation: NONE
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
diff --git a/compiler/enco/test/tflite/Regression_0003/INFERENCE b/compiler/enco/test/tflite/Regression_0003/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0003/INFERENCE
diff --git a/compiler/enco/test/tflite/Regression_0003/test.recipe b/compiler/enco/test/tflite/Regression_0003/test.recipe
new file mode 100644
index 000000000..693c45543
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0003/test.recipe
@@ -0,0 +1,33 @@
+# Compilation SHOULD NOT fail even if all the inputs are constant
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+ filler { tag: "constant" arg: "0.1" }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler { tag: "constant" arg: "0.2" }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler { tag: "constant" arg: "0.3" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options { padding: VALID }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Regression_0004/INFERENCE b/compiler/enco/test/tflite/Regression_0004/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0004/INFERENCE
diff --git a/compiler/enco/test/tflite/Regression_0004/test.recipe b/compiler/enco/test/tflite/Regression_0004/test.recipe
new file mode 100644
index 000000000..80705efd5
--- /dev/null
+++ b/compiler/enco/test/tflite/Regression_0004/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm0"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ filler { tag: "constant" arg: "0.1" }
+}
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ filler { tag: "constant" arg: "0.1" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Div"
+ input: "ifm0"
+ input: "ifm1"
+ output: "ofm"
+ div_options {
+ activation: NONE
+ }
+}
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Reshape_000/INFERENCE b/compiler/enco/test/tflite/Reshape_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Reshape_000/INFERENCE
diff --git a/compiler/enco/test/tflite/Reshape_000/test.recipe b/compiler/enco/test/tflite/Reshape_000/test.recipe
new file mode 100644
index 000000000..bb7ce48a9
--- /dev/null
+++ b/compiler/enco/test/tflite/Reshape_000/test.recipe
@@ -0,0 +1,21 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 10 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 10 }
+}
+operation {
+ type: "Reshape"
+ reshape_options {
+ new_shape: -1
+ new_shape: 10
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/Sub_000/INFERENCE b/compiler/enco/test/tflite/Sub_000/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/Sub_000/INFERENCE
diff --git a/compiler/enco/test/tflite/Sub_000/test.recipe b/compiler/enco/test/tflite/Sub_000/test.recipe
new file mode 100644
index 000000000..0397c9c2b
--- /dev/null
+++ b/compiler/enco/test/tflite/Sub_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim:2 dim:3 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim:2 dim:3 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim:2 dim:3 }
+}
+operation {
+ type: "Sub"
+ sub_options {
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/compiler/enco/test/tflite/empty/INFERENCE b/compiler/enco/test/tflite/empty/INFERENCE
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/empty/INFERENCE
diff --git a/compiler/enco/test/tflite/empty/test.recipe b/compiler/enco/test/tflite/empty/test.recipe
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/compiler/enco/test/tflite/empty/test.recipe
diff --git a/compiler/enco/test/tflite/runall.sh b/compiler/enco/test/tflite/runall.sh
new file mode 100755
index 000000000..c274f724b
--- /dev/null
+++ b/compiler/enco/test/tflite/runall.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+if [[ $# -le 6 ]]; then # need the 6 fixed tool/dir paths plus at least one test prefix
+ echo "USAGE: $0 [nnkit-run path] [reference backend path] [randomize action path] [HDF5 export action path] [HDF5 import action path] [WORKDIR] [Prefix1] [Prefix2] ..."
+ exit 255
+fi
+
+NNKIT_RUN_PATH="$1"; shift # positional arguments are consumed left to right
+REFERENCE_BACKEND_PATH="$1"; shift
+RANDOMIZE_ACTION_PATH="$1"; shift
+HDF5_EXPORT_ACTION_PATH="$1"; shift
+HDF5_IMPORT_ACTION_PATH="$1"; shift
+WORKDIR="$1"; shift # remaining arguments are the test prefixes to run
+
+echo "-- Found nnkit-run: ${NNKIT_RUN_PATH}"
+echo "-- Found reference backend: ${REFERENCE_BACKEND_PATH}"
+echo "-- Found randomize action: ${RANDOMIZE_ACTION_PATH}"
+echo "-- Found HDF5 export action: ${HDF5_EXPORT_ACTION_PATH}"
+echo "-- Found HDF5 import action: ${HDF5_IMPORT_ACTION_PATH}"
+echo "-- Found workdir: ${WORKDIR}"
+
+TESTED=() # every prefix attempted
+PASSED=() # prefixes whose obtained output matched the reference
+FAILED=() # prefixes that diverged (or whose run crashed)
+
+pushd "${WORKDIR}" # all per-test artifacts are addressed relative to WORKDIR
+while [[ $# -ne 0 ]]; do
+ PREFIX="$1"; shift
+
+ TESTED+=("${PREFIX}")
+
+ PASSED_TAG="${PREFIX}.passed" # marker file; its existence signals success
+
+ rm -f "${PASSED_TAG}" # drop any stale marker from a previous run
+
+ cat > "${PREFIX}.log" <(
+ exec 2>&1 # fold stderr into the captured per-test log
+
+ echo "-- Found tflite: ${PREFIX}.tflite"
+ echo "-- Found backend: lib${PREFIX}.so"
+
+ "${NNKIT_RUN_PATH}" \
+ --backend "${REFERENCE_BACKEND_PATH}" \
+ --backend-arg "${WORKDIR}/${PREFIX}.tflite" \
+ --pre "${RANDOMIZE_ACTION_PATH}" \
+ --pre "${HDF5_EXPORT_ACTION_PATH}" \
+ --pre-arg "${PREFIX}.input.h5" \
+ --post "${HDF5_EXPORT_ACTION_PATH}" \
+ --post-arg "${PREFIX}.expected.h5" # golden output produced by the reference backend
+
+ "${NNKIT_RUN_PATH}" \
+ --backend "./lib${PREFIX}.so" \
+ --pre "${HDF5_IMPORT_ACTION_PATH}" \
+ --pre-arg "${PREFIX}.input.h5" \
+ --post "${HDF5_EXPORT_ACTION_PATH}" \
+ --post-arg "${PREFIX}.obtained.h5" # output produced by the generated backend under test
+
+ h5diff -d 0.001 "${PREFIX}.expected.h5" "${PREFIX}.obtained.h5" # element-wise compare within absolute delta 0.001
+
+ if [[ $? -eq 0 ]]; then # h5diff exit status: 0 means the tensors matched
+ touch "${PASSED_TAG}"
+ fi
+ )
+
+ if [[ -f "${PASSED_TAG}" ]]; then # marker is written only after a successful diff
+ PASSED+=("$PREFIX")
+ else
+ FAILED+=("$PREFIX")
+ fi
+done
+popd
+
+if [[ ${#TESTED[@]} -ne ${#PASSED[@]} ]]; then # at least one test did not pass
+ echo "FAILED"
+ for TEST in "${FAILED[@]}"
+ do
+ echo "- ${TEST}"
+ done
+ exit 255 # non-zero so the build/CI driver flags the failure
+fi
+
+echo "PASSED"
+exit 0