summaryrefslogtreecommitdiff
path: root/compiler/enco/frontend/tflite/src
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/enco/frontend/tflite/src')
-rw-r--r--compiler/enco/frontend/tflite/src/Context.cpp116
-rw-r--r--compiler/enco/frontend/tflite/src/Context.h169
-rw-r--r--compiler/enco/frontend/tflite/src/Convert.cpp57
-rw-r--r--compiler/enco/frontend/tflite/src/Convert.h43
-rw-r--r--compiler/enco/frontend/tflite/src/Entry.cpp36
-rw-r--r--compiler/enco/frontend/tflite/src/Frontend.cpp198
-rw-r--r--compiler/enco/frontend/tflite/src/Frontend.h40
-rw-r--r--compiler/enco/frontend/tflite/src/Frontend.test.cpp41
-rw-r--r--compiler/enco/frontend/tflite/src/GraphBuilder.h46
-rw-r--r--compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h88
-rw-r--r--compiler/enco/frontend/tflite/src/IRBuilder.h178
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Activation.cpp96
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Activation.h37
-rw-r--r--compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp126
-rw-r--r--compiler/enco/frontend/tflite/src/Op/AveragePool2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Concatenation.cpp252
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Concatenation.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Conv2D.cpp181
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Conv2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp230
-rw-r--r--compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Div.cpp116
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Div.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp123
-rw-r--r--compiler/enco/frontend/tflite/src/Op/MaxPool2D.h39
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Padding.cpp105
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Padding.h42
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU6.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/Op/ReLU6.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Reshape.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Reshape.h38
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Sub.cpp112
-rw-r--r--compiler/enco/frontend/tflite/src/Op/Sub.h38
-rw-r--r--compiler/enco/frontend/tflite/src/RawModel.h29
-rw-r--r--compiler/enco/frontend/tflite/src/RawModelLoader.cpp89
-rw-r--r--compiler/enco/frontend/tflite/src/RawModelLoader.h29
-rw-r--r--compiler/enco/frontend/tflite/src/TensorBags.h65
39 files changed, 3295 insertions, 0 deletions
diff --git a/compiler/enco/frontend/tflite/src/Context.cpp b/compiler/enco/frontend/tflite/src/Context.cpp
new file mode 100644
index 000000000..ef030dc5d
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Context.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Context.h"
+
+#include "Convert.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <map>
+#include <sstream>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+/**
+ * @brief Cache name/shape/type for every tensor in the subgraph, keyed by tensor index
+ */
+void TensorContext::prepare(const tflite::SubGraph *graph)
+{
+  for (uint32_t tensor_id = 0; tensor_id < graph->tensors()->size(); ++tensor_id)
+  {
+    auto const tensor_info = graph->tensors()->Get(tensor_id);
+    // "name" is an optional flatbuffers field; guard against a null pointer
+    auto const tensor_name = tensor_info->name() ? tensor_info->name()->str() : std::string{};
+    auto const tensor_shape = as_tensor_shape(tensor_info->shape());
+    auto const tensor_type = tensor_info->type();
+
+    _name_ctx[tensor_id] = tensor_name;
+    _shape_ctx[tensor_id] = tensor_shape;
+    _type_ctx[tensor_id] = tensor_type;
+  }
+}
+
+TflOpCodeContext::TflOpCodeContext(
+  const flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>> *opcodes)
+{
+  // Build a flat pointer table so opcodes can be fetched by index in O(1)
+  _opcodes.reserve(opcodes->size());
+
+  for (auto it = opcodes->begin(); it != opcodes->end(); ++it)
+  {
+    _opcodes.push_back(*it);
+  }
+}
+
+/**
+ * @brief Look up the BuiltinOperator enum value for the given operator
+ *
+ * @note Asserts that the operator's opcode index is within the table built
+ *       from the model's operator_codes vector.
+ */
+tflite::BuiltinOperator TflOpCodeContext::builtin_code(const tflite::Operator *op) const
+{
+  uint32_t index = op->opcode_index();
+  assert(index < _opcodes.size());
+  const tflite::OperatorCode *opcode = _opcodes.at(index);
+  return opcode->builtin_code();
+}
+
+/**
+ * @brief Return a human-readable name for the operator's opcode
+ *
+ * Falls back to "(invalid: N)" for out-of-range builtin codes and to the
+ * custom_code string for custom operators.
+ */
+std::string TflOpCodeContext::opcode_name(const tflite::Operator *op) const
+{
+  uint32_t index = op->opcode_index();
+  assert(index < _opcodes.size());
+  const tflite::OperatorCode *opcode = _opcodes.at(index);
+
+  if (!is_valid(opcode))
+  {
+    std::ostringstream oss;
+    oss << "(invalid: " << index << ")";
+    return oss.str();
+  }
+
+  if (is_custom(opcode))
+  {
+    // custom_code is an optional flatbuffers field; it may be absent
+    if (!opcode->custom_code())
+      return "(invalid custom)";
+
+    return opcode->custom_code()->c_str();
+  }
+
+  // Builtin operator: use the schema-generated enum name
+  tflite::BuiltinOperator code = opcode->builtin_code();
+  return EnumNameBuiltinOperator(code);
+}
+
+bool TflOpCodeContext::is_valid(const tflite::OperatorCode *opcode)
+{
+  // Valid iff the builtin code falls inside the schema's enum range
+  const auto code = opcode->builtin_code();
+  return (code >= tflite::BuiltinOperator_MIN) && (code <= tflite::BuiltinOperator_MAX);
+}
+
+bool TflOpCodeContext::is_custom(const tflite::OperatorCode *opcode)
+{
+  // Custom operators are flagged with the dedicated CUSTOM builtin code
+  return opcode->builtin_code() == tflite::BuiltinOperator_CUSTOM;
+}
+
+TflBufferContext::TflBufferContext(const tflite::Model *tfl_model)
+{
+  // Index every buffer of the model by its position in the buffers vector
+  const auto *tfl_buffers = tfl_model->buffers();
+  const uint32_t count = tfl_buffers->size();
+
+  for (uint32_t buffer_id = 0; buffer_id < count; ++buffer_id)
+  {
+    _buffer_ctx[buffer_id] = tfl_buffers->Get(buffer_id);
+  }
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Context.h b/compiler/enco/frontend/tflite/src/Context.h
new file mode 100644
index 000000000..f72385f9a
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Context.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+#include "Convert.h"
+#include "TensorBags.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <schema_generated.h>
+
+#include <map>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+/**
+ * @brief Extracts and holds operand(tensor) information such as name, shape, and type
+ *
+ * Populate via prepare() before calling any accessor.
+ */
+class TensorContext
+{
+public:
+  void prepare(const tflite::SubGraph *graph);
+
+  // NOTE(review): operator[] default-inserts an empty entry for an unknown
+  // tensor_id instead of failing — callers must only pass ids seen by prepare()
+  const std::string &name(uint32_t tensor_id) { return _name_ctx[tensor_id]; }
+  const tensor::Shape &shape(uint32_t tensor_id) { return _shape_ctx[tensor_id]; }
+  const tflite::TensorType &type(uint32_t tensor_id) { return _type_ctx[tensor_id]; }
+
+private:
+  // All maps are keyed by the tensor's index within the subgraph
+  std::map<uint32_t, std::string> _name_ctx;
+  std::map<uint32_t, tensor::Shape> _shape_ctx;
+  std::map<uint32_t, tflite::TensorType> _type_ctx;
+};
+
+/**
+ * @brief Class that holds operator codes and related methods
+ */
+class TflOpCodeContext
+{
+public:
+  TflOpCodeContext(const flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>> *opcodes);
+
+  /**
+   * @brief Returns BuiltinOperator value of the operator
+   */
+  tflite::BuiltinOperator builtin_code(const tflite::Operator *op) const;
+
+  /**
+   * @brief Returns human readable name of the operator code of the operator
+   *
+   * @note TF lite InterpreterBuilder sets an error state and returns error code
+   *       for invalid opcode. Here we just return human readable message as
+   *       this method returns a name for the operator code.
+   */
+  std::string opcode_name(const tflite::Operator *op) const;
+
+public:
+  /// @brief True if the builtin code is inside the schema's enum range
+  static bool is_valid(const tflite::OperatorCode *opcode);
+  /// @brief True if the opcode denotes a custom (non-builtin) operator
+  static bool is_custom(const tflite::OperatorCode *opcode);
+
+private:
+  // Non-owning pointers into the flatbuffer; indexed by opcode_index
+  std::vector<const tflite::OperatorCode *> _opcodes;
+};
+
+/**
+ * @brief Class to read and provide buffer information of tflite
+ */
+class TflBufferContext
+{
+public:
+  /**
+   * @brief Non-owning, typed view over a tflite buffer's raw bytes
+   */
+  template <typename T> struct TflBuffer
+  {
+    TflBuffer(const T *p, size_t s) : ptr{p}, len{s} {};
+    const T *ptr; // first element, or nullptr when the buffer is empty
+    size_t len;   // number of T elements (not bytes)
+  };
+
+public:
+  explicit TflBufferContext(const tflite::Model *tfl_model);
+
+public:
+  /**
+   * @brief Return the buffer contents of a tensor as a typed view
+   *
+   * Returns {nullptr, 0} when the tensor's buffer carries no data
+   * (e.g. activations that are computed at runtime).
+   */
+  template <typename T>
+  TflBuffer<T> tensor_buffer(const tflite::SubGraph *graph, uint32_t tensor_idx) const
+  {
+    TflBufferContext::TflBuffer<T> res{nullptr, 0};
+    const auto *tensor = graph->tensors()->Get(tensor_idx);
+    uint32_t tfl_buf_id = tensor->buffer();
+
+    assert(_buffer_ctx.size() > tfl_buf_id);
+
+    const tflite::Buffer *tfl_buffer = _buffer_ctx.at(tfl_buf_id);
+
+    if (auto *array = tfl_buffer->data())
+    {
+      if (size_t size = array->size())
+      {
+        // byte size must be an exact multiple of the element size
+        assert(size % sizeof(T) == 0);
+
+        res.len = size / sizeof(T);
+        res.ptr = reinterpret_cast<const T *>(array->data());
+      }
+    }
+
+    return res;
+  }
+
+private:
+  std::map<uint32_t /* Buffer ID */, const tflite::Buffer *> _buffer_ctx;
+};
+
+/**
+ * @brief Class to store context to build IR from tflite
+ *
+ * Holds non-owning pointers and references; every referenced object must
+ * outlive this context.
+ */
+class GraphBuilderContext
+{
+public:
+  // Member initializers listed in declaration order to avoid -Wreorder
+  explicit GraphBuilderContext(coco::Module *m, coco::Data *d, coco::Block *block,
+                               TensorBags &tensor_bags, TensorContext &tensor_context,
+                               TflBufferContext &buffer_context, const tflite::SubGraph *graph)
+      : _m(m), _d(d), _block(block), _tensor_context(tensor_context), _tensor_bags(tensor_bags),
+        _buffer_context(buffer_context), _graph(graph)
+  {
+    // DO NOTHING
+  }
+
+  GraphBuilderContext() = delete;
+  GraphBuilderContext(const GraphBuilderContext &) = delete;
+  GraphBuilderContext(GraphBuilderContext &&) = delete;
+
+public:
+  coco::Module *m() { return _m; }
+  coco::Data *d() { return _d; }
+  coco::Block *block() { return _block; }
+  TensorContext &tensor() { return _tensor_context; }
+  TensorBags &bags() { return _tensor_bags; }
+  TflBufferContext &buffer() { return _buffer_context; }
+  const tflite::SubGraph *graph() { return _graph; }
+
+private:
+  coco::Module *_m;
+  coco::Data *_d;
+  coco::Block *_block;
+  TensorContext &_tensor_context;
+  TensorBags &_tensor_bags;
+  TflBufferContext &_buffer_context;
+  const tflite::SubGraph *_graph;
+};
+
+} // namespace tflimport
+
+#endif // __CONTEXT_H__
diff --git a/compiler/enco/frontend/tflite/src/Convert.cpp b/compiler/enco/frontend/tflite/src/Convert.cpp
new file mode 100644
index 000000000..ffae95d01
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Convert.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convert.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <schema_generated.h>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+IndexVector as_index_vector(const flatbuffers::Vector<int32_t> *array)
+{
+  // Materialize the flatbuffers vector into a plain std::vector<int32_t>
+  IndexVector res;
+  res.reserve(array->size());
+
+  for (uint32_t i = 0; i < array->size(); i++)
+  {
+    res.push_back(array->Get(i));
+  }
+
+  return res;
+}
+
+tensor::Shape as_tensor_shape(const flatbuffers::Vector<int32_t> *shape)
+{
+  // Copy each flatbuffers dimension into a coco tensor shape of equal rank
+  const uint32_t num_axes = shape->size();
+
+  tensor::Shape res;
+  res.resize(num_axes);
+
+  for (uint32_t d = 0; d < num_axes; ++d)
+  {
+    res.dim(d) = shape->Get(d);
+  }
+
+  return res;
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Convert.h b/compiler/enco/frontend/tflite/src/Convert.h
new file mode 100644
index 000000000..fb4c248bf
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Convert.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVERT_H__
+#define __CONVERT_H__
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <schema_generated.h>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+using IndexVector = std::vector<int32_t>;
+
+/**
+ * @brief Converts flatbuffers::Vector to IndexVector
+ */
+IndexVector as_index_vector(const flatbuffers::Vector<int32_t> *array);
+
+/**
+ * @brief Converts flatbuffers::Vector to nncc::core::ADT::tensor::Shape
+ */
+tensor::Shape as_tensor_shape(const flatbuffers::Vector<int32_t> *shape);
+
+} // namespace tflimport
+
+#endif // __CONVERT_H__
diff --git a/compiler/enco/frontend/tflite/src/Entry.cpp b/compiler/enco/frontend/tflite/src/Entry.cpp
new file mode 100644
index 000000000..c69e18074
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Entry.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+#include "RawModelLoader.h"
+
+#include <cmdline/View.h>
+
+#include <stdex/Memory.h>
+
+#include <fstream>
+#include <cassert>
+
+using stdex::make_unique;
+
+/**
+ * @brief Frontend entry point invoked by the enco driver
+ *
+ * Expects exactly one command-line argument: the path to a T/F Lite model file.
+ */
+extern "C" std::unique_ptr<enco::Frontend> make_frontend(const cmdline::View &cmdline)
+{
+  assert(cmdline.size() == 1); // tflite file name
+
+  auto model = load_from(cmdline.at(0));
+
+  return make_unique<Frontend>(std::move(model));
+}
diff --git a/compiler/enco/frontend/tflite/src/Frontend.cpp b/compiler/enco/frontend/tflite/src/Frontend.cpp
new file mode 100644
index 000000000..c64f181f4
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Frontend.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+#include "Context.h"
+#include "Convert.h"
+#include "TensorBags.h"
+#include "GraphBuilderRegistry.h"
+
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <iostream>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+/**
+ * @brief Register each listed tensor as a module input (name, bag, layout)
+ */
+void set_module_inputs(coco::Module *m, TensorContext &ctx, TensorBags &bags,
+                       const IndexVector &inputs)
+{
+  for (auto const tensor_id : inputs)
+  {
+    auto input = m->entity()->input()->create(ctx.shape(tensor_id));
+
+    input->name(ctx.name(tensor_id));
+    input->bag(bags.bag(tensor_id));
+    input->reorder<tensor::LexicalLayout>();
+
+    m->input()->insert(input);
+  }
+}
+
+/**
+ * @brief Register each listed tensor as a module output (name, bag, layout)
+ */
+void set_module_outputs(coco::Module *m, TensorContext &ctx, TensorBags &bags,
+                        const IndexVector &outputs)
+{
+  for (auto const tensor_id : outputs)
+  {
+    auto output = m->entity()->output()->create(ctx.shape(tensor_id));
+
+    output->name(ctx.name(tensor_id));
+    output->bag(bags.bag(tensor_id));
+    output->reorder<tensor::LexicalLayout>();
+
+    m->output()->insert(output);
+  }
+}
+
+/**
+ * @brief Copy values of tfl tensors into coco::Data if the data was not copied
+ *
+ * Only float32 tensors are copied; INT32 tensors are skipped (see TODO below).
+ * Bags that are already allocated, or whose tflite buffer is empty, are left
+ * untouched, which makes this function safe to call repeatedly.
+ */
+void copy_tensors(GraphBuilderContext *ctx)
+{
+  auto d = ctx->d();
+
+  // for each bag, check if bag is not allocated but tflite tensor has values
+  for (auto &iter : ctx->bags())
+  {
+    auto tfl_tensor_id = iter.first;
+    auto bag = iter.second;
+
+    auto tfl_buffer = ctx->buffer().tensor_buffer<float>(ctx->graph(), tfl_tensor_id);
+
+    // TODO remove this line when support int32 is ready
+    if (ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_INT32)
+    {
+      std::cout << "*** INT32 COPYING IS NOT SUPPORTED ***" << std::endl;
+      continue;
+    }
+
+    assert(ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_FLOAT32);
+
+    auto span = d->f32()->weight(bag); // TODO support other type
+
+    if (!(span.data() == nullptr && span.size() == 0)) // already allocated
+      continue;
+
+    if (tfl_buffer.ptr == nullptr || tfl_buffer.len == 0) // no data to copy
+      continue;
+
+    // allocate first, then re-fetch the span so it points at the new storage
+    d->f32()->allocate(bag);
+
+    auto ifm_span = d->f32()->weight(bag);
+    for (uint32_t idx = 0; idx < tfl_buffer.len; ++idx)
+    {
+      ifm_span[idx] = tfl_buffer.ptr[idx];
+    }
+  }
+}
+
+} // namespace tflimport
+
+// Takes ownership of the raw model, which stays alive for the Frontend's lifetime
+Frontend::Frontend(std::unique_ptr<RawModel> &&raw) : _raw{std::move(raw)}
+{
+  // DO NOTHING
+}
+
+/**
+ * @brief Translate the loaded T/F Lite model into an enco::Bundle (module + data)
+ *
+ * @throws std::runtime_error when an operator fails validation or has no
+ *         registered GraphBuilder
+ */
+enco::Bundle Frontend::load(void) const
+{
+  auto model = _raw->model();
+
+  // Only schema version 3 models with a single subgraph are supported
+  assert(model->version() == 3);
+  assert(model->subgraphs()->size() == 1);
+
+  auto graph = model->subgraphs()->Get(0);
+
+  auto m = coco::Module::create();
+  auto d = coco::Data::create();
+
+  tflimport::TensorContext tensor_context;
+  tflimport::TensorBags tensor_bags;
+
+  tensor_context.prepare(graph);
+  tensor_bags.prepare(graph, m);
+
+  auto inputs = tflimport::as_index_vector(graph->inputs());
+  auto outputs = tflimport::as_index_vector(graph->outputs());
+
+  tflimport::set_module_inputs(m.get(), tensor_context, tensor_bags, inputs);
+  tflimport::set_module_outputs(m.get(), tensor_context, tensor_bags, outputs);
+
+  auto blk = m->entity()->block()->create();
+  m->block()->append(blk);
+
+  auto opcodes = model->operator_codes();
+
+  tflimport::TflBufferContext buffer_context(model);
+  tflimport::TflOpCodeContext opcode_context(opcodes);
+
+  auto operators = graph->operators();
+
+  tflimport::GraphBuilderContext opbuilder_context(m.get(), d.get(), blk, tensor_bags,
+                                                   tensor_context, buffer_context, graph);
+
+  // Translate each operator into coco IR via its registered GraphBuilder
+  // (unsigned index to match flatbuffers' Length() type)
+  for (uint32_t i = 0; i < operators->Length(); ++i)
+  {
+    const auto *op = operators->Get(i);
+    tflite::BuiltinOperator builtincode = opcode_context.builtin_code(op);
+
+    if (const auto *graph_builder = tflimport::GraphBuilderRegistry::get().lookup(builtincode))
+    {
+      if (!graph_builder->validate(op))
+      {
+        throw std::runtime_error{"Invalid operator"};
+      }
+
+      graph_builder->build(op, &opbuilder_context);
+    }
+    else
+    {
+      std::string opcodename = opcode_context.opcode_name(op);
+      throw std::runtime_error{"Not supported: " + opcodename};
+    }
+
+    // copying unfilled tensor value
+    copy_tensors(&opbuilder_context);
+  }
+
+  // Create "Bundle"
+  enco::Bundle bundle;
+
+  bundle.module(std::move(m));
+  bundle.data(std::move(d));
+
+  // Return by value: returning std::move(local) would inhibit NRVO
+  return bundle;
+}
diff --git a/compiler/enco/frontend/tflite/src/Frontend.h b/compiler/enco/frontend/tflite/src/Frontend.h
new file mode 100644
index 000000000..bb0c9cd2c
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Frontend.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FRONTEND_H__
+#define __FRONTEND_H__
+
+#include "RawModel.h"
+
+#include <enco/Frontend.h>
+
+#include <flatbuffers/flatbuffers.h>
+
+#include <memory>
+
+/**
+ * @brief enco frontend that translates a T/F Lite model into an enco::Bundle
+ */
+class Frontend final : public enco::Frontend
+{
+public:
+  // explicit: a single-argument constructor should not act as an implicit conversion
+  explicit Frontend(std::unique_ptr<RawModel> &&raw);
+
+public:
+  enco::Bundle load(void) const override;
+
+private:
+  // Owns the raw model backing all flatbuffers pointers used during load()
+  std::unique_ptr<RawModel> _raw;
+};
+
+#endif // __FRONTEND_H__
diff --git a/compiler/enco/frontend/tflite/src/Frontend.test.cpp b/compiler/enco/frontend/tflite/src/Frontend.test.cpp
new file mode 100644
index 000000000..aee6099e7
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Frontend.test.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Frontend.h"
+
+#include <stdex/Memory.h>
+
+#include <gtest/gtest.h>
+
+using stdex::make_unique;
+
+namespace
+{
+
+// Test double that carries no real model; sufficient for construction-only tests
+struct MockRawModel final : public RawModel
+{
+  const tflite::Model *model(void) const override { return nullptr; }
+};
+
+} // namespace
+
+TEST(FrontendTest, constructor)
+{
+  // Let's test whether Frontend is actually constructible.
+  // A null tflite::Model is fine here since load() is never called.
+  auto frontend = make_unique<Frontend>(make_unique<MockRawModel>());
+
+  ASSERT_NE(frontend, nullptr);
+}
diff --git a/compiler/enco/frontend/tflite/src/GraphBuilder.h b/compiler/enco/frontend/tflite/src/GraphBuilder.h
new file mode 100644
index 000000000..f2cb57848
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/GraphBuilder.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GRAPH_BUILDER_H__
+#define __GRAPH_BUILDER_H__
+
+#include "Context.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief Parent class of tflite operation graph builders (e.g., Conv2DGraphBuilder)
+ */
+class GraphBuilder
+{
+public:
+  /**
+   * TODO Declare "validate" method as a pure virtual method
+   *
+   * Q: Is it possible to validate T/F Lite model only with this interface?
+   */
+  virtual bool validate(const tflite::Operator *) const { return true; }
+
+  /// @brief Translate the operator into coco IR inside the given context
+  virtual void build(const tflite::Operator *op, GraphBuilderContext *context) const = 0;
+
+  // virtual destructor: instances are deleted through GraphBuilder pointers
+  virtual ~GraphBuilder() = default;
+};
+
+} // namespace tflimport
+
+#endif // __GRAPH_BUILDER_H__
diff --git a/compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h b/compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h
new file mode 100644
index 000000000..1ae882e89
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/GraphBuilderRegistry.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GRAPH_BUILDER_REGISTRY_H__
+#define __GRAPH_BUILDER_REGISTRY_H__
+
+#include "Op/Conv2D.h"
+#include "Op/DepthwiseConv2D.h"
+#include "Op/AveragePool2D.h"
+#include "Op/MaxPool2D.h"
+#include "Op/Concatenation.h"
+#include "Op/ReLU.h"
+#include "Op/ReLU6.h"
+#include "Op/Reshape.h"
+#include "Op/Sub.h"
+#include "Op/Div.h"
+
+#include <schema_generated.h>
+#include <stdex/Memory.h>
+
+#include <map>
+
+using stdex::make_unique;
+
+namespace tflimport
+{
+
+/**
+ * @brief Class to return graph builder for passed tflite::builtinOperator
+ */
+class GraphBuilderRegistry
+{
+public:
+  /**
+   * @brief Returns registered GraphBuilder pointer for BuiltinOperator or
+   *        nullptr if not registered
+   */
+  const GraphBuilder *lookup(tflite::BuiltinOperator op) const
+  {
+    // Single map lookup instead of find() followed by at()
+    auto it = _builder_map.find(op);
+    if (it == _builder_map.end())
+      return nullptr;
+
+    return it->second.get();
+  }
+
+  /// @brief Access the process-wide singleton registry
+  static GraphBuilderRegistry &get()
+  {
+    static GraphBuilderRegistry me;
+    return me;
+  }
+
+private:
+  GraphBuilderRegistry()
+  {
+    // add GraphBuilder for each tflite operation.
+    _builder_map[tflite::BuiltinOperator_CONV_2D] = make_unique<Conv2DGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] =
+      make_unique<DepthwiseConv2DGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_AVERAGE_POOL_2D] = make_unique<AvgPool2DGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_MAX_POOL_2D] = make_unique<MaxPool2DGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_CONCATENATION] = make_unique<ConcatenationGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_RELU] = make_unique<ReLUGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_RELU6] = make_unique<ReLU6GraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_RESHAPE] = make_unique<ReshapeGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_SUB] = make_unique<SubGraphBuilder>();
+    _builder_map[tflite::BuiltinOperator_DIV] = make_unique<DivGraphBuilder>();
+  }
+
+private:
+  std::map<tflite::BuiltinOperator, std::unique_ptr<GraphBuilder>> _builder_map;
+};
+
+} // namespace tflimport
+
+#endif // __GRAPH_BUILDER_REGISTRY_H__
diff --git a/compiler/enco/frontend/tflite/src/IRBuilder.h b/compiler/enco/frontend/tflite/src/IRBuilder.h
new file mode 100644
index 000000000..edfe247e1
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/IRBuilder.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file IRBuilder.h
+ * @brief coco IR builders. This is code is copied from enco caffe frontend.
+ */
+#ifndef __IR_BUILDER_H__
+#define __IR_BUILDER_H__
+
+#include "coco/IR/Module.h"
+
+#include <deque>
+
+using namespace nncc::core::ADT;
+
+/**
+ * @brief Stack-based builder for coco::Op expression trees
+ *
+ * Operands are pushed onto an internal stack; binary helpers (add/mul) pop
+ * two operands and push the combined op back.
+ */
+class OpBuilder
+{
+public:
+  OpBuilder(coco::Module *module) : _module{module}
+  {
+    // module SHOULD BE valid
+    assert(_module != nullptr);
+  }
+
+public:
+  /**
+   * @brief Return true if the internal stack is empty
+   */
+  bool empty(void) const { return _stack.empty(); }
+
+  /**
+   * @brief Return the operation at the top of the internal stack
+   */
+  coco::Op *top(void) const
+  {
+    assert(_stack.size() > 0);
+    return _stack.front();
+  }
+
+  /**
+   * @brief Push op onto the internal stack
+   *
+   * BEFORE| Stack
+   * AFTER | Op; Stack
+   */
+  OpBuilder &push(coco::Op *op)
+  {
+    _stack.push_front(op);
+    return (*this);
+  }
+
+  /**
+   * @brief Create "Load" op and push it onto the internal stack
+   *
+   * BEFORE| Stack
+   * AFTER | Load(obj); Stack
+   */
+  OpBuilder &load(coco::Object *obj)
+  {
+    auto op = _module->entity()->op()->create<coco::Load>();
+    op->object(obj);
+    push(op);
+    return (*this);
+  }
+
+  /**
+   * @brief Create "Add" op and push it onto the internal stack
+   *
+   * BEFORE| Left; Right; Stack
+   * AFTER | Add(Left, Right); Stack
+   */
+  OpBuilder &add(void) { return binary<coco::Add>(); }
+
+  /**
+   * @brief Create "Mul" op and push it onto the internal stack
+   *
+   * BEFORE| Left; Right; Stack
+   * AFTER | Mul(Left, Right); Stack
+   */
+  OpBuilder &mul(void) { return binary<coco::Mul>(); }
+
+  /**
+   * @brief Pop op from the internal stack
+   *
+   * BEFORE| Op; Stack
+   * AFTER | Stack
+   */
+  coco::Op *pop(void)
+  {
+    assert(_stack.size() > 0);
+    auto op = _stack.front();
+    _stack.pop_front();
+    return op;
+  }
+
+private:
+  // Pops two operands and pushes ConcreteOp(left, right); the left operand
+  // is the most recently pushed one (first pop)
+  template <typename ConcreteOp> OpBuilder &binary()
+  {
+    assert(_stack.size() >= 2);
+    auto left = pop();
+    auto right = pop();
+
+    auto op = _module->entity()->op()->create<ConcreteOp>();
+    op->left(left);
+    op->right(right);
+    push(op);
+
+    return (*this);
+  }
+
+private:
+  coco::Module *_module; // non-owning; all ops are created in this module
+  std::deque<coco::Op *> _stack;
+};
+
+/// @brief Convenience factories: build an OpBuilder over a module (raw or owned)
+inline OpBuilder op_builder(coco::Module *m) { return OpBuilder{m}; }
+inline OpBuilder op_builder(const std::unique_ptr<coco::Module> &m) { return op_builder(m.get()); }
+
+/**
+ * @brief Factory for coco instructions (Eval/Copy) created in a given module
+ */
+class InstrBuilder
+{
+public:
+  InstrBuilder(coco::Module *module) : _module{module}
+  {
+    // NOTE _module SHOULD be valid
+    assert(_module != nullptr);
+  }
+
+public:
+  /**
+   * @brief Create "Eval" instruction with a given "Object" and "Op"
+   *
+   * @note "eval(out, op)" will create "%out <- Eval(op)" instruction
+   */
+  coco::Eval *eval(coco::Object *out, coco::Op *op) const
+  {
+    auto ins = _module->entity()->instr()->create<coco::Eval>();
+    ins->op(op);
+    ins->out(out);
+    return ins;
+  }
+
+  /**
+   * @brief Create "Copy" instruction with given two "Object"
+   *
+   * @note "copy(into, from)" will create "%into <- Copy(%from)" instruction
+   */
+  coco::Copy *copy(coco::Object *into, coco::Object *from) const
+  {
+    auto ins = _module->entity()->instr()->create<coco::Copy>();
+    ins->from(from);
+    ins->into(into);
+    return ins;
+  }
+
+private:
+  coco::Module *_module; // non-owning; instructions are created in this module
+};
+
+/// @brief Owning smart-pointer alias for a coco module
+using ModuleHandle = std::unique_ptr<coco::Module>;
+
+/// @brief Convenience factories: build an InstrBuilder over a module (raw or owned)
+inline InstrBuilder instr_builder(coco::Module *m) { return InstrBuilder{m}; }
+inline InstrBuilder instr_builder(const ModuleHandle &m) { return instr_builder(m.get()); }
+
+#endif // __IR_BUILDER_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Activation.cpp b/compiler/enco/frontend/tflite/src/Op/Activation.cpp
new file mode 100644
index 000000000..d6215ba34
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Activation.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Activation.h"
+
+#include <IRBuilder.h>
+
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+coco::FeatureObject *build_activation(tflite::ActivationFunctionType act, coco::Block *block,
+ coco::FeatureObject *ifm)
+{
+ assert(ifm != nullptr && ifm->asFeature() != nullptr); // support feature only in this version
+
+ coco::Module *m = block->module();
+
+ auto shape = ifm->asFeature()->shape();
+
+ // creates output object
+ auto output_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto output_bag = m->entity()->bag()->create(num_elements(shape));
+ output_obj->bag(output_bag);
+ output_obj->layout(coco::FeatureLayouts::BHWC::create(shape));
+
+ switch (act)
+ {
+ case tflite::ActivationFunctionType::ActivationFunctionType_NONE:
+ {
+ // Create Copy Instr (copying from ifm to output_obj),
+ // redundant layer but optimized by backend
+ auto copy_ins = instr_builder(m).copy(output_obj, ifm);
+
+ // Append the instruction to the block
+ block->instr()->append(copy_ins);
+ break;
+ }
+ case tflite::ActivationFunctionType::ActivationFunctionType_RELU:
+ {
+ // Create Eval(output_obj, ReLU(load(ifm)))
+ auto load_op = op_builder(m).load(ifm).pop();
+ auto relu_op = m->entity()->op()->create<coco::ReLU>();
+ relu_op->arg(load_op);
+
+ auto eval_ins = instr_builder(m).eval(output_obj, relu_op);
+
+ // Append the instruction to the block
+ block->instr()->append(eval_ins);
+ break;
+ }
+ case tflite::ActivationFunctionType::ActivationFunctionType_RELU6:
+ {
+ // Create Eval(output_obj, ReLU6(load(ifm)))
+ auto load_op = op_builder(m).load(ifm).pop();
+ auto relu6_op = m->entity()->op()->create<coco::ReLU6>();
+ relu6_op->arg(load_op);
+
+ auto eval_ins = instr_builder(m).eval(output_obj, relu6_op);
+
+ // Append the instruction to the block
+ block->instr()->append(eval_ins);
+ break;
+ }
+ default:
+ // TODO support other fused activations
+ assert(false);
+ break;
+ }
+
+ return output_obj;
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Activation.h b/compiler/enco/frontend/tflite/src/Op/Activation.h
new file mode 100644
index 000000000..05306dd41
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Activation.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_ACTIVATION_H__
+#define __OP_ACTIVATION_H__
+
+#include <coco/IR/Block.h>
+#include <coco/IR/FeatureObject.h>
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief Add coco::Eval for fused activation.
+ * This method creates an ofm object, appends Eval(ofm object, RELU(...)) into block,
+ * and returns ofm object.
+ */
+coco::FeatureObject *build_activation(tflite::ActivationFunctionType act, coco::Block *block,
+ coco::FeatureObject *ifm);
+} // namespace tflimport
+
+#endif // __OP_ACTIVATION_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp
new file mode 100644
index 000000000..16f68fcdb
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AveragePool2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool AvgPool2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_Pool2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void AvgPool2DGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr); // check if init(..) is called
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // output index 0 : output feature
+ assert(opinputs.size() == 1);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+ // Create an object for an input feature map
+ coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Load op
+ auto coco_load = op_builder(m).load(ifm_obj).pop();
+
+ // Create an AvgPool2D op
+ auto coco_avgpool2d = m->entity()->op()->create<coco::AvgPool2D>();
+ auto *params = op->builtin_options_as_Pool2DOptions();
+
+ // NOTE For Tensorflow lite, PaddingExcluded is needed
+ coco_avgpool2d->divisor(coco::AvgPool2D::Divisor::PaddingExcluded);
+
+ coco_avgpool2d->window()->height(params->filter_height());
+ coco_avgpool2d->window()->width(params->filter_width());
+
+ coco_avgpool2d->stride()->vertical(params->stride_h());
+ coco_avgpool2d->stride()->horizontal(params->stride_w());
+
+ coco::Padding2D padding =
+ pool2D_padding(params, ifm_shape, params->filter_width(), params->filter_height());
+
+ coco_avgpool2d->pad()->top(padding.top());
+ coco_avgpool2d->pad()->bottom(padding.bottom());
+ coco_avgpool2d->pad()->left(padding.left());
+ coco_avgpool2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_avgpool2d->arg(coco_load);
+
+ // Create an Eval instruction
+ auto ins = instr_builder(m).eval(ofm_obj, coco_avgpool2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(ins);
+
+ // TODO activation, e.g., relu
+ assert(params->fused_activation_function() ==
+ tflite::ActivationFunctionType::ActivationFunctionType_NONE);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/AveragePool2D.h b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.h
new file mode 100644
index 000000000..3e37e3cad
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/AveragePool2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_AVERAGEPOOL2D_H__
+#define __OP_AVERAGEPOOL2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for AvgPool2D operator
+ */
+class AvgPool2DGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const tflite::Operator *op) const override;
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_AVERAGEPOOL2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp b/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp
new file mode 100644
index 000000000..ce0f47b21
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Concatenation.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <array>
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+namespace
+{
+
+/**
+ * @brief Convert a numeric tensor axis as a ConcatF FeatureAxis value
+ */
+coco::ConcatF::Axis as_ConcatF_axis(uint32_t axis)
+{
+ // NOTE The feature map (in TensorFlow) is a rank-4 (NHWC) tensor
+ assert(axis < 4);
+
+ coco::ConcatF::Axis res = coco::ConcatF::Axis::Unknown;
+
+ switch (axis)
+ {
+ case 0:
+ res = coco::ConcatF::Axis::Batch;
+ break;
+ case 1:
+ res = coco::ConcatF::Axis::Height;
+ break;
+ case 2:
+ res = coco::ConcatF::Axis::Width;
+ break;
+ case 3:
+ res = coco::ConcatF::Axis::Depth;
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
+
+/**
+ * @brief Convert a coco FeatureShape as an array of 'uint32_t' values
+ */
+std::array<uint32_t, 4> as_dims(const coco::FeatureShape &shape)
+{
+ std::array<uint32_t, 4> res;
+
+ res[0] = shape.batch();
+ res[1] = shape.height();
+ res[2] = shape.width();
+ res[3] = shape.depth();
+
+ return res;
+}
+
+/**
+ * @brief Convert a tensor shape as a coco FeatureShape
+ */
+coco::FeatureShape as_feature_shape(const tensor::Shape &shape)
+{
+ assert(shape.rank() == 4);
+
+ auto const B = shape.dim(0);
+ auto const C = shape.dim(3);
+ auto const H = shape.dim(1);
+ auto const W = shape.dim(2);
+
+ return coco::FeatureShape{B, C, H, W};
+}
+
+} // namespace
+
+namespace tflimport
+{
+
+void ConcatenationGraphBuilder::build(const tflite::Operator *op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ coco::Module *m = context->m();
+ coco::Data *d = context->d();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 ~ N : any number of input features
+ // output index 0 : one output feature
+ assert(opinputs.size() > 0);
+ assert(opoutputs.size() == 1);
+
+ // Default parameter values are referenced from schema_generated.h
+ int32_t concat_axis = 0;
+ tflite::ActivationFunctionType activation = tflite::ActivationFunctionType_NONE;
+
+ if (auto *concatenation_params = op->builtin_options_as_ConcatenationOptions())
+ {
+ activation = concatenation_params->fused_activation_function();
+ concat_axis = concatenation_params->axis();
+
+ const int32_t rank = static_cast<int32_t>(tensor_context.shape(opinputs.at(0)).rank());
+ if (concat_axis < 0)
+ {
+ concat_axis += rank;
+ }
+ assert(concat_axis >= 0);
+ assert(concat_axis < rank);
+ }
+ assert(as_ConcatF_axis(concat_axis) != coco::ConcatF::Axis::Unknown);
+ assert(activation == tflite::ActivationFunctionType_NONE);
+
+ // Construct a vector of input objects
+ std::vector<coco::FeatureObject *> input_objects;
+
+ for (auto &input_index : opinputs)
+ {
+ const tensor::Shape &input_shape = tensor_context.shape(input_index);
+ coco::FeatureObject *input_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *input_bag = bags.bag(input_index);
+ input_obj->bag(input_bag);
+ input_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(input_shape)));
+
+ input_objects.emplace_back(input_obj);
+ }
+
+ coco::FeatureObject *last_feature = input_objects.at(0);
+
+ assert(last_feature != nullptr);
+ assert(last_feature->bag() != nullptr);
+
+ // Update coco IR
+ //
+ // Given a sequence of input features %in[0] / %in[1] / ... / %in[N]
+ // the below code constructs a sequence of eval instructions
+ // - Load is omitted for simplicity
+ //
+ // %tmp = eval(ConcatF(%in[0], %in[1]))
+ // %tmp = eval(ConcatF(%tmp, %in[2]))
+ // ...
+ // %tmp = eval(ConcatF(%tmp, %in[N]))
+ // %out[0] = copy(%tmp)
+ //
+ for (uint32_t n = 1; n < input_objects.size(); ++n)
+ {
+ auto const left_feature = last_feature;
+ auto const left_shape = left_feature->layout()->shape();
+
+ auto right_feature = input_objects.at(n);
+ auto right_shape = right_feature->layout()->shape();
+
+ // Compute output dimensionalities
+ auto compute_out_dims = [&left_shape, &right_shape, concat_axis](void) {
+ std::array<uint32_t, 4> out_dims;
+
+ const auto left_dims = as_dims(left_shape);
+ const auto right_dims = as_dims(right_shape);
+
+ for (uint32_t axis = 0; axis < 4 /* FEATURE MAP RANK */; ++axis)
+ {
+ // The dimensionality of all the axes except the 'concat' axis SHOULD BE IDENTICAL
+ assert((concat_axis == axis) || (left_dims[axis] == right_dims[axis]));
+
+ out_dims[axis] = left_dims[axis];
+ if (axis == concat_axis)
+ {
+ out_dims[axis] += right_dims[axis];
+ }
+ }
+
+ return out_dims;
+ };
+
+ const auto out_dims = compute_out_dims();
+
+ const uint32_t B = out_dims[0 /* BATCH */];
+ const uint32_t C = out_dims[3 /* DEPTH */];
+ const uint32_t H = out_dims[1 /* HEIGHT */];
+ const uint32_t W = out_dims[2 /* WIDTH */];
+
+ const coco::FeatureShape out_shape{B, C, H, W};
+
+ auto out_bag = m->entity()->bag()->create(B * num_elements(out_shape));
+ auto out_feature = m->entity()->object()->create<coco::FeatureObject>();
+
+ out_feature->bag(out_bag);
+ out_feature->layout(coco::FeatureLayouts::BHWC::create(out_shape));
+
+ auto left_load = op_builder(m).load(left_feature).pop();
+ auto right_load = op_builder(m).load(right_feature).pop();
+
+ auto concat_f = m->entity()->op()->create<coco::ConcatF>();
+
+ concat_f->axis(as_ConcatF_axis(concat_axis));
+ concat_f->left(left_load);
+ concat_f->right(right_load);
+
+ auto eval = instr_builder(m).eval(out_feature, concat_f);
+
+ // Append the constructed Eval instruction
+ blk->instr()->append(eval);
+
+ // Update 'last_feature'
+ last_feature = out_feature;
+ }
+
+ // Insert copy instruction from last_feature to output operand
+ int const ofm_idx = opoutputs.at(0);
+ auto const ofm_shape = tensor_context.shape(ofm_idx);
+
+ auto ofm_bag = bags.bag(ofm_idx);
+ auto ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Copy instruction from last into ofm
+ auto copy_ins = instr_builder(m).copy(ofm_obj, last_feature);
+
+ // Append the instruction
+ blk->instr()->append(copy_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Concatenation.h b/compiler/enco/frontend/tflite/src/Op/Concatenation.h
new file mode 100644
index 000000000..eb7625a85
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Concatenation.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CONCATENATION_H__
+#define __OP_CONCATENATION_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Concatenation operator
+ */
+class ConcatenationGraphBuilder : public GraphBuilder
+{
+public:
+ void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_CONCATENATION_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Conv2D.cpp b/compiler/enco/frontend/tflite/src/Op/Conv2D.cpp
new file mode 100644
index 000000000..e9516c0e9
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Conv2D.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Conv2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/KernelLayouts.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool Conv2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_Conv2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void Conv2DGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ // preparation
+ coco::Module *m = context->m();
+ coco::Data *d = context->d();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+ TflBufferContext &buffer_context = context->buffer();
+ const tflite::SubGraph *graph = context->graph();
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // input index 1 : kernel
+ // input index 2 : bias (optional)
+ bool hasBias = (opinputs.size() == 3);
+ assert(opinputs.size() == 2 || hasBias);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ker_idx = opinputs.at(1);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+ const tensor::Shape &ker_shape = tensor_context.shape(ker_idx);
+
+ // Create an input feature map object
+ auto *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an output feature map object
+ auto *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a kernel object
+ auto *ker_obj = m->entity()->object()->create<coco::KernelObject>();
+ auto *ker_bag = bags.bag(ker_idx);
+ ker_obj->bag(ker_bag);
+ ker_obj->layout(coco::KernelLayouts::NHWC::create(as_kernel_shape(ker_shape)));
+
+ // Create a Load op
+ auto load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a Conv2D op
+ auto coco_conv2d = m->entity()->op()->create<coco::Conv2D>();
+
+ // populating Conv2D objects and options such as stride and padding
+ coco_conv2d->ker(ker_obj);
+
+ auto *conv_params = op->builtin_options_as_Conv2DOptions();
+
+ coco_conv2d->stride()->vertical(conv_params->stride_h());
+ coco_conv2d->stride()->horizontal(conv_params->stride_w());
+
+ // conv_params->padding() to left, top, right, bottom
+ coco::Padding2D padding = conv2D_padding(conv_params, ifm_shape, ker_shape);
+
+ coco_conv2d->pad()->top(padding.top());
+ coco_conv2d->pad()->bottom(padding.bottom());
+ coco_conv2d->pad()->left(padding.left());
+ coco_conv2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_conv2d->arg(load);
+
+ // Object to store Conv2D output
+ auto *conv2d_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *conv2d_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ conv2d_obj->bag(conv2d_bag);
+ conv2d_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create an Eval instruction for Conv2D
+ auto conv2d_ins = instr_builder(m).eval(conv2d_obj, coco_conv2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(conv2d_ins);
+
+ // Last Object to make a copy to Output Object
+ coco::FeatureObject *last_obj = conv2d_obj;
+
+ if (hasBias)
+ {
+ // When there is a bias, use btmp_obj as bias add output
+ // Bias is adding last_obj with bias weight values
+ auto *btmp_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *btmp_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ btmp_obj->bag(btmp_bag);
+ btmp_obj->layout(coco::FeatureLayouts::BHWC::create(ofm_obj->shape()));
+
+ int bias_idx = opinputs.at(2);
+
+ // Create an object for bias
+ auto bias_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *bias_bag = bags.bag(bias_idx);
+ bias_obj->bag(bias_bag);
+ bias_obj->layout(coco::FeatureLayouts::BC::create(ofm_obj->shape()));
+
+ // Create Op of conv2d output (last_obj) + bias values(bias_obj)
+ auto bias_add = op_builder(m).load(last_obj).load(bias_obj).add().pop();
+
+ // Create Instr as bias add result write to btmp_obj
+ auto bias_add_ins = instr_builder(m).eval(btmp_obj, bias_add);
+
+ // Append the instruction
+ blk->instr()->append(bias_add_ins);
+
+ // Update last_obj to btmp_obj
+ last_obj = btmp_obj;
+ }
+
+ // fused activation
+ coco::FeatureObject *act_output =
+ build_activation(conv_params->fused_activation_function(), blk, last_obj);
+
+ // Create Copy Instr of last_obj to Output Object
+ auto copy_ins = instr_builder(m).copy(ofm_obj, act_output);
+ blk->instr()->append(copy_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Conv2D.h b/compiler/enco/frontend/tflite/src/Op/Conv2D.h
new file mode 100644
index 000000000..018815bd4
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Conv2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_CONV2D_H__
+#define __OP_CONV2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Conv2D operator
+ */
+class Conv2DGraphBuilder : public GraphBuilder
+{
+public:
+ bool validate(const tflite::Operator *op) const override;
+ void build(const tflite::Operator *op, GraphBuilderContext *context) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_CONV2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp
new file mode 100644
index 000000000..e3d7b263e
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DepthwiseConv2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+
+#include <coco/IR/Module.h>
+#include <coco/IR/KernelLayouts.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool DepthwiseConv2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_DepthwiseConv2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void DepthwiseConv2DGraphBuilder::build(const tflite::Operator *op,
+ GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ // preparation
+ coco::Module *m = context->m();
+ coco::Data *d = context->d();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+ TflBufferContext &buffer_context = context->buffer();
+ const tflite::SubGraph *graph = context->graph();
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // input index 1 : kernel
+ // input index 2 : bias (optional)
+ bool hasBias = (opinputs.size() == 3);
+ assert(opinputs.size() == 2 || hasBias);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ker_idx = opinputs.at(1);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+ tensor::Shape &ker_shape = const_cast<tensor::Shape &>(tensor_context.shape(ker_idx));
+
+ assert(ifm_shape.rank() == 4);
+ assert(ofm_shape.rank() == 4);
+ assert(ker_shape.rank() == 4);
+
+ assert(ker_shape.dim(0) == 1); // value > 1 was not tested. This value seems 1 in DepthwiseConv2D
+ assert(ifm_shape.dim(3) == ofm_shape.dim(3));
+ assert(ofm_shape.dim(3) == ker_shape.dim(3));
+
+ // Create an input feature map object
+ auto *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an output feature map object
+ auto *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a kernel object
+ auto *ker_obj = m->entity()->object()->create<coco::KernelObject>();
+ auto *ker_bag = bags.bag(ker_idx);
+ ker_obj->bag(ker_bag);
+
+ // Adjust tflite kernel shape [1, h, w, channel_out] for coco::Kernel.
+ // coco::Kernel will have kernel.count = channel_out, kernel.depth = 1 ( == ker_shape.dim(0))
+ kernel::Shape new_shape{ker_shape.dim(3), 1, ker_shape.dim(1), ker_shape.dim(2)};
+ ker_obj->layout(coco::KernelLayouts::NHWC::create(new_shape));
+
+ // Create a kernel overlay for the kernel object
+ // TODO : support for other types
+ d->f32()->allocate(ker_bag);
+
+ TflBufferContext::TflBuffer<float> buffer = buffer_context.tensor_buffer<float>(graph, ker_idx);
+
+ auto ker_spn = d->f32()->weight(ker_bag);
+
+ // Copy data from tflBuffer of [1, h, w, channel_out] shape to coco::Data, which will be accessed
+ // by coco::KernelLayouts::NHWC
+ for (auto n = 0; n < new_shape.count(); n++)
+ {
+ auto tfl_c = n;
+ for (auto h = 0; h < new_shape.height(); h++)
+ {
+ for (auto w = 0; w < new_shape.width(); w++)
+ {
+ auto hw = new_shape.height() * new_shape.width();
+ for (auto c = 0; c < new_shape.depth(); c++)
+ {
+ auto tfl_n = c;
+ auto hwc = hw * new_shape.depth();
+ auto wc = new_shape.width() * new_shape.depth();
+
+ ker_spn[n * hwc + h * wc + w * new_shape.depth() + c] =
+ buffer.ptr[tfl_n * hw * new_shape.count() + /* new_shape.count() is old c */
+ h * new_shape.width() * new_shape.count() + w * new_shape.count() + tfl_c];
+ }
+ }
+ }
+ }
+
+ // Create a Load op
+ auto load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a coco::Conv2D op for DepthwiseConv2D
+ auto coco_dconv2d = m->entity()->op()->create<coco::Conv2D>();
+
+ // populating objects and options such as stride and padding for DepthwiseConv2D
+ coco_dconv2d->ker(ker_obj);
+
+ // setting params passed from TFLITE DepthwiseConv2DOptions
+ auto dconv_params = op->builtin_options_as_DepthwiseConv2DOptions();
+
+ assert(dconv_params->depth_multiplier() == 1); // other depth_multiplier was not tested
+
+ coco_dconv2d->group(ifm_obj->asFeature()->shape().depth());
+
+ coco_dconv2d->stride()->vertical(dconv_params->stride_h());
+ coco_dconv2d->stride()->horizontal(dconv_params->stride_w());
+
+ coco::Padding2D padding = depthwiseConv2D_padding(dconv_params, ifm_shape, ker_shape);
+ coco_dconv2d->pad()->top(padding.top());
+ coco_dconv2d->pad()->bottom(padding.bottom());
+ coco_dconv2d->pad()->left(padding.left());
+ coco_dconv2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_dconv2d->arg(load);
+
+ // Object to store output for DepthwiseConv2D
+ auto *dconv2d_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *dconv2d_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ dconv2d_obj->bag(dconv2d_bag);
+ dconv2d_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create an Eval instruction for DepthwiseConv2D
+ auto dconv2d_ins = instr_builder(m).eval(dconv2d_obj, coco_dconv2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(dconv2d_ins);
+
+ // Last Object to make a copy to Output Object
+ coco::FeatureObject *last_obj = dconv2d_obj;
+
+ if (hasBias)
+ {
+ // When there is a bias, use btmp_obj as bias add output
+ // Bias is adding last_obj with bias weight values
+ auto *btmp_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *btmp_bag = m->entity()->bag()->create(num_elements(ofm_shape));
+ btmp_obj->bag(btmp_bag);
+ btmp_obj->layout(coco::FeatureLayouts::BHWC::create(ofm_obj->shape()));
+
+ int bias_idx = opinputs.at(2);
+
+ // Create an object for bias
+ auto bias_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *bias_bag = bags.bag(bias_idx);
+ bias_obj->bag(bias_bag);
+ bias_obj->layout(coco::FeatureLayouts::BC::create(ofm_obj->shape()));
+
+ // Create Op of conv2d output (last_obj) + bias values(bias_obj)
+ auto bias_add = op_builder(m).load(last_obj).load(bias_obj).add().pop();
+
+ // Create Instr as bias add result write to btmp_obj
+ auto bias_add_ins = instr_builder(m).eval(btmp_obj, bias_add);
+
+ // Append the instruction
+ blk->instr()->append(bias_add_ins);
+
+ // Update last_obj to btmp_obj
+ last_obj = btmp_obj;
+ }
+
+ // fused activation
+ coco::FeatureObject *act_output =
+ build_activation(dconv_params->fused_activation_function(), blk, last_obj);
+
+ // Create Copy Instr of last_obj to Output Object
+ auto copy_ins = instr_builder(m).copy(ofm_obj, act_output);
+ blk->instr()->append(copy_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h
new file mode 100644
index 000000000..b36b36b8f
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/DepthwiseConv2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_DEPTHWISECONV2D_H__
+#define __OP_DEPTHWISECONV2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
/**
 * @brief GraphBuilder for DepthwiseConv2D operator
 */
class DepthwiseConv2DGraphBuilder : public GraphBuilder
{
public:
  // Checks whether this TFLite operator can be imported by this builder
  bool validate(const tflite::Operator *op) const override;
  // Lowers the TFLite DepthwiseConv2D operator into coco IR inside the given context
  void build(const tflite::Operator *op, GraphBuilderContext *context) const override;
};
+
+} // namespace tflimport
+
+#endif // __OP_DEPTHWISECONV2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Div.cpp b/compiler/enco/frontend/tflite/src/Op/Div.cpp
new file mode 100644
index 000000000..6b71be2e6
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Div.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Div.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void DivGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr);
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : numerator
+ // input index 1 : denominator
+ // output index 0 : result
+ assert(opinputs.size() == 2);
+ assert(opoutputs.size() == 1);
+
+ tflite::ActivationFunctionType activation;
+ if (auto *options = op->builtin_options_as_DivOptions())
+ {
+ activation = options->fused_activation_function();
+ }
+ else
+ {
+ activation = tflite::ActivationFunctionType_NONE;
+ }
+
+ // TODO activation, e.g. ReLU
+ assert(activation == tflite::ActivationFunctionType_NONE);
+
+ auto num_idx = opinputs.at(0);
+ auto denom_idx = opinputs.at(1);
+ auto out_idx = opoutputs.at(0);
+
+ const tensor::Shape &num_shape = tensor_context.shape(num_idx);
+ const tensor::Shape &denom_shape = tensor_context.shape(denom_idx);
+ const tensor::Shape &out_shape = tensor_context.shape(out_idx);
+
+ // TODO Now input/output assumes Feature map, but Div should support generic object type
+ // Create an object for an input
+ auto *num_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *num_bag = bags.bag(num_idx);
+ num_obj->bag(num_bag);
+ num_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(num_shape)));
+
+ auto *denom_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *denom_bag = bags.bag(denom_idx);
+ denom_obj->bag(denom_bag);
+ denom_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(denom_shape)));
+
+ // Create an object for an output
+ auto *out_obj = m->entity()->object()->create<coco::FeatureObject>();
+ auto *out_bag = bags.bag(out_idx);
+ out_obj->bag(out_bag);
+ out_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(out_shape)));
+
+ // Create a Load ops for each input
+ auto coco_load_num = op_builder(m).load(num_obj).pop();
+ auto coco_load_denom = op_builder(m).load(denom_obj).pop();
+
+ // Create a Div op
+ auto coco_div = m->entity()->op()->create<coco::Div>();
+
+ // Link ops
+ coco_div->left(coco_load_num);
+ coco_div->right(coco_load_denom);
+
+ // Create an Eval instruction
+ auto eval_ins = instr_builder(m).eval(out_obj, coco_div);
+
+ // Append the instruction to the block
+ blk->instr()->append(eval_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Div.h b/compiler/enco/frontend/tflite/src/Op/Div.h
new file mode 100644
index 000000000..053d1a441
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Div.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_DIV_H__
+#define __OP_DIV_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
/**
 * @brief GraphBuilder for Div operator
 */
class DivGraphBuilder : public GraphBuilder
{
public:
  // Lowers the TFLite Div operator into coco IR inside the given context
  // NOTE(review): unlike the pooling builders, no validate() override is
  // provided — presumably the base-class default accepts everything; confirm
  void build(const tflite::Operator *op, GraphBuilderContext *) const override;
};
+
+} // namespace tflimport
+
+#endif // __OP_DIV_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp
new file mode 100644
index 000000000..ee4406425
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MaxPool2D.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Padding.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+bool MaxPool2DGraphBuilder::validate(const tflite::Operator *op) const
+{
+ auto const options = op->builtin_options_as_Pool2DOptions();
+
+ if ((options->stride_h() == 0) || (options->stride_w() == 0))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void MaxPool2DGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr); // check if init(..) is called
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // output index 0 : output feature
+ assert(opinputs.size() == 1);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+ // Create an object for an input feature map
+ coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Load op
+ coco::Op *coco_load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a MaxPool2D
+ coco::MaxPool2D *coco_maxpool2d = m->entity()->op()->create<coco::MaxPool2D>();
+ const tflite::Pool2DOptions *params = op->builtin_options_as_Pool2DOptions();
+
+ coco_maxpool2d->window()->height(params->filter_height());
+ coco_maxpool2d->window()->width(params->filter_width());
+
+ coco_maxpool2d->stride()->vertical(params->stride_h());
+ coco_maxpool2d->stride()->horizontal(params->stride_w());
+
+ coco::Padding2D padding =
+ pool2D_padding(params, ifm_shape, params->filter_width(), params->filter_height());
+
+ coco_maxpool2d->pad()->top(padding.top());
+ coco_maxpool2d->pad()->bottom(padding.bottom());
+ coco_maxpool2d->pad()->left(padding.left());
+ coco_maxpool2d->pad()->right(padding.right());
+
+ // Link ops
+ coco_maxpool2d->arg(coco_load);
+
+ // Create an Eval instruction
+ coco::Eval *ins = instr_builder(m).eval(ofm_obj, coco_maxpool2d);
+
+ // Append the instruction to the block
+ blk->instr()->append(ins);
+
+ // TODO activation, e.g., relu
+ assert(params->fused_activation_function() ==
+ tflite::ActivationFunctionType::ActivationFunctionType_NONE);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/MaxPool2D.h b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.h
new file mode 100644
index 000000000..06a828528
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/MaxPool2D.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_MAXPOOL2D_H__
+#define __OP_MAXPOOL2D_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
/**
 * @brief GraphBuilder for MaxPool2D operator
 *
 * (Fixed copy-paste error: the brief previously said "AvgPool2D".)
 */
class MaxPool2DGraphBuilder : public GraphBuilder
{
public:
  // Checks the operator's Pool2DOptions (e.g. rejects zero strides)
  bool validate(const tflite::Operator *op) const override;
  // Lowers the TFLite MaxPool2D operator into coco IR inside the given context
  void build(const tflite::Operator *op, GraphBuilderContext *) const override;
};
+
+} // namespace tflimport
+
+#endif // __OP_MAXPOOL2D_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Padding.cpp b/compiler/enco/frontend/tflite/src/Op/Padding.cpp
new file mode 100644
index 000000000..9a0e4ef41
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Padding.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Padding.h"
+
+#include "Convert.h"
+#include "TensorBags.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <map>
+#include <sstream>
+#include <algorithm>
+#include <cassert>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+coco::Padding2D get_padding(const tensor::Shape &ifm_shape, const int kernel_w, const int kernel_h,
+ tflite::Padding padding, int stride_w, int stride_h,
+ int dilation_w_factor, int dilation_h_factor)
+{
+ assert(stride_w != 0);
+ assert(stride_h != 0);
+ assert(ifm_shape.rank() == 4);
+
+ /**
+ * Compute [top padding + bottom padding] (or [left padding + right padding]).
+ * If this returns an even number, top = return value / 2 and bottom = return value - top
+ * If this returns an odd number, top = return value / 2 and bottom = return value - top (so,
+ * bottom = top + 1)
+ *
+ * Code based on https://www.tensorflow.org/api_guides/python/nn#Convolution
+ */
+ auto compute_padding = [](tflite::Padding padding, int stride, int dilation_rate, int in_size,
+ int filter_size) {
+ int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
+ if (padding == tflite::Padding_SAME)
+ {
+ if (in_size % stride == 0)
+ return std::max(effective_filter_size - stride, 0);
+ else
+ return std::max(effective_filter_size - (in_size % stride), 0);
+ }
+ else // padding == VALID
+ {
+ return 0;
+ }
+ };
+
+ // ifm shape is from order of NHWC. ifm W = dim(2), ifm H = dim(1)
+ int padding_w = compute_padding(padding, stride_w, dilation_w_factor, ifm_shape.dim(2), kernel_w);
+ int padding_h = compute_padding(padding, stride_h, dilation_h_factor, ifm_shape.dim(1), kernel_h);
+
+ coco::Padding2D coco_padding;
+ coco_padding.top(padding_h / 2).bottom(padding_h - padding_h / 2);
+ coco_padding.left(padding_w / 2).right(padding_w - padding_w / 2);
+
+ return coco_padding;
+}
+
+coco::Padding2D pool2D_padding(const tflite::Pool2DOptions *options, const tensor::Shape &ifm_shape,
+ const int filter_w, const int filter_h)
+{
+ return get_padding(ifm_shape, filter_w, filter_h, options->padding(), options->stride_w(),
+ options->stride_h(), 1, 1);
+}
+
+coco::Padding2D conv2D_padding(const tflite::Conv2DOptions *options, const tensor::Shape &ifm_shape,
+ const tensor::Shape &kernel_shape)
+{
+ return get_padding(ifm_shape, kernel_shape.dim(2), kernel_shape.dim(1), /* kernel layout: NHWC */
+ options->padding(), options->stride_w(), options->stride_h(),
+ options->dilation_w_factor(), options->dilation_h_factor());
+}
+
+coco::Padding2D depthwiseConv2D_padding(const tflite::DepthwiseConv2DOptions *options,
+ const tensor::Shape &ifm_shape,
+ const tensor::Shape &kernel_shape)
+{
+ return get_padding(ifm_shape, kernel_shape.dim(2), kernel_shape.dim(1), /* kernel layout: NHWC */
+ options->padding(), options->stride_w(), options->stride_h(),
+ options->dilation_w_factor(), options->dilation_h_factor());
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Padding.h b/compiler/enco/frontend/tflite/src/Op/Padding.h
new file mode 100644
index 000000000..ac84adeb7
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Padding.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_PADDING_H__
+#define __OP_PADDING_H__
+
+#include <coco/IR/Padding2D.h>
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <schema_generated.h>
+
using namespace nncc::core::ADT;
// NOTE(review): using-directive at header scope leaks this namespace into
// every includer — consider qualifying 'tensor::Shape' in the declarations
// instead; TODO confirm against project convention

namespace tflimport
{

/// @brief Compute 2D padding for a TFLite Pool2D operator (explicit filter size)
coco::Padding2D pool2D_padding(const tflite::Pool2DOptions *options, const tensor::Shape &ifm_shape,
                               const int filter_w, const int filter_h);

/// @brief Compute 2D padding for a TFLite Conv2D operator (kernel shape in NHWC)
coco::Padding2D conv2D_padding(const tflite::Conv2DOptions *options, const tensor::Shape &ifm_shape,
                               const tensor::Shape &kernel_shape);

/// @brief Compute 2D padding for a TFLite DepthwiseConv2D operator (kernel shape in NHWC)
coco::Padding2D depthwiseConv2D_padding(const tflite::DepthwiseConv2DOptions *options,
                                        const tensor::Shape &ifm_shape,
                                        const tensor::Shape &kernel_shape);

} // namespace tflimport
+
+#endif // __OP_PADDING_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU.cpp b/compiler/enco/frontend/tflite/src/Op/ReLU.cpp
new file mode 100644
index 000000000..4922f4d1f
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void ReLUGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr); // check if init(..) is called
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // output index 0 : output feature
+ assert(opinputs.size() == 1);
+ assert(opoutputs.size() == 1);
+
+ auto ifm_idx = opinputs.at(0);
+ auto ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+ // Create an object for an input feature map
+ coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Load op
+ auto coco_load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a ReLU
+ auto coco_relu = m->entity()->op()->create<coco::ReLU>();
+
+ // Link ops
+ coco_relu->arg(coco_load);
+
+ // Create an Eval instruction
+ auto eval_ins = instr_builder(m).eval(ofm_obj, coco_relu);
+
+ // Append the instruction to the block
+ blk->instr()->append(eval_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU.h b/compiler/enco/frontend/tflite/src/Op/ReLU.h
new file mode 100644
index 000000000..c78400d7e
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RELU_H__
+#define __OP_RELU_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
/**
 * @brief GraphBuilder for ReLU operator
 */
class ReLUGraphBuilder : public GraphBuilder
{
public:
  // Lowers the TFLite ReLU operator into coco IR inside the given context
  void build(const tflite::Operator *op, GraphBuilderContext *) const override;
};
+
+} // namespace tflimport
+
+#endif // __OP_RELU_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU6.cpp b/compiler/enco/frontend/tflite/src/Op/ReLU6.cpp
new file mode 100644
index 000000000..936fda3e2
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU6.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU6.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void ReLU6GraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr); // check if init(..) is called
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorContext &tensor_context = context->tensor();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // output index 0 : output feature
+ assert(opinputs.size() == 1);
+ assert(opoutputs.size() == 1);
+
+ int ifm_idx = opinputs.at(0);
+ int ofm_idx = opoutputs.at(0);
+
+ const tensor::Shape &ifm_shape = tensor_context.shape(ifm_idx);
+ const tensor::Shape &ofm_shape = tensor_context.shape(ofm_idx);
+
+ // Create an object for an input feature map
+ coco::FeatureObject *ifm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ifm_bag = bags.bag(ifm_idx);
+ ifm_obj->bag(ifm_bag);
+ ifm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ifm_shape)));
+
+ // Create an object for an output feature map
+ coco::FeatureObject *ofm_obj = m->entity()->object()->create<coco::FeatureObject>();
+ coco::Bag *ofm_bag = bags.bag(ofm_idx);
+ ofm_obj->bag(ofm_bag);
+ ofm_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(ofm_shape)));
+
+ // Create a Load op
+ auto coco_load = op_builder(m).load(ifm_obj).pop();
+
+ // Create a ReLU6
+ auto coco_relu6 = m->entity()->op()->create<coco::ReLU6>();
+
+ // Link ops
+ coco_relu6->arg(coco_load);
+
+ // Create an Eval instruction
+ auto eval_ins = instr_builder(m).eval(ofm_obj, coco_relu6);
+
+ // Append the instruction to the block
+ blk->instr()->append(eval_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/ReLU6.h b/compiler/enco/frontend/tflite/src/Op/ReLU6.h
new file mode 100644
index 000000000..10bcd4f71
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/ReLU6.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RELU6_H__
+#define __OP_RELU6_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
/**
 * @brief GraphBuilder for ReLU6 operator
 */
class ReLU6GraphBuilder : public GraphBuilder
{
public:
  // Lowers the TFLite ReLU6 operator into coco IR inside the given context
  void build(const tflite::Operator *op, GraphBuilderContext *) const override;
};
+
+} // namespace tflimport
+
+#endif // __OP_RELU6_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Reshape.cpp b/compiler/enco/frontend/tflite/src/Op/Reshape.cpp
new file mode 100644
index 000000000..9bd473fa9
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Reshape.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Reshape.h"
+
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+void ReshapeGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+ assert(context != nullptr); // check if init(..) is called
+
+ coco::Module *m = context->m();
+ coco::Block *blk = context->block();
+ TensorBags &bags = context->bags();
+
+ IndexVector opinputs = as_index_vector(op->inputs());
+ IndexVector opoutputs = as_index_vector(op->outputs());
+
+ // these are fixed in tflite
+ // input index 0 : input feature
+ // input index 1 : output shape (int32_t), (optional or not, is not clear)
+ // output index 0 : output feature
+ assert(opinputs.size() == 1 || opinputs.size() == 2);
+ assert(opoutputs.size() == 1);
+
+ // Note: there are actually 3 places where we can get output shape from
+ // current TF lite implementation. From output operand shape, second input,
+ // and ReshapeOption (new_shape). Here we use output operand shape
+ int ifm_idx = opinputs.at(0);
+ int ofm_idx = opoutputs.at(0);
+
+ auto ifm_bag = bags.bag(ifm_idx);
+ auto ofm_bag = bags.bag(ofm_idx);
+
+ // TODO: move to InstrBuilder as 'shuffle_elements()'
+ // Create a 1:1 shuffle instruction from ifm into ofm
+ // Note: Reshape is change of shape information and there is no value change
+ // in the bag itself. We implement this as just make a element wise copy of
+ // the bag from input to output. So there is no need of 'reshape' operator
+ auto shuffle_ins = m->entity()->instr()->create<coco::Shuffle>();
+ auto num_elem = ifm_bag->size();
+
+ assert(num_elem == ofm_bag->size());
+
+ shuffle_ins->from(ifm_bag);
+ shuffle_ins->into(ofm_bag);
+
+ for (uint32_t n = 0; n < num_elem; ++n)
+ {
+ const auto from = coco::ElemID(n);
+ const auto into = coco::ElemID(n);
+
+ shuffle_ins->insert(from, into);
+ }
+
+ // Append the instruction
+ blk->instr()->append(shuffle_ins);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Reshape.h b/compiler/enco/frontend/tflite/src/Op/Reshape.h
new file mode 100644
index 000000000..7447b56c8
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Reshape.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_RESHAPE_H__
+#define __OP_RESHAPE_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
/**
 * @brief GraphBuilder for Reshape operator
 */
class ReshapeGraphBuilder : public GraphBuilder
{
public:
  // Lowers the TFLite Reshape operator into coco IR inside the given context
  void build(const tflite::Operator *op, GraphBuilderContext *) const override;
};
+
+} // namespace tflimport
+
+#endif // __OP_RESHAPE_H__
diff --git a/compiler/enco/frontend/tflite/src/Op/Sub.cpp b/compiler/enco/frontend/tflite/src/Op/Sub.cpp
new file mode 100644
index 000000000..62973bb22
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Sub.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sub.h"
+
+#include "Convert.h"
+#include "IRBuilder.h"
+#include "GraphBuilder.h"
+#include "Activation.h"
+
+#include <morph/tflite.h>
+#include <coco/IR/Module.h>
+#include <coco/IR/FeatureLayouts.h>
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <schema_generated.h>
+
+#include <cassert>
+
+using namespace nncc::core::ADT;
+using namespace morph::tflite;
+
+namespace tflimport
+{
+
+/**
+ * @brief Lower a TFLite Sub operator into a coco Eval(Sub) instruction
+ *
+ * Creates Load ops for the two input feature maps, a coco::Sub over them,
+ * and appends an Eval instruction that stores the result into the output
+ * feature object. Fused activations are not supported yet.
+ */
+void SubGraphBuilder::build(const tflite::Operator *op, GraphBuilderContext *context) const
+{
+  assert(context != nullptr); // check if init(..) is called
+
+  coco::Module *m = context->m();
+  coco::Block *blk = context->block();
+  TensorContext &tensor_context = context->tensor();
+  TensorBags &bags = context->bags();
+
+  IndexVector opinputs = as_index_vector(op->inputs());
+  IndexVector opoutputs = as_index_vector(op->outputs());
+
+  // these are fixed in tflite
+  // input index 0 : left input feature
+  // input index 1 : right input feature
+  // output index 0 : output feature
+  assert(opinputs.size() == 2);
+  assert(opoutputs.size() == 1);
+
+  // Default parameter values are referenced from schema_generated.h
+  tflite::ActivationFunctionType activation = tflite::ActivationFunctionType_NONE;
+
+  // SubOptions may be absent; only read the fused activation when present
+  if (auto *params = op->builtin_options_as_SubOptions())
+  {
+    activation = params->fused_activation_function();
+  }
+
+  // TODO Support fused activations (e.g., ReLU); only NONE is handled now
+  assert(activation == tflite::ActivationFunctionType_NONE);
+
+  // Construct a vector of input objects
+  std::vector<coco::FeatureObject *> input_objects;
+
+  for (auto &input_index : opinputs)
+  {
+    // Add objects for input feature map
+    const tensor::Shape &input_shape = tensor_context.shape(input_index);
+    coco::FeatureObject *input_obj = m->entity()->object()->create<coco::FeatureObject>();
+    coco::Bag *input_bag = bags.bag(input_index);
+    input_obj->bag(input_bag);
+    input_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(input_shape)));
+
+    input_objects.emplace_back(input_obj);
+  }
+
+  // Create an object for an output feature map
+  int const output_index = opoutputs.at(0);
+  const tensor::Shape &output_shape = tensor_context.shape(output_index);
+  coco::FeatureObject *output_obj = m->entity()->object()->create<coco::FeatureObject>();
+  coco::Bag *output_bag = bags.bag(output_index);
+  output_obj->bag(output_bag);
+  output_obj->layout(coco::FeatureLayouts::BHWC::create(as_feature_shape(output_shape)));
+
+  // Create Load ops
+  auto left_load = op_builder(m).load(input_objects[0]).pop();
+  auto right_load = op_builder(m).load(input_objects[1]).pop();
+
+  // Create a Sub
+  auto coco_sub = m->entity()->op()->create<coco::Sub>();
+
+  coco_sub->left(left_load);
+  coco_sub->right(right_load);
+
+  // Create an Eval instruction
+  auto eval = instr_builder(m).eval(output_obj, coco_sub);
+
+  // Append the instruction to the block
+  blk->instr()->append(eval);
+}
+
+} // namespace tflimport
diff --git a/compiler/enco/frontend/tflite/src/Op/Sub.h b/compiler/enco/frontend/tflite/src/Op/Sub.h
new file mode 100644
index 000000000..580d8baa3
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/Op/Sub.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_SUB_H__
+#define __OP_SUB_H__
+
+#include "GraphBuilder.h"
+
+#include <schema_generated.h>
+
+namespace tflimport
+{
+
+/**
+ * @brief GraphBuilder for Sub operator
+ *
+ * Lowers a TFLite Sub operator (two input feature maps, one output)
+ * into coco IR.
+ */
+class SubGraphBuilder : public GraphBuilder
+{
+public:
+  /// @brief Append the lowered instructions for @p op to the context's block
+  void build(const tflite::Operator *op, GraphBuilderContext *) const override;
+};
+
+} // namespace tflimport
+
+#endif // __OP_SUB_H__
diff --git a/compiler/enco/frontend/tflite/src/RawModel.h b/compiler/enco/frontend/tflite/src/RawModel.h
new file mode 100644
index 000000000..02946f1d7
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/RawModel.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RAW_MODEL_H__
+#define __RAW_MODEL_H__
+
+#include "schema_generated.h"
+
+/**
+ * @brief Interface over an in-memory TensorFlow Lite model
+ *
+ * Implementations own the backing storage; the pointer returned by
+ * model() is valid only while the RawModel instance is alive.
+ */
+struct RawModel
+{
+  virtual ~RawModel() = default;
+
+  /// @brief Root of the flatbuffer-encoded model
+  virtual const tflite::Model *model(void) const = 0;
+};
+
+#endif // __RAW_MODEL_H__
diff --git a/compiler/enco/frontend/tflite/src/RawModelLoader.cpp b/compiler/enco/frontend/tflite/src/RawModelLoader.cpp
new file mode 100644
index 000000000..5c127f37c
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/RawModelLoader.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RawModelLoader.h"
+
+#include "cwrap/Fildes.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+namespace
+{
+
+/**
+ * @brief RawModel backed by an mmap-ed file
+ *
+ * Owns the file descriptor and the mapped region; both are released
+ * in the destructor.
+ */
+class MemoryMappedRawModel final : public RawModel
+{
+public:
+  /**
+   * @require fd and data SHOULD be valid
+   */
+  explicit MemoryMappedRawModel(int fd, void *data, size_t size) : _fd{fd}, _data{data}, _size{size}
+  {
+    // DO NOTHING
+  }
+
+public:
+  ~MemoryMappedRawModel()
+  {
+    munmap(_data, _size);
+    close(_fd);
+  }
+
+public:
+  // Non-copyable and non-movable: this object uniquely owns _fd/_data.
+  // The assignment operators are deleted explicitly as well - with only
+  // the constructors deleted, the implicitly-generated copy assignment
+  // would shallow-copy the handles and cause a double munmap/close.
+  MemoryMappedRawModel(const MemoryMappedRawModel &) = delete;
+  MemoryMappedRawModel(MemoryMappedRawModel &&) = delete;
+  MemoryMappedRawModel &operator=(const MemoryMappedRawModel &) = delete;
+  MemoryMappedRawModel &operator=(MemoryMappedRawModel &&) = delete;
+
+public:
+  /// @brief Interpret the mapped bytes as a flatbuffer-encoded TFLite model
+  const tflite::Model *model(void) const override { return tflite::GetModel(_data); }
+
+private:
+  int _fd = -1;
+  void *_data = nullptr;
+  size_t _size = 0;
+};
+
+} // namespace
+
+/**
+ * @brief Load a TFLite model file by memory-mapping it
+ *
+ * @param path path of the .tflite file to load
+ * @return a RawModel owning the mapping, or nullptr on any failure
+ *         (open/fstat/mmap error, non-regular file, or empty file)
+ */
+std::unique_ptr<RawModel> load_from(const std::string &path)
+{
+  cwrap::Fildes fildes{open(path.c_str(), O_RDONLY)};
+
+  if (fildes.get() == -1)
+  {
+    // Return nullptr on open failure
+    return nullptr;
+  }
+
+  struct stat st;
+  if (fstat(fildes.get(), &st) == -1)
+  {
+    // Return nullptr on fstat failure
+    return nullptr;
+  }
+
+  // mmap(2) fails with EINVAL for a zero-length mapping, and mapping a
+  // non-regular file (e.g. a directory) makes no sense for a model file
+  if (!S_ISREG(st.st_mode) || st.st_size <= 0)
+  {
+    return nullptr;
+  }
+
+  // st_size is a signed off_t; the check above guarantees it is positive
+  auto const size = static_cast<size_t>(st.st_size);
+  auto data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fildes.get(), 0);
+
+  if (data == MAP_FAILED)
+  {
+    // Return nullptr on mmap failure
+    return nullptr;
+  }
+
+  return std::unique_ptr<RawModel>{new MemoryMappedRawModel(fildes.release(), data, size)};
+}
diff --git a/compiler/enco/frontend/tflite/src/RawModelLoader.h b/compiler/enco/frontend/tflite/src/RawModelLoader.h
new file mode 100644
index 000000000..5d93528de
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/RawModelLoader.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __RAW_MODEL_LOADER_H__
+#define __RAW_MODEL_LOADER_H__
+
+#include "RawModel.h"
+
+// Include what we use: this header names std::unique_ptr and std::string,
+// so it must not rely on transitive includes for <memory> and <string>
+#include <memory>
+#include <string>
+
+/**
+ * @brief Load TensorFlow Lite model (as a RawModel) from a given path
+ *
+ * @note May return a nullptr
+ */
+std::unique_ptr<RawModel> load_from(const std::string &path);
+
+#endif // __RAW_MODEL_LOADER_H__
diff --git a/compiler/enco/frontend/tflite/src/TensorBags.h b/compiler/enco/frontend/tflite/src/TensorBags.h
new file mode 100644
index 000000000..29558b85e
--- /dev/null
+++ b/compiler/enco/frontend/tflite/src/TensorBags.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TENSOR_BAGS_H__
+#define __TENSOR_BAGS_H__
+
+#include "Convert.h"
+
+#include <coco/IR/Data.h>
+#include <coco/IR/Module.h>
+
+#include <schema_generated.h>
+
+#include <map>
+
+using namespace nncc::core::ADT;
+
+namespace tflimport
+{
+
+/**
+ * @brief Pre-creates coco:Bags for each operands(tensors)
+ */
+class TensorBags
+{
+public:
+  /**
+   * @brief Create one coco::Bag per tensor in the subgraph
+   *
+   * Each bag is sized by the number of elements of the tensor's shape.
+   */
+  void prepare(const tflite::SubGraph *graph, std::unique_ptr<coco::Module> &m)
+  {
+    for (uint32_t tensor_id = 0; tensor_id < graph->tensors()->size(); ++tensor_id)
+    {
+      auto const tensor_info = graph->tensors()->Get(tensor_id);
+      auto const tensor_shape = as_tensor_shape(tensor_info->shape());
+      auto const tensor_bag = m->entity()->bag()->create(num_elements(tensor_shape));
+
+      _bag_ctx[tensor_id] = tensor_bag;
+    }
+  }
+
+  /**
+   * @brief Return the bag prepared for the given tensor id
+   *
+   * Uses std::map::at instead of operator[]: operator[] would silently
+   * default-insert (and return) a nullptr entry for an unknown or
+   * not-yet-prepared tensor id, handing callers a null bag.
+   */
+  coco::Bag *bag(int32_t tensor_id) { return _bag_ctx.at(tensor_id); }
+
+public:
+  std::map<uint32_t, coco::Bag *>::iterator begin() { return _bag_ctx.begin(); }
+
+  std::map<uint32_t, coco::Bag *>::iterator end() { return _bag_ctx.end(); }
+
+private:
+  std::map<uint32_t, coco::Bag *> _bag_ctx;
+};
+
+} // namespace tflimport
+
+#endif // __TENSOR_BAGS_H__