author     SaeHie Park (박세희) / On-Device Lab (SR) / Principal Engineer / Samsung Electronics <saehie.park@samsung.com>   2019-07-04 12:59:10 +0900
committer  GitHub Enterprise <noreply-CODE@samsung.com>   2019-07-04 12:59:10 +0900
commit     cd904652bbd740139e2cc8fce3836e24c385ae90 (patch)
tree       9b61499ccd211bbde73cc380054d7c5156938aed /contrib
parent     9d6f1f8399ada5559509cc7fa4daeb76dd481cfb (diff)
[moco/tf] Introduce TFConv2D IR (#4068)
* [moco/tf] Introduce TFConv2D IR

This introduces the TFConv2D TensorFlow dialect IR for Conv2D, along with related changes.

Signed-off-by: SaeHie Park <saehie.park@samsung.com>

* use reference
* as type
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/moco-tf/src/Dialect/TFNodes.h                  |  1
-rw-r--r--  contrib/moco-tf/src/Dialect/TFNodes.lst                |  1
-rw-r--r--  contrib/moco-tf/src/IR/TFConv2D.h                      | 67
-rw-r--r--  contrib/moco-tf/src/IR/TFConv2D.test.cpp               | 35
-rw-r--r--  contrib/moco-tf/src/Transforms/FixPaddingTransform.cpp |  6
-rw-r--r--  contrib/moco-tf/src/Transforms/FixShapeTransform.cpp   | 98
6 files changed, 208 insertions(+), 0 deletions(-)
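
For orientation, the sketch below shows roughly how an importer might create and populate the TFConv2D node added by this commit. It is illustrative only and is not part of the commit: the helper function and the concrete attribute values are made up, and it assumes the IR/TFConv2D.h header from this tree plus the public loco graph API.

#include "IR/TFConv2D.h"

#include <loco.h>

#include <cstdint>
#include <vector>

// Illustrative helper (not part of the commit): create a TFConv2D node in a
// loco graph and fill in the attributes that IR/TFConv2D.h exposes.
void make_example_conv2d(loco::Graph *graph, loco::Node *ifm, loco::Node *ker)
{
  auto conv2d = graph->nodes()->create<moco::tf::TFConv2D>();

  // The node has exactly two inputs: input feature map and kernel
  conv2d->ifm(ifm);
  conv2d->ker(ker);

  // TensorFlow-style attributes are kept as plain strings and an int64 vector
  conv2d->padding("SAME");
  conv2d->data_layout("NHWC");
  conv2d->strides(std::vector<int64_t>{1, 2, 2, 1}); // N, H, W, C order for NHWC
}

The FixShapeTransform change in this commit then reads these same attributes back to infer the output shape.
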
diff --git a/contrib/moco-tf/src/Dialect/TFNodes.h b/contrib/moco-tf/src/Dialect/TFNodes.h
index 7a7fd62f2..9ca83485d 100644
--- a/contrib/moco-tf/src/Dialect/TFNodes.h
+++ b/contrib/moco-tf/src/Dialect/TFNodes.h
@@ -18,6 +18,7 @@
#define __MOCO_TF_DIALECT_TFNODES_H__
#include "IR/TFAdd.h"
+#include "IR/TFConv2D.h"
#include "IR/TFFusedBatchNorm.h"
#include "IR/TFMul.h"
diff --git a/contrib/moco-tf/src/Dialect/TFNodes.lst b/contrib/moco-tf/src/Dialect/TFNodes.lst
index fcd00bcbd..3821ba9ce 100644
--- a/contrib/moco-tf/src/Dialect/TFNodes.lst
+++ b/contrib/moco-tf/src/Dialect/TFNodes.lst
@@ -8,5 +8,6 @@
// TENSORFLOW_NODE(OPCODE, CLASS)
TENSORFLOW_NODE(Add, TFAdd)
+TENSORFLOW_NODE(Conv2D, TFConv2D)
TENSORFLOW_NODE(FusedBatchNorm, TFFusedBatchNorm)
TENSORFLOW_NODE(Mul, TFMul)
diff --git a/contrib/moco-tf/src/IR/TFConv2D.h b/contrib/moco-tf/src/IR/TFConv2D.h
new file mode 100644
index 000000000..7fb6e3d96
--- /dev/null
+++ b/contrib/moco-tf/src/IR/TFConv2D.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOCO_TF_IR_TFCONV2D_H__
+#define __MOCO_TF_IR_TFCONV2D_H__
+
+#include "Dialect/TFNodeDecl.h"
+
+#include "Convert.h"
+
+#include <loco/IR/Stride.h>
+#include <loco/IR/Pad.h>
+
+#include <string>
+
+namespace moco
+{
+namespace tf
+{
+
+/// @note These may be introduced as separate classes
+using TFPadding = std::string;
+using TFDataLayout = std::string;
+
+class TFConv2D final : public loco::FixedArityNode<2, TFNodeImpl<TFOpcode::Conv2D>>
+{
+public:
+ loco::Node *ifm(void) const { return at(0)->node(); }
+ void ifm(Node *node) { at(0)->node(node); }
+
+ loco::Node *ker(void) const { return at(1)->node(); }
+ void ker(Node *node) { at(1)->node(node); }
+
+public:
+ const TFPadding &padding(void) const { return _padding; }
+ void padding(const TFPadding &padding) { _padding = padding; }
+
+ const TFDataLayout &data_layout(void) const { return _data_layout; }
+ void data_layout(const TFDataLayout &data_layout) { _data_layout = data_layout; }
+
+ const std::vector<int64_t> &strides(void) const { return _strides; }
+ void strides(const std::vector<int64_t> &strides) { _strides = strides; }
+
+private:
+ TFPadding _padding;
+ TFDataLayout _data_layout;
+ std::vector<int64_t> _strides;
+ // TODO Support "Dilation"
+};
+
+} // namespace tf
+} // namespace moco
+
+#endif // __MOCO_TF_IR_TFCONV2D_H__
diff --git a/contrib/moco-tf/src/IR/TFConv2D.test.cpp b/contrib/moco-tf/src/IR/TFConv2D.test.cpp
new file mode 100644
index 000000000..3ea0a8586
--- /dev/null
+++ b/contrib/moco-tf/src/IR/TFConv2D.test.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IR/TFConv2D.h"
+
+#include "Dialect/TFDialect.h"
+
+#include <gtest/gtest.h>
+
+TEST(TFConv2DTest, constructor)
+{
+ moco::tf::TFConv2D conv2d_node;
+
+ ASSERT_EQ(conv2d_node.dialect(), moco::tf::TFDialect::get());
+ ASSERT_EQ(conv2d_node.opcode(), moco::tf::TFOpcode::Conv2D);
+
+ ASSERT_EQ(conv2d_node.ifm(), nullptr);
+ ASSERT_EQ(conv2d_node.ker(), nullptr);
+ ASSERT_EQ(conv2d_node.padding(), "");
+ ASSERT_EQ(conv2d_node.data_layout(), "");
+ ASSERT_EQ(conv2d_node.strides().size(), 0);
+}
diff --git a/contrib/moco-tf/src/Transforms/FixPaddingTransform.cpp b/contrib/moco-tf/src/Transforms/FixPaddingTransform.cpp
index 8c9c358a7..d7cb62ca7 100644
--- a/contrib/moco-tf/src/Transforms/FixPaddingTransform.cpp
+++ b/contrib/moco-tf/src/Transforms/FixPaddingTransform.cpp
@@ -318,6 +318,12 @@ bool fix_padding(moco::tf::TFAdd *node)
return false;
}
+bool fix_padding(moco::tf::TFConv2D *node)
+{
+ // Nothing to do with padding
+ return false;
+}
+
bool fix_padding(moco::tf::TFFusedBatchNorm *node)
{
// Nothing to do with padding
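
A note on the FixShapeTransform diff that follows: the new fix_shape(TFConv2D *) computes the output height and width from the input size, kernel size, and stride using the standard TensorFlow VALID/SAME rules, with dilation fixed at 1 for now. The standalone snippet below simply replays that arithmetic on made-up numbers; the function name and example values are illustrative, not part of the commit.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>

// Same ceiling-division formulas as fix_shape(TFConv2D) below, factored into a
// tiny helper for illustration (dilation is assumed to be 1, as in the commit).
uint32_t conv_output_dim(uint32_t input, uint32_t kernel, uint32_t stride, const std::string &padding)
{
  if (padding == "VALID")
    return (input + stride - kernel) / stride; // == ceil((input - kernel + 1) / stride)
  assert(padding == "SAME");
  return (input + stride - 1) / stride; // == ceil(input / stride)
}

int main()
{
  // Example: 224x224 input, 3x3 kernel, stride 2
  std::cout << conv_output_dim(224, 3, 2, "VALID") << std::endl; // prints 111
  std::cout << conv_output_dim(224, 3, 2, "SAME") << std::endl;  // prints 112
  return 0;
}
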
diff --git a/contrib/moco-tf/src/Transforms/FixShapeTransform.cpp b/contrib/moco-tf/src/Transforms/FixShapeTransform.cpp
index 34cf0eb9a..356a8bbab 100644
--- a/contrib/moco-tf/src/Transforms/FixShapeTransform.cpp
+++ b/contrib/moco-tf/src/Transforms/FixShapeTransform.cpp
@@ -556,6 +556,104 @@ bool fix_shape(moco::tf::TFAdd *node)
return copy_shapedata(x, node);
}
+bool fix_shape(moco::tf::TFConv2D *node)
+{
+ LOGGER(l);
+
+ auto shapedata = node->annot<ShapeInferenceData>();
+ if (shapedata != nullptr)
+ {
+ // shape inference is already done
+ return false;
+ }
+ auto ifm = node->ifm();
+ auto ifm_shapedata = ifm->annot<ShapeInferenceData>();
+ if (ifm_shapedata == nullptr)
+ {
+ // input node shape inference is not ready
+ return false;
+ }
+
+ auto ker = node->ker();
+ auto ker_shapedata = ker->annot<ShapeInferenceData>();
+ if (ker_shapedata == nullptr)
+ {
+ return false;
+ }
+
+ auto padding = node->padding();
+
+ // TODO move this to some new Transformation...
+ auto strides = node->strides();
+ auto data_layout = as_DataLayout(node->data_layout());
+ loco::Stride<2> stride;
+ if (data_layout == DataLayout::NHWC)
+ {
+ stride.vertical(strides[1]);
+ stride.horizontal(strides[2]);
+ }
+ else if (data_layout == DataLayout::NCHW)
+ {
+ stride.vertical(strides[2]);
+ stride.horizontal(strides[3]);
+ }
+
+ auto ifm_tensor_shape = ifm_shapedata->tensor_shape(); // in NHWC
+ auto ker_tensor_shape = ker_shapedata->tensor_shape(); // in HWIO
+ assert(ifm_tensor_shape.rank() == 4);
+ assert(ker_tensor_shape.rank() == 4);
+
+ uint32_t input_height = ifm_tensor_shape.dim(1).value();
+ uint32_t input_width = ifm_tensor_shape.dim(2).value();
+ uint32_t stride_height = stride.vertical();
+ uint32_t stride_width = stride.horizontal();
+ uint32_t ker_height = ker_tensor_shape.dim(0).value();
+ uint32_t ker_width = ker_tensor_shape.dim(1).value();
+ uint32_t dilation_height = 1; // TODO Consider dilation
+ uint32_t dilation_width = 1;
+ uint32_t effective_ker_height = dilation_height * (ker_height - 1) + 1;
+ uint32_t effective_ker_width = dilation_width * (ker_width - 1) + 1;
+ uint32_t output_height;
+ uint32_t output_width;
+
+ if (padding == "VALID")
+ {
+ output_height = (input_height + stride_height - effective_ker_height) / stride_height;
+ output_width = (input_width + stride_width - effective_ker_width) / stride_width;
+ }
+ else if (padding == "SAME")
+ {
+ output_height = (input_height + stride_height - 1) / stride_height;
+ output_width = (input_width + stride_width - 1) / stride_width;
+ }
+ else
+ {
+ assert(false && "Unknown padding in fix_shape for TFConv2D");
+ }
+
+ loco::TensorShape ofm_tensor_shape;
+ ofm_tensor_shape.rank(4);
+ ofm_tensor_shape.dim(0) = ifm_tensor_shape.dim(0);
+ ofm_tensor_shape.dim(1) = output_height;
+ ofm_tensor_shape.dim(2) = output_width;
+ ofm_tensor_shape.dim(3) = ker_tensor_shape.dim(3);
+
+ auto shape_data = stdex::make_unique<ShapeInferenceData>();
+ shape_data->tensor_shape(ofm_tensor_shape);
+ node->annot(std::move(shape_data));
+
+ INFO(l) << "Fix TFConv2D shape = ifm(" << ifm_tensor_shape.dim(0).value() << ","
+ << ifm_tensor_shape.dim(1).value() << "," << ifm_tensor_shape.dim(2).value() << ","
+ << ifm_tensor_shape.dim(3).value() << "), "
+ << "ker(" << ker_tensor_shape.dim(0).value() << "," << ker_tensor_shape.dim(1).value()
+ << "," << ker_tensor_shape.dim(2).value() << "," << ker_tensor_shape.dim(3).value()
+ << ") "
+ << "--> ofm(" << ofm_tensor_shape.dim(0).value() << "," << ofm_tensor_shape.dim(1).value()
+ << "," << ofm_tensor_shape.dim(2).value() << "," << ofm_tensor_shape.dim(3).value() << ")"
+ << std::endl;
+ return true;
+}
+
bool fix_shape(moco::tf::TFFusedBatchNorm *node)
{
// Output shape is same as the input