Diffstat (limited to 'runtimes/neurun/core/include/util/ShapeInference.h')
-rw-r--r--  runtimes/neurun/core/include/util/ShapeInference.h  | 61
1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/core/include/util/ShapeInference.h b/runtimes/neurun/core/include/util/ShapeInference.h
new file mode 100644
index 000000000..54076199b
--- /dev/null
+++ b/runtimes/neurun/core/include/util/ShapeInference.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_SHAPE_INFERENCE_H__
+#define __NEURUN_GRAPH_SHAPE_INFERENCE_H__
+
+#include "model/operation/AvgPool2DNode.h"
+#include "model/operation/ConcatNode.h"
+#include "model/operation/MaxPool2DNode.h"
+#include "model/operation/Conv2DNode.h"
+#include "model/operation/DepthwiseConv2DNode.h"
+#include "model/Operands.h"
+#include "model/Index.h"
+#include "model/Layout.h"
+
+namespace neurun
+{
+namespace shape_inference
+{
+
+using Shapes = std::vector<model::Shape>;
+
+Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape);
+
+Shapes inferAvgPoolShape(const model::Shape &in_shape,
+ const model::operation::AvgPool2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::ConcatNode::Param &param);
+
+Shapes inferMaxPoolShape(const model::Shape &in_shape,
+ const model::operation::MaxPool2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape,
+ const model::operation::Conv2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape,
+ const model::operation::DepthwiseConv2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
+
+Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape);
+
+} // namespace shape_inference
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_SHAPE_INFERENCE_H__
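
The header above only declares the interface; the output-dimension arithmetic lives in the corresponding implementation file. The following self-contained sketch illustrates the standard NHWC output-size computation that a routine such as inferConv2DShape is generally expected to perform, assuming a {C_out, KH, KW, C_in} kernel layout as in NNAPI/TFLite. Every name in it (PaddingType, Size2D, outDim, inferConv2DSpatial) is hypothetical and written for illustration; it does not use the real neurun::model types or the actual Conv2DNode::Param fields.

// Hypothetical, self-contained sketch of NHWC convolution shape inference.
// None of these names come from neurun; they only illustrate the arithmetic.
#include <cstdint>
#include <stdexcept>

enum class PaddingType { SAME, VALID };

struct Size2D
{
  int32_t h;
  int32_t w;
};

// Output spatial extent for one dimension.
// SAME  : ceil(in / stride)            -- input is padded so the window always fits
// VALID : (in - kernel) / stride + 1   -- only fully covered positions are kept
inline int32_t outDim(int32_t in, int32_t kernel, int32_t stride, PaddingType pad)
{
  if (stride <= 0)
    throw std::invalid_argument{"stride must be positive"};
  if (pad == PaddingType::SAME)
    return (in + stride - 1) / stride;
  return (in - kernel) / stride + 1;
}

// Given an NHWC input {N, H, W, C_in} and a {C_out, KH, KW, C_in} kernel,
// the inferred output is {N, OH, OW, C_out}; this computes {OH, OW}.
inline Size2D inferConv2DSpatial(Size2D in, Size2D kernel, Size2D stride, PaddingType pad)
{
  return Size2D{outDim(in.h, kernel.h, stride.h, pad), outDim(in.w, kernel.w, stride.w, pad)};
}

Pooling shape inference follows the same two formulas with the kernel replaced by the pooling window, and depthwise convolution differs mainly in how the output channel count is derived (channel multiplier times input channels rather than the kernel's output-channel dimension).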
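Likewise, a minimal sketch of what inferConcatShape presumably computes: the output matches the inputs in every dimension except the concatenation axis, whose extent is the sum over all inputs. The Dims alias and inferConcat helper below are stand-ins, not the real model::Shape API.

// Hypothetical concat shape inference over plain dimension vectors.
#include <cstdint>
#include <stdexcept>
#include <vector>

using Dims = std::vector<int32_t>;

// All inputs must share rank and agree on every dimension except `axis`;
// the extents along `axis` are summed to produce the output extent.
inline Dims inferConcat(const std::vector<Dims> &inputs, size_t axis)
{
  if (inputs.empty())
    throw std::invalid_argument{"concat needs at least one input"};
  Dims out = inputs.front();
  if (axis >= out.size())
    throw std::invalid_argument{"axis out of range"};
  for (size_t i = 1; i < inputs.size(); ++i)
  {
    if (inputs[i].size() != out.size())
      throw std::invalid_argument{"rank mismatch"};
    for (size_t d = 0; d < out.size(); ++d)
    {
      if (d == axis)
        out[d] += inputs[i][d];
      else if (inputs[i][d] != out[d])
        throw std::invalid_argument{"non-axis dimension mismatch"};
    }
  }
  return out;
}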