summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author박세희/On-Device Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>2019-09-17 10:35:20 +0900
committerGitHub Enterprise <noreply-CODE@samsung.com>2019-09-17 10:35:20 +0900
commit61111e53b413ae93853bc0ef60f6663a68233e2a (patch)
tree507656a46c744695066134f09d3b00ea6bf0aa47
parent019a0cd293b2c698c6ca3c1085450b6e820a43f8 (diff)
downloadnnfw-61111e53b413ae93853bc0ef60f6663a68233e2a.tar.gz
nnfw-61111e53b413ae93853bc0ef60f6663a68233e2a.tar.bz2
nnfw-61111e53b413ae93853bc0ef60f6663a68233e2a.zip
[moco-tf] Broadcast shape for FixShape (#7487)
This will introduce methods for shape broadcast in FixShapeTransform that will be used in shape inference of binary operation nodes with inputs having different shapes.

Signed-off-by: SaeHie Park <saehie.park@samsung.com>
-rw-r--r--compiler/moco-tf/src/Transforms/FixShapeTransform.cpp100
1 files changed, 100 insertions, 0 deletions
diff --git a/compiler/moco-tf/src/Transforms/FixShapeTransform.cpp b/compiler/moco-tf/src/Transforms/FixShapeTransform.cpp
index d7ab27124..17de7c262 100644
--- a/compiler/moco-tf/src/Transforms/FixShapeTransform.cpp
+++ b/compiler/moco-tf/src/Transforms/FixShapeTransform.cpp
@@ -156,6 +156,106 @@ loco::NodeShape as_node_shape(const ShapeInferenceData *shapedata)
}
/**
+ * @brief Create a higher-rank TensorShape following NumPy broadcasting semantics
+ *
+ * HOW TO USE:
+ *
+ *   auto expanded_tensor_shape = expand(tensor_shape).to(N);
+ */
+class TensorShapeExpander
+{
+public:
+  TensorShapeExpander(const loco::TensorShape &shape) : _shape{shape}
+  {
+    // DO NOTHING
+  }
+
+public:
+  /**
+   * @brief Return a copy of the wrapped shape grown to output_rank
+   *
+   * Existing axes are right-aligned and new leading axes are filled with
+   * dimension 1, following NumPy broadcast alignment.
+   */
+  loco::TensorShape to(uint32_t output_rank)
+  {
+    auto const rank = _shape.rank();
+    assert(rank <= output_rank && "Cannot shrink rank");
+
+    // number of leading axes that must be padded with dimension 1
+    auto const pad = output_rank - rank;
+
+    loco::TensorShape res;
+    res.rank(output_rank);
+
+    for (uint32_t axis = 0; axis < output_rank; ++axis)
+    {
+      if (axis < pad)
+        res.dim(axis) = 1;
+      else
+        res.dim(axis) = _shape.dim(axis - pad);
+    }
+
+    return res;
+  }
+
+private:
+  const loco::TensorShape _shape;
+};
+
+/**
+ * @brief Expand shape x and y to the same rank, aligning right and filling with 1
+ *
+ * Only the lower-rank operand is rewritten; the higher-rank operand (and both
+ * operands when the ranks already match) is left untouched.
+ */
+void expand_rank(loco::TensorShape &x, loco::TensorShape &y)
+{
+  auto x_rank = x.rank();
+  auto y_rank = y.rank();
+
+  if (x_rank == y_rank)
+    return;
+
+  auto xy_rank = std::max(x_rank, y_rank);
+
+  // Grow only the shorter shape; the longer one already has rank xy_rank
+  if (x_rank < y_rank)
+    x = TensorShapeExpander(x).to(xy_rank);
+  else
+    y = TensorShapeExpander(y).to(xy_rank);
+}
+
+/**
+ * @brief Return the element-wise broadcast shape of x and y, which must have equal rank
+ *
+ * Each axis must satisfy the NumPy broadcast rule: the two dimensions are equal,
+ * or at least one of them is 1. The output dimension is the larger of the two.
+ *
+ * @throw std::runtime_error when some axis violates the broadcast rule
+ */
+loco::TensorShape expand_dimension(const loco::TensorShape &x, const loco::TensorShape &y)
+{
+  assert(x.rank() == y.rank());
+
+  auto rank = x.rank();
+
+  loco::TensorShape output_shape;
+
+  output_shape.rank(rank);
+  // NOTE rank() is unsigned; use an unsigned induction variable to avoid a
+  //      signed/unsigned comparison in the loop condition
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    assert(x.dim(axis).known() && y.dim(axis).known());
+
+    auto x_dim = x.dim(axis).value();
+    auto y_dim = y.dim(axis).value();
+
+    // each dimension of x and y should be same or one must be 1 if different
+    if (!((x_dim == y_dim) || (x_dim == 1 || y_dim == 1)))
+      throw std::runtime_error("Cannot produce expand_dimension of two shapes");
+
+    output_shape.dim(axis) = std::max(x_dim, y_dim);
+  }
+
+  return output_shape;
+}
+
+/**
+ * @brief Compute the NumPy broadcast shape of two (possibly different-rank) shapes
+ */
+loco::TensorShape broadcast_shape(const loco::TensorShape &x, const loco::TensorShape &y)
+{
+  // Work on copies so the callers' shapes are not modified
+  loco::TensorShape lhs = x;
+  loco::TensorShape rhs = y;
+
+  // Align the ranks first, then merge the dimensions axis by axis
+  expand_rank(lhs, rhs);
+
+  return expand_dimension(lhs, rhs);
+}
+
+/**
* @brief Copy ShapeInferenceData from loco::Node pointer src to dst
*/
bool copy_shapedata(const loco::Node *src, loco::Node *dst)