Diffstat (limited to 'model-optimizer/extensions/front/caffe')
-rw-r--r--  model-optimizer/extensions/front/caffe/axpy.py                43
-rw-r--r--  model-optimizer/extensions/front/caffe/bn.py                  60
-rw-r--r--  model-optimizer/extensions/front/caffe/detection_output.py   13
-rw-r--r--  model-optimizer/extensions/front/caffe/flatten_ext.py         36
-rw-r--r--  model-optimizer/extensions/front/caffe/interp_ext.py           3
-rw-r--r--  model-optimizer/extensions/front/caffe/pooling_ext.py          5
-rw-r--r--  model-optimizer/extensions/front/caffe/priorbox_ext.py         2
-rw-r--r--  model-optimizer/extensions/front/caffe/shufflechannel_ext.py  33
-rw-r--r--  model-optimizer/extensions/front/caffe/softmax_ext.py         36
-rw-r--r--  model-optimizer/extensions/front/caffe/split_to_identity.py   38
10 files changed, 263 insertions(+), 6 deletions(-)
diff --git a/model-optimizer/extensions/front/caffe/axpy.py b/model-optimizer/extensions/front/caffe/axpy.py
new file mode 100644
index 000000000..e5f575982
--- /dev/null
+++ b/model-optimizer/extensions/front/caffe/axpy.py
@@ -0,0 +1,43 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import networkx as nx
+
+from mo.front.common.replacement import FrontReplacementOp
+from mo.graph.graph import Node
+from mo.ops.lin_op import Add
+from mo.ops.scale_shift import ScaleShiftOp
+
+
+class AxpyToEltwise(FrontReplacementOp):
+ """
+ Replaces Axpy layer with ScaleShift and Eltwise.
+ """
+ op = "Axpy"
+ enabled = True
+
+ def replace_op(self, graph: nx.MultiDiGraph, node: Node):
+ in_node_0 = node.in_node(0)
+ in_node_1 = node.in_node(1)
+ in_node_2 = node.in_node(2)
+
+ ss = ScaleShiftOp(graph, {'name': node.id + "/ScaleShift_", 'axis': 0})
+ scale_shift = ss.create_node(inputs=[in_node_1, in_node_0])
+
+ el = Add(graph, {'name': node.id + "/Add_"})
+ el_node = el.create_node(inputs=[scale_shift, in_node_2])
+
+ return [el_node.id]
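Note on the replacement above: Caffe's Axpy layer (used in SENet-style models) computes a per-channel a * X + Y, where the first input a is the scale, the second input X is the feature map it multiplies, and the third input Y is added element-wise, which is why the ScaleShift is created with inputs [in_node_1, in_node_0]. A minimal NumPy sketch of the equivalence (shapes and values are illustrative only, not Model Optimizer code):

```python
import numpy as np

# Illustrative NCHW tensors: a is a per-channel scale (N, C, 1, 1),
# x and y are feature maps of shape (N, C, H, W).
a = np.random.rand(2, 3, 1, 1).astype(np.float32)
x = np.random.rand(2, 3, 4, 4).astype(np.float32)
y = np.random.rand(2, 3, 4, 4).astype(np.float32)

axpy = a * x + y              # what the Caffe Axpy layer computes
scaled = a * x                # ScaleShift: per-channel scale applied to x
summed = scaled + y           # Eltwise/Add with the third input

assert np.allclose(axpy, summed)
```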
diff --git a/model-optimizer/extensions/front/caffe/bn.py b/model-optimizer/extensions/front/caffe/bn.py
new file mode 100644
index 000000000..06ad48629
--- /dev/null
+++ b/model-optimizer/extensions/front/caffe/bn.py
@@ -0,0 +1,60 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import networkx as nx
+import numpy as np
+
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.common.replacement import FrontReplacementOp
+from mo.graph.graph import Node
+from mo.ops.scale_shift import ScaleShiftOp
+from mo.utils.error import Error
+
+
+class BNToScaleShift(FrontReplacementOp):
+ """
+ Replaces BN layer with ScaleShift.
+ """
+ op = "BN"
+ enabled = True
+
+ def replace_op(self, graph: nx.MultiDiGraph, node: Node):
+ attrs = {'name': node.id + "/ScaleShift_"}
+
+ param = graph.node[node.id]['pb'].bn_param
+ pb_model = graph.node[node.id]['model_pb']
+ blobs = pb_model.blobs
+
+ if len(blobs) != 4:
+ raise Error("Incorrect number of blobs in BN layer {}".format(node.id))
+
+ mean = np.array(blobs[0].data)
+ var = np.array(blobs[1].data)
+ betta = np.array(blobs[2].data)
+ gamma = np.array(blobs[3].data)
+
+ gamma = gamma + np.repeat(param.eps, gamma.shape)
+
+ scale = 1.0 / np.sqrt(gamma) * mean
+ shift = var - betta * scale
+
+ embed_input(attrs, 1, 'scale', scale, 'weights')
+ embed_input(attrs, 2, 'bias', shift, 'biases')
+
+ ss = ScaleShiftOp(graph, attrs)
+ scale_shift = ss.create_node([node.in_node(0)])
+
+ return [scale_shift.id]
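For background, the identity such replacements rely on: an inference-time batch norm y = gamma * (x - mean) / sqrt(var + eps) + beta folds into a single ScaleShift with scale = gamma / sqrt(var + eps) and shift = beta - mean * scale. The sketch below illustrates that folding in NumPy; it is not a restatement of the code above, whose blob ordering and arithmetic follow the particular Caffe BN layer variant it targets.

```python
import numpy as np

# Per-channel statistics and affine parameters (illustrative values).
mean, var = np.array([0.1, -0.2]), np.array([1.5, 0.7])
gamma, beta = np.array([1.2, 0.8]), np.array([0.05, -0.3])
eps = 1e-5

x = np.random.rand(4, 2)                  # batch of 4 samples, 2 channels

bn = gamma * (x - mean) / np.sqrt(var + eps) + beta

scale = gamma / np.sqrt(var + eps)        # folded ScaleShift weights
shift = beta - mean * scale               # folded ScaleShift biases

assert np.allclose(bn, x * scale + shift)
```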
diff --git a/model-optimizer/extensions/front/caffe/detection_output.py b/model-optimizer/extensions/front/caffe/detection_output.py
index dd8fb96bc..296fcf316 100644
--- a/model-optimizer/extensions/front/caffe/detection_output.py
+++ b/model-optimizer/extensions/front/caffe/detection_output.py
@@ -129,11 +129,18 @@ class DetectionOutputFrontExtractor(FrontExtractorOp):
'pad_mode': pad_mode,
'pad_value': ','.join(str(x) for x in param.save_output_param.resize_param.pad_value),
'interp_mode': interp_mode,
- 'input_width': param.input_width,
- 'input_height': param.input_height,
- 'normalized': int(param.normalized)
}
+ # these parameters may be omitted in caffe.proto and therefore absent from param,
+ # so check whether each one is explicitly set before reading it
+ fields = [field[0].name for field in param.ListFields()]
+ if 'input_width' in fields:
+ attrs['input_width'] = param.input_width
+ if 'input_height' in fields:
+ attrs['input_height'] = param.input_height
+ if 'normalized' in fields:
+ attrs['normalized'] = int(param.normalized)
+
mapping_rule = merge_attrs(param, attrs)
# force setting infer function because it doesn't exist in proto so merge_attrs will not set it
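The check added above relies on protobuf's ListFields(), which returns (FieldDescriptor, value) pairs only for fields actually present in the message, so parameters missing from an older caffe.proto (or simply not set in the model) are skipped rather than silently filled with defaults. A hedged sketch of the same pattern; the helper name is made up, and the message instance is assumed to come from the generated caffe.proto classes:

```python
def set_if_present(param, attrs, name, convert=lambda v: v):
    """Copy a proto field into attrs only if it is explicitly set in the message."""
    present = [field.name for field, _ in param.ListFields()]
    if name in present:
        attrs[name] = convert(getattr(param, name))

# Hypothetical usage with a generated DetectionOutputParameter message:
# attrs = {}
# set_if_present(param, attrs, 'input_width')
# set_if_present(param, attrs, 'normalized', int)
```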
diff --git a/model-optimizer/extensions/front/caffe/flatten_ext.py b/model-optimizer/extensions/front/caffe/flatten_ext.py
new file mode 100644
index 000000000..a68d81c40
--- /dev/null
+++ b/model-optimizer/extensions/front/caffe/flatten_ext.py
@@ -0,0 +1,36 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.extractor import FrontExtractorOp
+from mo.ops.flatten import Flatten
+
+
+class FlattenFrontExtractor(FrontExtractorOp):
+ op = 'Flatten'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ proto_layer = node.pb
+ param = proto_layer.flatten_param
+
+ attrs = {
+ 'axis': param.axis,
+ 'end_axis': param.end_axis,
+ }
+
+ Flatten.update_node_stat(node, attrs)
+ return __class__.enabled
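For context, Caffe's Flatten collapses the dimensions from 'axis' up to and including 'end_axis' into a single one, which is what the two extracted attributes describe. A small NumPy sketch of that semantics (illustrative only):

```python
import numpy as np

def caffe_flatten(x, axis=1, end_axis=-1):
    """Collapse dimensions [axis, end_axis] of x into one, as Caffe's Flatten does."""
    shape = list(x.shape)
    end = end_axis % x.ndim                       # allow a negative end_axis
    collapsed = int(np.prod(shape[axis:end + 1]))
    return x.reshape(shape[:axis] + [collapsed] + shape[end + 1:])

x = np.zeros((2, 3, 4, 5))
print(caffe_flatten(x, axis=1, end_axis=-1).shape)   # (2, 60)
print(caffe_flatten(x, axis=1, end_axis=2).shape)    # (2, 12, 5)
```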
diff --git a/model-optimizer/extensions/front/caffe/interp_ext.py b/model-optimizer/extensions/front/caffe/interp_ext.py
index b1d5ba733..ae8a8da55 100644
--- a/model-optimizer/extensions/front/caffe/interp_ext.py
+++ b/model-optimizer/extensions/front/caffe/interp_ext.py
@@ -39,6 +39,9 @@ class InterpFrontExtractor(FrontExtractorOp):
mapping_rule = merge_attrs(param, update_attrs)
+ # in Caffe there can be 2 inputs; the shape should be taken from the shape of the second input
+ mapping_rule['parse_2nd_input'] = 'shape'
+
# update the attributes of the node
Op.get_op_class_by_name(__class__.op).update_node_stat(node, mapping_rule)
return __class__.enabled
diff --git a/model-optimizer/extensions/front/caffe/pooling_ext.py b/model-optimizer/extensions/front/caffe/pooling_ext.py
index ca584f4ca..96540a171 100644
--- a/model-optimizer/extensions/front/caffe/pooling_ext.py
+++ b/model-optimizer/extensions/front/caffe/pooling_ext.py
@@ -31,6 +31,7 @@ class PoolingFrontExtractor(FrontExtractorOp):
param = proto_layer.pooling_param
method = 'max'
+ exclude_pad = 'true'
kernel = [0, 0]
stride = [1, 1]
padding = [0, 0]
@@ -45,8 +46,10 @@ class PoolingFrontExtractor(FrontExtractorOp):
if param.pool == 0:
method = 'max'
+ exclude_pad = 'true'
elif param.pool == 1:
method = 'avg'
+ exclude_pad = 'false'
else:
raise ValueError('Unknown Pooling Method!')
@@ -64,7 +67,7 @@ class PoolingFrontExtractor(FrontExtractorOp):
'pad': np.array([[0, 0], [0, 0], [padding[1], padding[1]], [padding[0], padding[0]]], dtype=np.int64),
'pad_spatial_shape': np.array([[padding[1], padding[1]], [padding[0], padding[0]]], dtype=np.int64),
'pool_method': method,
- 'exclude_pad': 'false',
+ 'exclude_pad': exclude_pad,
'global_pool': global_pooling,
'output_spatial_shape': None,
'rounding_type': rt
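The motivation for wiring 'exclude_pad' to the pooling method: with zero padding, an average pool that counts padded cells in its denominator produces smaller values near the border than one that averages only over real input cells, while max pooling is unaffected. A tiny NumPy illustration of the difference for a single border window (not Model Optimizer code):

```python
import numpy as np

# A 2x2 window in the top-left corner of a zero-padded input:
# three cells fall on padding, one real pixel remains.
window = np.array([[0.0, 0.0],
                   [0.0, 4.0]])

include_pad_avg = window.sum() / window.size   # 4 / 4 = 1.0
exclude_pad_avg = window.sum() / 1             # 4 / 1 = 4.0, only the real cell counts

print(include_pad_avg, exclude_pad_avg)
```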
diff --git a/model-optimizer/extensions/front/caffe/priorbox_ext.py b/model-optimizer/extensions/front/caffe/priorbox_ext.py
index 644c37490..ae87dc444 100644
--- a/model-optimizer/extensions/front/caffe/priorbox_ext.py
+++ b/model-optimizer/extensions/front/caffe/priorbox_ext.py
@@ -38,8 +38,6 @@ class PriorBoxFrontExtractor(FrontExtractorOp):
'aspect_ratio': np.array(param.aspect_ratio),
'min_size': np.array(param.min_size),
'max_size': np.array(param.max_size),
- 'width': list(param.width),
- 'height': list(param.height),
'flip': int(param.flip),
'clip': int(param.clip),
'variance': list(variance),
diff --git a/model-optimizer/extensions/front/caffe/shufflechannel_ext.py b/model-optimizer/extensions/front/caffe/shufflechannel_ext.py
new file mode 100644
index 000000000..37b72219f
--- /dev/null
+++ b/model-optimizer/extensions/front/caffe/shufflechannel_ext.py
@@ -0,0 +1,33 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+from mo.front.caffe.collect_attributes import collect_attributes
+from mo.front.common.extractors.utils import layout_attrs
+from mo.front.extractor import FrontExtractorOp
+from mo.ops.op import Op
+
+
+class ShuffleChannelFrontExtractor(FrontExtractorOp):
+ op = 'ShuffleChannel'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ mapping_rule = collect_attributes(node.pb.shuffle_channel_param)
+ mapping_rule.update(layout_attrs())
+
+ # update the attributes of the node
+ Op.get_op_class_by_name(__class__.op).update_node_stat(node, mapping_rule)
+ return __class__.enabled
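For context, ShuffleChannel (from ShuffleNet) permutes channels by splitting them into groups and transposing the group axis; the extractor above only collects the layer attributes (such as the group count) and leaves the computation to the op. A minimal NumPy sketch of the operation, assuming a 'group' parameter as in ShuffleNet:

```python
import numpy as np

def shuffle_channels(x, group):
    """Reshape (N, C, H, W) -> (N, group, C // group, H, W), swap the two
    channel axes, and flatten back: the classic channel-shuffle trick."""
    n, c, h, w = x.shape
    assert c % group == 0
    return (x.reshape(n, group, c // group, h, w)
             .transpose(0, 2, 1, 3, 4)
             .reshape(n, c, h, w))

x = np.arange(2 * 6).reshape(2, 6, 1, 1)
print(shuffle_channels(x, group=2)[0, :, 0, 0])   # [0 3 1 4 2 5]
```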
diff --git a/model-optimizer/extensions/front/caffe/softmax_ext.py b/model-optimizer/extensions/front/caffe/softmax_ext.py
new file mode 100644
index 000000000..6bb8d74ff
--- /dev/null
+++ b/model-optimizer/extensions/front/caffe/softmax_ext.py
@@ -0,0 +1,36 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.extractor import FrontExtractorOp
+from mo.ops.softmax import Softmax
+
+
+class SoftmaxFrontExtractor(FrontExtractorOp):
+ op = 'Softmax'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ proto_layer = node.pb
+ param = proto_layer.softmax_param
+
+ attrs = {
+ 'axis': param.axis
+ }
+
+ # update the attributes of the node
+ Softmax.update_node_stat(node, attrs)
+ return __class__.enabled
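The only attribute extracted here is 'axis', the dimension the normalization runs over. A one-function NumPy sketch of softmax along a chosen axis, purely for illustration:

```python
import numpy as np

def softmax(x, axis=1):
    # Subtract the per-slice maximum for numerical stability before exponentiating.
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.rand(2, 5)
assert np.allclose(softmax(x, axis=1).sum(axis=1), 1.0)
```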
diff --git a/model-optimizer/extensions/front/caffe/split_to_identity.py b/model-optimizer/extensions/front/caffe/split_to_identity.py
new file mode 100644
index 000000000..d46c1c31d
--- /dev/null
+++ b/model-optimizer/extensions/front/caffe/split_to_identity.py
@@ -0,0 +1,38 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import networkx as nx
+
+from mo.front.common.replacement import FrontReplacementOp
+
+
+class SplitToIdentity(FrontReplacementOp):
+ """
+ The Split layer in Caffe copies the input blob to a number of output layers. The Split layer in Inference Engine
+ divides the input blob into several pieces. The Caffe Split layer is therefore redundant, because Inference Engine
+ creates the intermediate blobs itself when necessary.
+
+ The replacer changes the 'op' attribute of the node to 'Identity' and sets all 'out' edge attributes to 0, so the
+ Identity operations are removed later in the pipeline.
+ """
+ op = "Split"
+ enabled = True
+
+ def replace_sub_graph(self, graph: nx.MultiDiGraph, match: dict):
+ split_node = match['op']
+ split_node.op = 'Identity'
+ for u, v, edge_attrs in split_node.graph.out_edges(split_node.id, data=True):
+ edge_attrs['out'] = 0
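The loop above mutates the edge attribute dictionaries in place, which works because networkx hands back the live dictionaries when data=True is requested. A toy sketch of the same pattern on a bare MultiDiGraph; the node names are illustrative, not taken from the Model Optimizer graph:

```python
import networkx as nx

g = nx.MultiDiGraph()
g.add_edge('split', 'consumer_a', out=0)
g.add_edge('split', 'consumer_b', out=1)
g.add_edge('split', 'consumer_c', out=2)

# Point every consumer at output port 0, as the replacer does once Split becomes Identity.
for u, v, edge_attrs in g.out_edges('split', data=True):
    edge_attrs['out'] = 0

print([d['out'] for _, _, d in g.out_edges('split', data=True)])   # [0, 0, 0]
```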