summaryrefslogtreecommitdiff
path: root/model-optimizer/mo/front
diff options
context:
space:
mode:
authorAlexey Suhov <asuhov@users.noreply.github.com>2019-01-21 21:31:31 +0300
committeropenvino-pushbot <44090433+openvino-pushbot@users.noreply.github.com>2019-01-21 21:31:31 +0300
commit9de27f16bc8b712a5b8c99d1d4b4a66c9144942d (patch)
tree01a383efe94d92b9870d513c2c5ea5d15b07010a /model-optimizer/mo/front
parentfbc7a4a710c24def8ab199926a7da90a0394b87d (diff)
downloaddldt-9de27f16bc8b712a5b8c99d1d4b4a66c9144942d.tar.gz
dldt-9de27f16bc8b712a5b8c99d1d4b4a66c9144942d.tar.bz2
dldt-9de27f16bc8b712a5b8c99d1d4b4a66c9144942d.zip
Publishing R5 content (#72)
* Publishing R5 content * Updated ade revision * updated readme * add possibility to build CPU plugin with Intel MKL package
Diffstat (limited to 'model-optimizer/mo/front')
-rw-r--r--model-optimizer/mo/front/caffe/extractor.py6
-rw-r--r--model-optimizer/mo/front/caffe/extractors/elu.py7
-rw-r--r--model-optimizer/mo/front/caffe/extractors/flatten.py35
-rw-r--r--model-optimizer/mo/front/caffe/extractors/reshape.py8
-rw-r--r--model-optimizer/mo/front/caffe/extractors/scale.py17
-rw-r--r--model-optimizer/mo/front/caffe/extractors/slice.py2
-rw-r--r--model-optimizer/mo/front/caffe/extractors/softmax.py27
-rw-r--r--model-optimizer/mo/front/caffe/loader.py40
-rw-r--r--model-optimizer/mo/front/caffe/proto/caffe_pb2.py482
-rw-r--r--model-optimizer/mo/front/caffe/proto/generate_caffe_pb2.py17
-rw-r--r--model-optimizer/mo/front/caffe/proto/mo_caffe.proto7
-rw-r--r--model-optimizer/mo/front/common/layout.py3
-rw-r--r--model-optimizer/mo/front/common/partial_infer/elemental.py6
-rw-r--r--model-optimizer/mo/front/common/partial_infer/expand_dims.py4
-rw-r--r--model-optimizer/mo/front/common/partial_infer/flatten.py43
-rw-r--r--model-optimizer/mo/front/common/partial_infer/inner_product.py23
-rw-r--r--model-optimizer/mo/front/common/partial_infer/matmul.py40
-rw-r--r--model-optimizer/mo/front/common/partial_infer/reduce.py5
-rw-r--r--model-optimizer/mo/front/common/partial_infer/reshape.py22
-rw-r--r--model-optimizer/mo/front/common/partial_infer/slice.py1
-rw-r--r--model-optimizer/mo/front/common/partial_infer/split.py127
-rw-r--r--model-optimizer/mo/front/common/partial_infer/squeeze.py60
-rw-r--r--model-optimizer/mo/front/common/partial_infer/up_sampling.py28
-rw-r--r--model-optimizer/mo/front/common/partial_infer/utils.py12
-rw-r--r--model-optimizer/mo/front/common/register_custom_ops.py1
-rw-r--r--model-optimizer/mo/front/extractor.py277
-rw-r--r--model-optimizer/mo/front/kaldi/extractor.py24
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/add_shift_ext.py38
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py (renamed from model-optimizer/mo/front/kaldi/extractors/memory_ext.py)20
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py29
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/affine_transform_ext.py18
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/concat_ext.py7
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/convolution_ext.py61
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/convolutional_1d_component_ext.py96
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/convolutional_component_ext.py88
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/copy_ext.py (renamed from model-optimizer/mo/front/kaldi/extractors/inner_product_ext.py)25
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/fixed_affine_component_ext.py51
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/lstm_projected_streams_ext.py68
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/max_pooling_ext.py60
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py43
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/pooling_ext.py38
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/rectified_linear_component_ext.py (renamed from model-optimizer/mo/front/kaldi/extractors/clamp_ext.py)13
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/rescale_ext.py (renamed from model-optimizer/mo/front/kaldi/extractors/scale_shift.py)18
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/reshape.py68
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/sigmoid_ext.py2
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/slice_ext.py14
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/softmax_ext.py (renamed from model-optimizer/mo/front/kaldi/extractors/eltwise_ext.py)24
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/splice_component_ext.py53
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/split.py32
-rw-r--r--model-optimizer/mo/front/kaldi/extractors/tanh_component_ext.py (renamed from model-optimizer/mo/front/kaldi/extractors/activation_ext.py)12
-rw-r--r--model-optimizer/mo/front/kaldi/loader.py544
-rw-r--r--model-optimizer/mo/front/kaldi/loader/__init__.py0
-rw-r--r--model-optimizer/mo/front/kaldi/loader/loader.py215
-rw-r--r--model-optimizer/mo/front/kaldi/loader/utils.py299
-rw-r--r--model-optimizer/mo/front/kaldi/register_custom_ops.py2
-rw-r--r--model-optimizer/mo/front/kaldi/utils.py64
-rw-r--r--model-optimizer/mo/front/mxnet/extractor.py20
-rw-r--r--model-optimizer/mo/front/mxnet/extractors/flatten.py29
-rw-r--r--model-optimizer/mo/front/mxnet/extractors/reshape.py32
-rw-r--r--model-optimizer/mo/front/mxnet/extractors/up_sampling.py28
-rw-r--r--model-optimizer/mo/front/mxnet/extractors/utils.py8
-rw-r--r--model-optimizer/mo/front/onnx/extractor.py11
-rw-r--r--model-optimizer/mo/front/tf/change_placeholder_type.py7
-rw-r--r--model-optimizer/mo/front/tf/extractor.py9
-rw-r--r--model-optimizer/mo/front/tf/extractors/softmax.py24
-rw-r--r--model-optimizer/mo/front/tf/extractors/utils.py2
-rw-r--r--model-optimizer/mo/front/tf/graph_utils.py2
-rw-r--r--model-optimizer/mo/front/tf/loader.py95
68 files changed, 2005 insertions, 1588 deletions
diff --git a/model-optimizer/mo/front/caffe/extractor.py b/model-optimizer/mo/front/caffe/extractor.py
index b90396883..72e3283f7 100644
--- a/model-optimizer/mo/front/caffe/extractor.py
+++ b/model-optimizer/mo/front/caffe/extractor.py
@@ -17,7 +17,6 @@
from mo.front.caffe.extractors.batchnorm import batch_norm_ext
from mo.front.caffe.extractors.concat import concat_ext
from mo.front.caffe.extractors.eltwise import eltwise_ext
-from mo.front.caffe.extractors.flatten import flatten_ext
from mo.front.caffe.extractors.inner_product import inner_product_ext
from mo.front.caffe.extractors.input import global_input_ext, input_ext
from mo.front.caffe.extractors.lrn import lrn_ext
@@ -29,10 +28,9 @@ from mo.front.caffe.extractors.reshape import reshape_ext
from mo.front.caffe.extractors.roipooling import roipooling_ext
from mo.front.caffe.extractors.scale import scale_ext
from mo.front.caffe.extractors.slice import slice_ext
-from mo.front.caffe.extractors.softmax import softmax_ext
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.register_custom_ops import extension_op_extractor
-from mo.front.extractor import CaffePythonFrontExtractorOp, FrontExtractorOp
+from mo.front.extractor import CaffePythonFrontExtractorOp
from mo.graph.graph import Node
from mo.ops.op import Op
from mo.utils.error import Error
@@ -69,10 +67,8 @@ caffe_type_extractors = {
# Utility Layers
'concat': node_pb_arg(concat_ext),
'eltwise': node_pb_arg(eltwise_ext),
- 'flatten': node_pb_arg(flatten_ext),
'reshape': node_pb_arg(reshape_ext),
'slice': node_pb_arg(slice_ext),
- 'softmax': node_pb_arg(softmax_ext),
# Custom, implemented in IE, SSD-specific
'permute': node_pb_arg(permute_ext),
diff --git a/model-optimizer/mo/front/caffe/extractors/elu.py b/model-optimizer/mo/front/caffe/extractors/elu.py
index 66f3a7b48..464a77f1e 100644
--- a/model-optimizer/mo/front/caffe/extractors/elu.py
+++ b/model-optimizer/mo/front/caffe/extractors/elu.py
@@ -14,6 +14,7 @@
limitations under the License.
"""
+from mo.front.caffe.collect_attributes import collect_attributes
from mo.front.extractor import FrontExtractorOp
from mo.ops.activation import Activation
@@ -24,5 +25,9 @@ class ELUFrontExtractor(FrontExtractorOp):
@staticmethod
def extract(node):
- Activation.update_node_stat(node, {'operation': 'elu'})
+ param = node.pb.elu_param
+ attrs = collect_attributes(param)
+ attrs['operation'] = 'elu'
+
+ Activation.update_node_stat(node, attrs)
return ELUFrontExtractor.enabled
diff --git a/model-optimizer/mo/front/caffe/extractors/flatten.py b/model-optimizer/mo/front/caffe/extractors/flatten.py
deleted file mode 100644
index 2eb5f2d4a..000000000
--- a/model-optimizer/mo/front/caffe/extractors/flatten.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import numpy as np
-
-from mo.front.caffe.collect_attributes import merge_attrs
-from mo.front.common.partial_infer.flatten import flatten_infer
-
-
-def flatten_ext(pl, ml):
- param = pl.flatten_param
- update_attrs = {
- 'axis': param.axis,
- 'end_axis': param.end_axis,
- 'num_axes': 0
- }
- mapping_rule = merge_attrs(param, update_attrs)
- mapping_rule.update({
- 'type': "Flatten",
- 'infer': flatten_infer
- })
- return mapping_rule
diff --git a/model-optimizer/mo/front/caffe/extractors/reshape.py b/model-optimizer/mo/front/caffe/extractors/reshape.py
index 734ff8c03..13deb9908 100644
--- a/model-optimizer/mo/front/caffe/extractors/reshape.py
+++ b/model-optimizer/mo/front/caffe/extractors/reshape.py
@@ -13,6 +13,7 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+import logging as log
from mo.front.common.partial_infer.elemental import single_output_infer
from mo.front.common.partial_infer.reshape import tf_reshape_shape_infer
@@ -29,5 +30,12 @@ def reshape_ext(pl, ml):
'dim': list(param.shape.dim),
'infer': lambda node: single_output_infer(node, tf_reshape_shape_infer)
}
+ if attrs['axis'] != 0:
+ log.error('The operation "Reshape" has attribute "axis" with unsupported value "{}"'.format(attrs['axis']))
+ return None
+ if attrs['num_axes'] != -1:
+ log.error('The operation "Reshape" has attribute "num_axes" with unsupported value "{}"'.format(
+ attrs['num_axes']))
+ return None
return attrs
diff --git a/model-optimizer/mo/front/caffe/extractors/scale.py b/model-optimizer/mo/front/caffe/extractors/scale.py
index 8ce0bd60e..196b7d56b 100644
--- a/model-optimizer/mo/front/caffe/extractors/scale.py
+++ b/model-optimizer/mo/front/caffe/extractors/scale.py
@@ -16,7 +16,7 @@
import numpy as np
-from mo.front.caffe.extractors.utils import weights_biases
+from mo.front.caffe.extractors.utils import embed_input, weights_biases
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.utils.utils import NamedAttrsClass
@@ -26,11 +26,22 @@ def scale_ext(pl, ml):
attrs = {
'op': 'ScaleShift',
'type': 'ScaleShift',
+ 'axis': param.axis,
'infer': copy_shape_infer
}
- if ml is None:
+ if ml is None and len(pl.bottom) == 1:
# default weights and biases for scale layer if the caffemodel file doesn't contain them
ml = NamedAttrsClass({'blobs': np.array([NamedAttrsClass({'data': np.array([1])}),
NamedAttrsClass({'data': np.array([0])})])})
- attrs.update(weights_biases(param.bias_term, ml))
+ # scale with 1 input and 1 or 2 blobs
+ if ml and len(ml.blobs) != 0 and len(pl.bottom) == 1:
+ attrs.update(weights_biases(param.bias_term, ml))
+ # 2 inputs + bias
+ elif len(pl.bottom) == 2 and param.bias_term:
+ if ml is None or len(ml.blobs) == 0:
+ # default bias for scale layer with 2 inputs if the caffemodel file doesn't contain them
+ ml = NamedAttrsClass({'blobs': np.array([NamedAttrsClass({'data': np.array([0])})])})
+
+ embed_input(attrs, 1, 'biases', ml.blobs[0].data)
+
return attrs
diff --git a/model-optimizer/mo/front/caffe/extractors/slice.py b/model-optimizer/mo/front/caffe/extractors/slice.py
index c8d1f379d..953f88c34 100644
--- a/model-optimizer/mo/front/caffe/extractors/slice.py
+++ b/model-optimizer/mo/front/caffe/extractors/slice.py
@@ -30,6 +30,8 @@ def slice_ext(proto_layer, model_layer):
'slice_point': param.slice_point,
}
mapping_rule = merge_attrs(param, update_attrs)
+ if 'slice_point' not in mapping_rule:
+ mapping_rule['slice_point'] = []
mapping_rule.update({
'type': 'Slice',
'infer': caffe_slice_infer
diff --git a/model-optimizer/mo/front/caffe/extractors/softmax.py b/model-optimizer/mo/front/caffe/extractors/softmax.py
deleted file mode 100644
index 245fdce0c..000000000
--- a/model-optimizer/mo/front/caffe/extractors/softmax.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from mo.front.common.partial_infer.elemental import copy_shape_infer
-
-
-def softmax_ext(pb_layer, pb_model):
- param = pb_layer.softmax_param
-
- return {
- 'type': 'SoftMax',
- 'axis': param.axis,
- 'infer': copy_shape_infer
- }
diff --git a/model-optimizer/mo/front/caffe/loader.py b/model-optimizer/mo/front/caffe/loader.py
index 729c442ef..69f63f213 100644
--- a/model-optimizer/mo/front/caffe/loader.py
+++ b/model-optimizer/mo/front/caffe/loader.py
@@ -26,7 +26,7 @@ from google.protobuf.internal import api_implementation
from mo.front.caffe.proto import caffe_pb2
from mo.graph.graph import Node, unique_id
-from mo.utils.error import Error
+from mo.utils.error import Error, FrameworkError
from mo.utils.utils import refer_to_faq_msg
@@ -103,18 +103,40 @@ def load_caffe_proto_model(proto_path: str, model_path: [str, None] = None):
'python -m easy_install protobuf-3.5.1-py($your_python_version)-win-amd64.egg \n' \
'set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp'
print(message + '\n\n' + refer_to_faq_msg(80))
+
# Read proto layers
- proto = caffe_pb2.NetParameter()
- with open(proto_path, "r") as file:
- text_format.Merge(str(file.read()), proto)
+ try:
+ proto = caffe_pb2.NetParameter()
+ with open(proto_path, "r") as file:
+ text_format.Merge(str(file.read()), proto)
+ except Exception as e:
+ log.error('Exception message: {}\n\n'.format(e) +
+ ' Possible reasons:\n' +
+ ' 1. {} does not exist\n'.format(proto_path) +
+ ' 2. {} does not have a valid structure, for example, it was downloaded as html\n'.format(proto_path) +
+ ' 3. {} contains custom layers or attributes that are not supported\n'.format(proto_path) +
+ ' in Model Optimizer by default.\n\n' +
+ ' After you made sure that {} has a valid structure and still see this issue, then\n'.format(proto_path) +
+ ' you need to generate a python parser for caffe.proto that was used when the model\n' +
+ ' was created.\n' +
+ ' Run "python3 generate_caffe_pb2.py --input_proto ${PATH_TO_CAFFE}/src/caffe/proto/caffe.proto"' +
+ refer_to_faq_msg(1) + '\n\n', extra={'framework_error': True})
+ raise FrameworkError('Model Optimizer is not able to parse {}'.format(proto_path)) from e
# Read model layer if exists
model = None
- if model_path:
- model = caffe_pb2.NetParameter()
- with open(model_path, "rb") as infile:
- map = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
- model.MergeFromString(map)
+ try:
+ if model_path:
+ model = caffe_pb2.NetParameter()
+ with open(model_path, "rb") as infile:
+ map = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
+ model.MergeFromString(map)
+ except Exception as e:
+ log.error('Exception message: {}\n\n'.format(e) +
+ ' Possible reasons:\n' +
+ ' 1. {} does not exist\n'.format(model_path) +
+ ' 2. {} does not have a valid structure\n'.format(model_path), extra={'framework_error': True})
+ raise FrameworkError('Model Optimizer is not able to parse {}'.format(model_path)) from e
return proto, model
diff --git a/model-optimizer/mo/front/caffe/proto/caffe_pb2.py b/model-optimizer/mo/front/caffe/proto/caffe_pb2.py
index e79d2d554..c32fa78c4 100644
--- a/model-optimizer/mo/front/caffe/proto/caffe_pb2.py
+++ b/model-optimizer/mo/front/caffe/proto/caffe_pb2.py
@@ -19,7 +19,7 @@ _sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mo_caffe.proto',
package='mo_caffe',
- serialized_pb=_b('\n\x0emo_caffe.proto\x12\x08mo_caffe\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcf\x01\n\tBlobProto\x12\"\n\x05shape\x18\x07 \x01(\x0b\x32\x13.mo_caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"5\n\x0f\x42lobProtoVector\x12\"\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x13.mo_caffe.BlobProto\"M\n\x1e\x43osineSimilarityBatchParameter\x12\x14\n\tpos_label\x18\x01 \x01(\x01:\x01\x31\x12\x15\n\tneg_label\x18\x02 \x01(\x01:\x02-1\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"A\n\x0cLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\"0\n\x08LabelMap\x12$\n\x04item\x18\x01 \x03(\x0b\x32\x16.mo_caffe.LabelMapItem\"\x87\x01\n\x0eNormalizedBBox\x12\x0c\n\x04xmin\x18\x01 \x01(\x02\x12\x0c\n\x04ymin\x18\x02 \x01(\x02\x12\x0c\n\x04xmax\x18\x03 \x01(\x02\x12\x0c\n\x04ymax\x18\x04 \x01(\x02\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x11\n\tdifficult\x18\x06 \x01(\x08\x12\r\n\x05score\x18\x07 \x01(\x02\x12\x0c\n\x04size\x18\x08 \x01(\x02\"\xad\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 
\x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x45\n\rvariance_norm\x18\x08 \x01(\x0e\x32&.mo_caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\x12\x0c\n\x04\x66ile\x18\t \x01(\t\x12\x10\n\x08\x64iag_val\x18\n \x03(\x02\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\xed\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12(\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x13.mo_caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12!\n\x05state\x18\x06 \x01(\x0b\x32\x12.mo_caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cprofile_info\x18\t \x01(\x08:\x05\x66\x61lse\x12\x18\n\x0cprofile_iter\x18\n \x01(\x05:\x02\x35\x30\x12\x1a\n\x0eprofile_warmup\x18\x0b \x01(\x05:\x02\x31\x30\x12\'\n\x05layer\x18\x64 \x03(\x0b\x32\x18.mo_caffe.LayerParameter\x12*\n\x06layers\x18\x02 \x03(\x0b\x32\x1a.mo_caffe.V1LayerParameter\"\xf4\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12)\n\tnet_param\x18\x19 \x01(\x0b\x32\x16.mo_caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12/\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x16.mo_caffe.NetParameter\x12.\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x16.mo_caffe.NetParameter\x12\'\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x12.mo_caffe.NetState\x12&\n\ntest_state\x18\x1b \x03(\x0b\x32\x12.mo_caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x17\n\x0fplateau_winsize\x18* \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12N\n\x0fsnapshot_format\x18% \x01(\x0e\x32(.mo_caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12>\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32$.mo_caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12>\n\x0bsolver_type\x18\x1e \x01(\x0e\x32$.mo_caffe.SolverParameter.SolverType:\x03SGD\x12\x1f\n\x11layer_wise_reduce\x18) \x01(\x08:\x04true\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"\xa8\x01\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12$\n\x07history\x18\x03 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 
\x01(\x05:\x01\x30\x12\x1b\n\x0cminimum_loss\x18\x05 \x01(\x02:\x05\x31\x65+38\x12\x1a\n\x0fiter_last_event\x18\x06 \x01(\x05:\x01\x30\"Q\n\x08NetState\x12$\n\x05phase\x18\x01 \x01(\x0e\x32\x0f.mo_caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"v\n\x0cNetStateRule\x12\x1e\n\x05phase\x18\x01 \x01(\x0e\x32\x0f.mo_caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xad\x02\n\x1bSpatialTransformerParameter\x12\x1e\n\x0etransform_type\x18\x01 \x01(\t:\x06\x61\x66\x66ine\x12\x1e\n\x0csampler_type\x18\x02 \x01(\t:\x08\x62ilinear\x12\x10\n\x08output_H\x18\x03 \x01(\x05\x12\x10\n\x08output_W\x18\x04 \x01(\x05\x12\x1b\n\rto_compute_dU\x18\x05 \x01(\x08:\x04true\x12\x11\n\ttheta_1_1\x18\x06 \x01(\x01\x12\x11\n\ttheta_1_2\x18\x07 \x01(\x01\x12\x11\n\ttheta_1_3\x18\x08 \x01(\x01\x12\x11\n\ttheta_2_1\x18\t \x01(\x01\x12\x11\n\ttheta_2_2\x18\n \x01(\x01\x12\x11\n\ttheta_2_3\x18\x0b \x01(\x01\x12\x1b\n\x0c\x64\x65_transform\x18\x0c \x01(\x08:\x05\x66\x61lse\"(\n\x12PowerFileParameter\x12\x12\n\nshift_file\x18\x01 \x01(\t\"5\n\x0fSTLossParameter\x12\x10\n\x08output_H\x18\x01 \x02(\x05\x12\x10\n\x08output_W\x18\x02 \x02(\x05\"%\n\x10LocLossParameter\x12\x11\n\tthreshold\x18\x01 \x02(\x01\"\xa6\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\nshare_mode\x18\x02 \x01(\x0e\x32 .mo_caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xb1#\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1e\n\x05phase\x18\n \x01(\x0e\x32\x0f.mo_caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\"\n\x05param\x18\x06 
\x03(\x0b\x32\x13.mo_caffe.ParamSpec\x12\"\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12\'\n\x07include\x18\x08 \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\'\n\x07\x65xclude\x18\t \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12:\n\x0ftransform_param\x18\x64 \x01(\x0b\x32!.mo_caffe.TransformationParameter\x12+\n\nloss_param\x18\x65 \x01(\x0b\x32\x17.mo_caffe.LossParameter\x12\x33\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x1b.mo_caffe.AccuracyParameter\x12/\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x19.mo_caffe.ArgMaxParameter\x12\x37\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x1c.mo_caffe.BatchNormParameter\x12,\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x17.mo_caffe.BiasParameter\x12I\n\x19\x63hannel_permutation_param\x18\x92? \x01(\x0b\x32%.mo_caffe.ChannelPermutationParameter\x12/\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x19.mo_caffe.ConcatParameter\x12\x42\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\".mo_caffe.ContrastiveLossParameter\x12\x39\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1e.mo_caffe.ConvolutionParameter\x12,\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x17.mo_caffe.CropParameter\x12\x39\n\x11\x63tc_decoder_param\x18\x95\x01 \x01(\x0b\x32\x1d.mo_caffe.CTCDecoderParameter\x12\x33\n\x0e\x63tc_loss_param\x18\x94\x01 \x01(\x0b\x32\x1a.mo_caffe.CTCLossParameter\x12+\n\ndata_param\x18k \x01(\x0b\x32\x17.mo_caffe.DataParameter\x12\x31\n\rdropout_param\x18l \x01(\x0b\x32\x1a.mo_caffe.DropoutParameter\x12\x36\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x1c.mo_caffe.DummyDataParameter\x12\x31\n\reltwise_param\x18n \x01(\x0b\x32\x1a.mo_caffe.EltwiseParameter\x12*\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x16.mo_caffe.ELUParameter\x12.\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x18.mo_caffe.EmbedParameter\x12)\n\texp_param\x18o \x01(\x0b\x32\x16.mo_caffe.ExpParameter\x12\x32\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x1a.mo_caffe.FlattenParameter\x12*\n\tgrn_param\x18\xd5\x01 
\x01(\x0b\x32\x16.mo_caffe.GRNParameter\x12\x34\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x1b.mo_caffe.HDF5DataParameter\x12\x38\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\x12\x36\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x1c.mo_caffe.HingeLossParameter\x12\x36\n\x10image_data_param\x18s \x01(\x0b\x32\x1c.mo_caffe.ImageDataParameter\x12<\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1f.mo_caffe.InfogainLossParameter\x12<\n\x13inner_product_param\x18u \x01(\x0b\x32\x1f.mo_caffe.InnerProductParameter\x12.\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x18.mo_caffe.InputParameter\x12*\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x16.mo_caffe.LogParameter\x12)\n\tlrn_param\x18v \x01(\x0b\x32\x16.mo_caffe.LRNParameter\x12\x38\n\x11memory_data_param\x18w \x01(\x0b\x32\x1d.mo_caffe.MemoryDataParameter\x12)\n\tmvn_param\x18x \x01(\x0b\x32\x16.mo_caffe.MVNParameter\x12\x36\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x1c.mo_caffe.ParameterParameter\x12\x31\n\rpooling_param\x18y \x01(\x0b\x32\x1a.mo_caffe.PoolingParameter\x12\x32\n\rpermute_param\x18\x9a\x01 \x01(\x0b\x32\x1a.mo_caffe.PermuteParameter\x12-\n\x0bpower_param\x18z \x01(\x0b\x32\x18.mo_caffe.PowerParameter\x12.\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x18.mo_caffe.PReLUParameter\x12\x30\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x19.mo_caffe.PythonParameter\x12\x36\n\x0frecurrent_param\x18\x92\x01 \x01(\x0b\x32\x1c.mo_caffe.RecurrentParameter\x12\x36\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x1c.mo_caffe.ReductionParameter\x12+\n\nrelu_param\x18{ \x01(\x0b\x32\x17.mo_caffe.ReLUParameter\x12\x32\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x1a.mo_caffe.ReshapeParameter\x12\x32\n\rreverse_param\x18\x93\x01 \x01(\x0b\x32\x1a.mo_caffe.ReverseParameter\x12.\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x18.mo_caffe.ScaleParameter\x12\x31\n\rsigmoid_param\x18| \x01(\x0b\x32\x1a.mo_caffe.SigmoidParameter\x12\x31\n\rsoftmax_param\x18} 
\x01(\x0b\x32\x1a.mo_caffe.SoftmaxParameter\x12*\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x16.mo_caffe.SPPParameter\x12-\n\x0bslice_param\x18~ \x01(\x0b\x32\x18.mo_caffe.SliceParameter\x12+\n\ntanh_param\x18\x7f \x01(\x0b\x32\x17.mo_caffe.TanHParameter\x12\x36\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x1c.mo_caffe.ThresholdParameter\x12,\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x17.mo_caffe.TileParameter\x12\x39\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1d.mo_caffe.WindowDataParameter\x12\x38\n\x08st_param\x18\x96\x01 \x01(\x0b\x32%.mo_caffe.SpatialTransformerParameter\x12\x31\n\rst_loss_param\x18\x97\x01 \x01(\x0b\x32\x19.mo_caffe.STLossParameter\x12\x37\n\x10power_file_param\x18\x98\x01 \x01(\x0b\x32\x1c.mo_caffe.PowerFileParameter\x12\x33\n\x0eloc_loss_param\x18\x99\x01 \x01(\x0b\x32\x1a.mo_caffe.LocLossParameter\x12\x34\n\x0eproposal_param\x18\xc9\x01 \x01(\x0b\x32\x1b.mo_caffe.ProposalParameter\x12P\n\x1d\x63osine_similarity_batch_param\x18\xca\x01 \x01(\x0b\x32(.mo_caffe.CosineSimilarityBatchParameter\x12\x45\n\x0erss_loss_param\x18\xcb\x01 \x01(\x0b\x32,.mo_caffe.RandomSamplingSoftmaxLossParameter\x12\x31\n\nnorm_param\x18\xcc\x01 \x01(\x0b\x32\x1c.mo_caffe.NormalizeParameter\x12\x39\n\x11roi_warping_param\x18\xcd\x01 \x01(\x0b\x32\x1d.mo_caffe.ROIWarpingParameter\x12=\n\x13psroi_pooling_param\x18\xcf\x01 \x01(\x0b\x32\x1f.mo_caffe.PSROIPoolingParameter\x12\x39\n\x11roi_pooling_param\x18\xd0\x01 \x01(\x0b\x32\x1d.mo_caffe.ROIPoolingParameter\x12>\n\x14smooth_l1_loss_param\x18\xd1\x01 \x01(\x0b\x32\x1f.mo_caffe.SmoothL1LossParameter\x12\x46\n\x18\x62ox_annotator_ohem_param\x18\xd2\x01 \x01(\x0b\x32#.mo_caffe.BoxAnnotatorOHEMParameter\x12\x43\n\x16\x64\x65tection_output_param\x18\xd3\x01 \x01(\x0b\x32\".mo_caffe.DetectionOutputParameter\x12\x35\n\x0fprior_box_param\x18\xd4\x01 \x01(\x0b\x32\x1b.mo_caffe.PriorBoxParameter\x12\x39\n\x11region_yolo_param\x18\xd6\x01 \x01(\x0b\x32\x1d.mo_caffe.RegionYoloParameter\x12\x37\n\x10reorg_yolo_param\x18\xd7\x01 
\x01(\x0b\x32\x1c.mo_caffe.ReorgYoloParameter\x12.\n\x0brelu6_param\x18\xd8\x01 \x01(\x0b\x32\x18.mo_caffe.ReLU6Parameter\x12\x30\n\x0cinterp_param\x18\xd9\x01 \x01(\x0b\x32\x19.mo_caffe.InterpParameter\x12<\n\x12\x61ugmentation_param\x18\xda\x01 \x01(\x0b\x32\x1f.mo_caffe.AugmentationParameter\x12:\n\x11\x63orrelation_param\x18\xdb\x01 \x01(\x0b\x32\x1e.mo_caffe.CorrelationParameter\x12\x34\n\x0eresample_param\x18\xdc\x01 \x01(\x0b\x32\x1b.mo_caffe.ResampleParameter\x12\x35\n\x0f\x66low_warp_param\x18\xdd\x01 \x01(\x0b\x32\x1b.mo_caffe.FlowWarpParameter\x12.\n\x0b\x61\x63\x63um_param\x18\xde\x01 \x01(\x0b\x32\x18.mo_caffe.AccumParameter\x12?\n\x14\x63oeff_schedule_param\x18\xdf\x01 \x01(\x0b\x32 .mo_caffe.CoeffScheduleParameter\"\x90\x01\n\x0fInterpParameter\x12\x11\n\x06height\x18\x01 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bzoom_factor\x18\x03 \x01(\x05:\x01\x31\x12\x18\n\rshrink_factor\x18\x04 \x01(\x05:\x01\x31\x12\x12\n\x07pad_beg\x18\x05 \x01(\x05:\x01\x30\x12\x12\n\x07pad_end\x18\x06 \x01(\x05:\x01\x30\"n\n\"RandomSamplingSoftmaxLossParameter\x12 \n\x13random_sampling_num\x18\x01 \x01(\x05:\x03\x31\x30\x30\x12&\n\x16random_sampling_policy\x18\x02 \x01(\t:\x06random\"\xc8\x01\n\x11ProposalParameter\x12\x17\n\x0b\x66\x65\x61t_stride\x18\x01 \x01(\r:\x02\x31\x36\x12\x15\n\tbase_size\x18\x02 \x01(\r:\x02\x31\x36\x12\x14\n\x08min_size\x18\x03 \x01(\r:\x02\x31\x36\x12\r\n\x05ratio\x18\x04 \x03(\x02\x12\r\n\x05scale\x18\x05 \x03(\x02\x12\x1a\n\x0cpre_nms_topn\x18\x06 \x01(\r:\x04\x36\x30\x30\x30\x12\x1a\n\rpost_nms_topn\x18\x07 \x01(\r:\x03\x33\x30\x30\x12\x17\n\nnms_thresh\x18\x08 \x01(\x02:\x03\x30.7\"\x95\x01\n\x12NormalizeParameter\x12\x1c\n\x0e\x61\x63ross_spatial\x18\x01 \x01(\x08:\x04true\x12/\n\x0cscale_filler\x18\x02 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x1c\n\x0e\x63hannel_shared\x18\x03 \x01(\x08:\x04true\x12\x12\n\x03\x65ps\x18\x04 \x01(\x02:\x05\x31\x65-10\"!\n\x10PermuteParameter\x12\r\n\x05order\x18\x01 
\x03(\r\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xb4\x02\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12G\n\rnormalization\x18\x03 \x01(\x0e\x32).mo_caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\x12\x1f\n\x14pre_fixed_normalizer\x18\x04 \x01(\x02:\x01\x31\x12$\n\x15weight_by_label_freqs\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63lass_weighting\x18\x06 \x03(\x02\"Q\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\r\n\tPRE_FIXED\x10\x03\x12\x08\n\x04NONE\x10\x04\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"D\n\x18\x43hannelPermutationAction\x12\x0c\n\x04\x63han\x18\x01 \x02(\r\x12\x0c\n\x04\x63opy\x18\x02 \x01(\r\x12\x0c\n\x04\x66ill\x18\x03 \x01(\x02\"\x9a\x01\n\x1b\x43hannelPermutationParameter\x12\x32\n\x06\x61\x63tion\x18\x01 \x03(\x0b\x32\".mo_caffe.ChannelPermutationAction\x12\x12\n\nnum_output\x18\x10 \x02(\r\x12\x1f\n\x10inplace_possible\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x07version\x18\x12 \x01(\x05:\x01\x30\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 
\x01(\x02:\x05\x31\x65-05\"J\n\x19\x42oxAnnotatorOHEMParameter\x12\x13\n\x0broi_per_img\x18\x01 \x02(\r\x12\x18\n\x0cignore_label\x18\x02 \x01(\x05:\x02-1\"`\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12)\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\x85\x04\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12\x30\n\rweight_filler\x18\x07 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12>\n\x06\x65ngine\x18\x0f \x01(\x0e\x32%.mo_caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"A\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\x12\x0f\n\x07\x64imsize\x18\x03 \x03(\r\"P\n\x13\x43TCDecoderParameter\x12\x17\n\x0b\x62lank_index\x18\x01 \x01(\x05:\x02-1\x12 \n\x12\x63tc_merge_repeated\x18\x02 \x01(\x08:\x04true\"\xb2\x01\n\x10\x43TCLossParameter\x12\x17\n\x0coutput_delay\x18\x01 \x01(\x05:\x01\x30\x12\x17\n\x0b\x62lank_index\x18\x02 
\x01(\x05:\x02-1\x12+\n\x1cpreprocess_collapse_repeated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12 \n\x12\x63tc_merge_repeated\x18\x04 \x01(\x08:\x04true\x12\x1d\n\x12loss_calculation_t\x18\x05 \x01(\x05:\x01\x30\"\xa7\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x34\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x1a.mo_caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\"[\n\x1eNonMaximumSuppressionParameter\x12\x1a\n\rnms_threshold\x18\x01 \x01(\x02:\x03\x30.3\x12\r\n\x05top_k\x18\x02 \x01(\x05\x12\x0e\n\x03\x65ta\x18\x03 \x01(\x02:\x01\x31\"\x99\x04\n\x0fResizeParameter\x12\x0f\n\x04prob\x18\x01 \x01(\x02:\x01\x31\x12@\n\x0bresize_mode\x18\x02 \x01(\x0e\x32%.mo_caffe.ResizeParameter.Resize_mode:\x04WARP\x12\x11\n\x06height\x18\x03 \x01(\r:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\r:\x01\x30\x12\x17\n\x0cheight_scale\x18\x08 \x01(\r:\x01\x30\x12\x16\n\x0bwidth_scale\x18\t \x01(\r:\x01\x30\x12>\n\x08pad_mode\x18\x05 \x01(\x0e\x32\".mo_caffe.ResizeParameter.Pad_mode:\x08\x43ONSTANT\x12\x11\n\tpad_value\x18\x06 \x03(\x02\x12:\n\x0binterp_mode\x18\x07 
\x03(\x0e\x32%.mo_caffe.ResizeParameter.Interp_mode\"G\n\x0bResize_mode\x12\x08\n\x04WARP\x10\x01\x12\x12\n\x0e\x46IT_SMALL_SIZE\x10\x02\x12\x1a\n\x16\x46IT_LARGE_SIZE_AND_PAD\x10\x03\":\n\x08Pad_mode\x12\x0c\n\x08\x43ONSTANT\x10\x01\x12\x0c\n\x08MIRRORED\x10\x02\x12\x12\n\x0eREPEAT_NEAREST\x10\x03\"I\n\x0bInterp_mode\x12\n\n\x06LINEAR\x10\x01\x12\x08\n\x04\x41REA\x10\x02\x12\x0b\n\x07NEAREST\x10\x03\x12\t\n\x05\x43UBIC\x10\x04\x12\x0c\n\x08LANCZOS4\x10\x05\"\xdb\x01\n\x13SaveOutputParameter\x12\x18\n\x10output_directory\x18\x01 \x01(\t\x12\x1a\n\x12output_name_prefix\x18\x02 \x01(\t\x12\x15\n\routput_format\x18\x03 \x01(\t\x12\x16\n\x0elabel_map_file\x18\x04 \x01(\t\x12\x16\n\x0ename_size_file\x18\x05 \x01(\t\x12\x16\n\x0enum_test_image\x18\x06 \x01(\r\x12/\n\x0cresize_param\x18\x07 \x01(\x0b\x32\x19.mo_caffe.ResizeParameter\"\x9d\x04\n\x18\x44\x65tectionOutputParameter\x12\x13\n\x0bnum_classes\x18\x01 \x01(\r\x12\x1c\n\x0eshare_location\x18\x02 \x01(\x08:\x04true\x12\x1e\n\x13\x62\x61\x63kground_label_id\x18\x03 \x01(\x05:\x01\x30\x12;\n\tnms_param\x18\x04 \x01(\x0b\x32(.mo_caffe.NonMaximumSuppressionParameter\x12\x38\n\x11save_output_param\x18\x05 \x01(\x0b\x32\x1d.mo_caffe.SaveOutputParameter\x12?\n\tcode_type\x18\x06 \x01(\x0e\x32$.mo_caffe.PriorBoxParameter.CodeType:\x06\x43ORNER\x12)\n\x1avariance_encoded_in_target\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x16\n\nkeep_top_k\x18\x07 \x01(\x05:\x02-1\x12\x1c\n\x14\x63onfidence_threshold\x18\t \x01(\x02\x12\x18\n\tvisualize\x18\n \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x13visualize_threshold\x18\x0b \x01(\x02\x12\x11\n\tsave_file\x18\x0c \x01(\t\x12\x17\n\x0binput_width\x18\r \x01(\x05:\x02-1\x12\x18\n\x0cinput_height\x18\x0e \x01(\x05:\x02-1\x12\x18\n\nnormalized\x18\x0f \x01(\x08:\x04true\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa6\x01\n\x12\x44ummyDataParameter\x12.\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x19.mo_caffe.FillerParameter\x12\"\n\x05shape\x18\x06 
\x03(\x0b\x32\x13.mo_caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa8\x01\n\x10\x45ltwiseParameter\x12<\n\toperation\x18\x01 \x01(\x0e\x32$.mo_caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xb2\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x04 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"a\n\x12HingeLossParameter\x12\x33\n\x04norm\x18\x01 \x01(\x0e\x32!.mo_caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 
\x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xd1\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"4\n\x0eInputParameter\x12\"\n\x05shape\x18\x01 \x03(\x0b\x32\x13.mo_caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xbe\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12G\n\x0bnorm_region\x18\x04 \x01(\x0e\x32!.mo_caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x36\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1d.mo_caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\x1f\n\x0cGRNParameter\x12\x0f\n\x04\x62ias\x18\x01 \x01(\x02:\x01\x31\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"8\n\x12ParameterParameter\x12\"\n\x05shape\x18\x01 
\x01(\x0b\x32\x13.mo_caffe.BlobShape\"\xc1\x03\n\x10PoolingParameter\x12\x38\n\x04pool\x18\x01 \x01(\x0e\x32%.mo_caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12:\n\x06\x65ngine\x18\x0b \x01(\x0e\x32!.mo_caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x17\n\tceil_mode\x18\r \x01(\x08:\x04true\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xd4\x02\n\x11PriorBoxParameter\x12\x10\n\x08min_size\x18\x01 \x03(\x02\x12\x10\n\x08max_size\x18\x02 \x03(\x02\x12\x14\n\x0c\x61spect_ratio\x18\x03 \x03(\x02\x12\x12\n\x04\x66lip\x18\x04 \x01(\x08:\x04true\x12\x13\n\x04\x63lip\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x08variance\x18\x06 \x03(\x02\x12\x10\n\x08img_size\x18\x07 \x01(\r\x12\r\n\x05img_h\x18\x08 \x01(\r\x12\r\n\x05img_w\x18\t \x01(\r\x12\x0c\n\x04step\x18\n \x01(\x02\x12\x0e\n\x06step_h\x18\x0b \x01(\x02\x12\x0e\n\x06step_w\x18\x0c \x01(\x02\x12\x13\n\x06offset\x18\r \x01(\x02:\x03\x30.5\x12\r\n\x05width\x18\x0e \x03(\x02\x12\x0e\n\x06height\x18\x0f \x03(\x02\"8\n\x08\x43odeType\x12\n\n\x06\x43ORNER\x10\x01\x12\x0f\n\x0b\x43\x45NTER_SIZE\x10\x02\x12\x0f\n\x0b\x43ORNER_SIZE\x10\x03\"V\n\x15PSROIPoolingParameter\x12\x15\n\rspatial_scale\x18\x01 \x02(\x02\x12\x12\n\noutput_dim\x18\x02 
\x02(\x05\x12\x12\n\ngroup_size\x18\x03 \x02(\x05\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xc6\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 \x01(\r:\x01\x30\x12\x30\n\rweight_filler\x18\x02 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xb0\x01\n\x12ReductionParameter\x12@\n\toperation\x18\x01 \x01(\x0e\x32(.mo_caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x90\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x37\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1e.mo_caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\x1e\n\x0eReLU6Parameter\x12\x0c\n\x01n\x18\x01 \x01(\x02:\x01\x36\"]\n\x10ReshapeParameter\x12\"\n\x05shape\x18\x01 \x01(\x0b\x32\x13.mo_caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"#\n\x10ReverseParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x30\"Y\n\x13ROIPoolingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"]\n\x17ROIWarpingTestParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"Y\n\x13ROIWarpingParameter\x12\x13\n\x08pooled_h\x18\x01 
\x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"\xab\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12)\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12.\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"{\n\x10SigmoidParameter\x12:\n\x06\x65ngine\x18\x01 \x01(\x0e\x32!.mo_caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\")\n\x15SmoothL1LossParameter\x12\x10\n\x05sigma\x18\x01 \x01(\x02:\x01\x31\"\x8c\x01\n\x10SoftmaxParameter\x12:\n\x06\x65ngine\x18\x01 \x01(\x0e\x32!.mo_caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"u\n\rTanHParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.mo_caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 
\x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xf1\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x34\n\x04pool\x18\x02 \x01(\x0e\x32!.mo_caffe.SPPParameter.PoolMethod:\x03MAX\x12\x36\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1d.mo_caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xcc\x14\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\'\n\x07include\x18 \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\'\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\x32\n\x04type\x18\x05 \x01(\x0e\x32$.mo_caffe.V1LayerParameter.LayerType\x12\"\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12\x41\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32\'.mo_caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x33\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x1b.mo_caffe.AccuracyParameter\x12/\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x19.mo_caffe.ArgMaxParameter\x12/\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x19.mo_caffe.ConcatParameter\x12\x42\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\".mo_caffe.ContrastiveLossParameter\x12\x39\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1e.mo_caffe.ConvolutionParameter\x12+\n\ndata_param\x18\x0b \x01(\x0b\x32\x17.mo_caffe.DataParameter\x12\x31\n\rdropout_param\x18\x0c \x01(\x0b\x32\x1a.mo_caffe.DropoutParameter\x12\x36\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x1c.mo_caffe.DummyDataParameter\x12\x31\n\reltwise_param\x18\x18 \x01(\x0b\x32\x1a.mo_caffe.EltwiseParameter\x12)\n\texp_param\x18) \x01(\x0b\x32\x16.mo_caffe.ExpParameter\x12\x34\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x1b.mo_caffe.HDF5DataParameter\x12\x38\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\x12\x36\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x1c.mo_caffe.HingeLossParameter\x12\x36\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x1c.mo_caffe.ImageDataParameter\x12<\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1f.mo_caffe.InfogainLossParameter\x12<\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1f.mo_caffe.InnerProductParameter\x12)\n\tlrn_param\x18\x12 \x01(\x0b\x32\x16.mo_caffe.LRNParameter\x12\x38\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1d.mo_caffe.MemoryDataParameter\x12)\n\tmvn_param\x18\" \x01(\x0b\x32\x16.mo_caffe.MVNParameter\x12\x31\n\rpooling_param\x18\x13 
\x01(\x0b\x32\x1a.mo_caffe.PoolingParameter\x12-\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x18.mo_caffe.PowerParameter\x12+\n\nrelu_param\x18\x1e \x01(\x0b\x32\x17.mo_caffe.ReLUParameter\x12\x31\n\rsigmoid_param\x18& \x01(\x0b\x32\x1a.mo_caffe.SigmoidParameter\x12\x31\n\rsoftmax_param\x18\' \x01(\x0b\x32\x1a.mo_caffe.SoftmaxParameter\x12-\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x18.mo_caffe.SliceParameter\x12+\n\ntanh_param\x18% \x01(\x0b\x32\x17.mo_caffe.TanHParameter\x12\x35\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x1c.mo_caffe.ThresholdParameter\x12\x38\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1d.mo_caffe.WindowDataParameter\x12:\n\x0ftransform_param\x18$ \x01(\x0b\x32!.mo_caffe.TransformationParameter\x12+\n\nloss_param\x18* \x01(\x0b\x32\x17.mo_caffe.LossParameter\x12)\n\x05layer\x18\x01 \x01(\x0b\x32\x1a.mo_caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x8c\x08\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x38\n\x04pool\x18\x0b \x01(\x0e\x32%.mo_caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 
\x01(\x08:\x05\x66\x61lse\x12\"\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x39\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"Z\n\x0ePReLUParameter\x12)\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse\"\x86\x01\n\x13RegionYoloParameter\x12\x11\n\x06\x63oords\x18\x01 \x01(\x05:\x01\x34\x12\x13\n\x07\x63lasses\x18\x02 \x01(\x05:\x02\x32\x30\x12\x0e\n\x03num\x18\x03 \x01(\x05:\x01\x31\x12\x18\n\ndo_softmax\x18\x04 \x01(\x08:\x04true\x12\x0f\n\x07\x61nchors\x18\x05 \x03(\x02\x12\x0c\n\x04mask\x18\x06 \x03(\x05\"\'\n\x12ReorgYoloParameter\x12\x11\n\x06stride\x18\x01 \x01(\x05:\x01\x31\"\xcf\x01\n\x18RandomGeneratorParameter\x12\x1a\n\trand_type\x18\x01 \x01(\t:\x07uniform\x12\x12\n\x03\x65xp\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x0f\n\x04mean\x18\x04 \x01(\x02:\x01\x30\x12\x11\n\x06spread\x18\x05 \x01(\x02:\x01\x30\x12\x0f\n\x04prob\x18\x06 \x01(\x02:\x01\x31\x12\x1c\n\x0e\x61pply_schedule\x18\x07 \x01(\x08:\x04true\x12\x19\n\ndiscretize\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nmultiplier\x18\t 
\x01(\x02:\x01\x31\"`\n\x16\x43oeffScheduleParameter\x12\x14\n\thalf_life\x18\x01 \x01(\x02:\x01\x31\x12\x18\n\rinitial_coeff\x18\x02 \x01(\x02:\x01\x31\x12\x16\n\x0b\x66inal_coeff\x18\x03 \x01(\x02:\x01\x31\"\xde\x07\n\x11\x41ugmentationCoeff\x12\x11\n\x06mirror\x18\x01 \x01(\x02:\x01\x30\x12\r\n\x02\x64x\x18\x02 \x01(\x02:\x01\x30\x12\r\n\x02\x64y\x18\x03 \x01(\x02:\x01\x30\x12\x10\n\x05\x61ngle\x18\x04 \x01(\x02:\x01\x30\x12\x11\n\x06zoom_x\x18\x05 \x01(\x02:\x01\x31\x12\x11\n\x06zoom_y\x18\x06 \x01(\x02:\x01\x31\x12\x10\n\x05gamma\x18\x64 \x01(\x02:\x01\x31\x12\x15\n\nbrightness\x18\x65 \x01(\x02:\x01\x30\x12\x13\n\x08\x63ontrast\x18\x66 \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor1\x18g \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor2\x18h \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor3\x18i \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean0\x18\n \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean1\x18\x0b \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean2\x18\x0c \x01(\x02:\x01\x31\x12\x16\n\x0b\x61\x64\x64_nomean0\x18\r \x01(\x02:\x01\x30\x12\x16\n\x0b\x61\x64\x64_nomean1\x18\x0e \x01(\x02:\x01\x30\x12\x16\n\x0b\x61\x64\x64_nomean2\x18\x0f \x01(\x02:\x01\x30\x12\x17\n\x0cmult_nomean0\x18\x10 \x01(\x02:\x01\x31\x12\x17\n\x0cmult_nomean1\x18\x11 \x01(\x02:\x01\x31\x12\x17\n\x0cmult_nomean2\x18\x12 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean0\x18\x13 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean1\x18\x14 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean2\x18\x15 \x01(\x02:\x01\x31\x12\x18\n\radd_withmean0\x18\x16 \x01(\x02:\x01\x30\x12\x18\n\radd_withmean1\x18\x17 \x01(\x02:\x01\x30\x12\x18\n\radd_withmean2\x18\x18 \x01(\x02:\x01\x30\x12\x19\n\x0emult_withmean0\x18\x19 \x01(\x02:\x01\x31\x12\x19\n\x0emult_withmean1\x18\x1a \x01(\x02:\x01\x31\x12\x19\n\x0emult_withmean2\x18\x1b \x01(\x02:\x01\x31\x12\x14\n\tlmult_pow\x18\x1c \x01(\x02:\x01\x31\x12\x14\n\tlmult_add\x18\x1d \x01(\x02:\x01\x30\x12\x15\n\nlmult_mult\x18\x1e \x01(\x02:\x01\x31\x12\x14\n\tcol_angle\x18\x1f 
\x01(\x02:\x01\x30\x12\x15\n\nfog_amount\x18& \x01(\x02:\x01\x30\x12\x13\n\x08\x66og_size\x18\' \x01(\x02:\x01\x30\x12\x1c\n\x11motion_blur_angle\x18( \x01(\x02:\x01\x30\x12\x1b\n\x10motion_blur_size\x18) \x01(\x02:\x01\x30\x12\x17\n\x0cshadow_angle\x18* \x01(\x02:\x01\x30\x12\x1a\n\x0fshadow_distance\x18+ \x01(\x02:\x01\x30\x12\x1a\n\x0fshadow_strength\x18, \x01(\x02:\x01\x30\x12\x10\n\x05noise\x18- \x01(\x02:\x01\x30\"\xcc\x10\n\x15\x41ugmentationParameter\x12\x15\n\ncrop_width\x18! \x01(\r:\x01\x30\x12\x16\n\x0b\x63rop_height\x18\" \x01(\r:\x01\x30\x12\x19\n\x0fwrite_augmented\x18\x02 \x01(\t:\x00\x12\x1b\n\x0emax_multiplier\x18\x03 \x01(\x02:\x03\x32\x35\x35\x12\"\n\x13\x61ugment_during_test\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0erecompute_mean\x18\x05 \x01(\r:\x01\x30\x12\x14\n\nwrite_mean\x18\x06 \x01(\t:\x00\x12\x1c\n\x0emean_per_pixel\x18\x07 \x01(\x08:\x04true\x12\x0c\n\x04mean\x18\x12 \x03(\x02\x12\x11\n\x04mode\x18\x08 \x01(\t:\x03\x61\x64\x64\x12\x16\n\x0b\x62ottomwidth\x18P \x01(\r:\x01\x30\x12\x17\n\x0c\x62ottomheight\x18Q \x01(\r:\x01\x30\x12\x0e\n\x03num\x18R \x01(\r:\x01\x30\x12\x18\n\x10\x63hromatic_eigvec\x18S \x03(\x02\x12\x32\n\x06mirror\x18\n \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\ttranslate\x18\x0b \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x32\n\x06rotate\x18\x0c \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x30\n\x04zoom\x18\r \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07squeeze\x18\x0e \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x37\n\x0btranslate_x\x18\x0f \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x37\n\x0btranslate_y\x18\x10 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05gamma\x18# \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nbrightness\x18$ \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x63ontrast\x18% \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05\x63olor\x18& 
\x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tlmult_pow\x18\x14 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nlmult_mult\x18\x15 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tlmult_add\x18\x16 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07sat_pow\x18\x17 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08sat_mult\x18\x18 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07sat_add\x18\x19 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07\x63ol_pow\x18\x1a \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x63ol_mult\x18\x1b \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07\x63ol_add\x18\x1c \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08ladd_pow\x18\x1d \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tladd_mult\x18\x1e \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08ladd_add\x18\x1f \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\ncol_rotate\x18 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nfog_amount\x18\x64 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x66og_size\x18\x65 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12=\n\x11motion_blur_angle\x18\x66 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12<\n\x10motion_blur_size\x18g \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x38\n\x0cshadow_angle\x18h \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12;\n\x0fshadow_distance\x18i \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12;\n\x0fshadow_strength\x18j \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05noise\x18k \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\"\x85\x01\n\x11\x46lowWarpParameter\x12\x43\n\nfill_value\x18\x01 
\x01(\x0e\x32).mo_caffe.FlowWarpParameter.FillParameter:\x04ZERO\"+\n\rFillParameter\x12\x08\n\x04ZERO\x10\x01\x12\x10\n\x0cNOT_A_NUMBER\x10\x02\"\xb6\x02\n\x14\x43orrelationParameter\x12\x0e\n\x03pad\x18\x02 \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x03 \x01(\r\x12\x18\n\x10max_displacement\x18\x04 \x01(\r\x12\x13\n\x08stride_1\x18\x05 \x01(\r:\x01\x31\x12\x13\n\x08stride_2\x18\x06 \x01(\r:\x01\x31\x12\x1b\n\x10single_direction\x18\x08 \x01(\x05:\x01\x30\x12\x15\n\x06\x64o_abs\x18\x07 \x01(\x08:\x05\x66\x61lse\x12R\n\x10\x63orrelation_type\x18\x0f \x01(\x0e\x32..mo_caffe.CorrelationParameter.CorrelationType:\x08MULTIPLY\"-\n\x0f\x43orrelationType\x12\x0c\n\x08MULTIPLY\x10\x00\x12\x0c\n\x08SUBTRACT\x10\x01\"\xdc\x01\n\x11ResampleParameter\x12\x17\n\tantialias\x18\x04 \x01(\x08:\x04true\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\x12>\n\x04type\x18\x03 \x01(\x0e\x32(.mo_caffe.ResampleParameter.ResampleType:\x06LINEAR\x12\x11\n\x06\x66\x61\x63tor\x18\x05 \x01(\x02:\x01\x31\"<\n\x0cResampleType\x12\x0b\n\x07NEAREST\x10\x01\x12\n\n\x06LINEAR\x10\x02\x12\t\n\x05\x43UBIC\x10\x03\x12\x08\n\x04\x41REA\x10\x04\"z\n\x0e\x41\x63\x63umParameter\x12\x15\n\ntop_height\x18\x01 \x01(\r:\x01\x30\x12\x14\n\ttop_width\x18\x02 \x01(\r:\x01\x30\x12\x1c\n\x11size_divisible_by\x18\x03 \x01(\r:\x01\x30\x12\x1d\n\x0ehave_reference\x18\x04 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01')
+ serialized_pb=_b('\n\x0emo_caffe.proto\x12\x08mo_caffe\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcf\x01\n\tBlobProto\x12\"\n\x05shape\x18\x07 \x01(\x0b\x32\x13.mo_caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"5\n\x0f\x42lobProtoVector\x12\"\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x13.mo_caffe.BlobProto\"M\n\x1e\x43osineSimilarityBatchParameter\x12\x14\n\tpos_label\x18\x01 \x01(\x01:\x01\x31\x12\x15\n\tneg_label\x18\x02 \x01(\x01:\x02-1\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"A\n\x0cLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\"0\n\x08LabelMap\x12$\n\x04item\x18\x01 \x03(\x0b\x32\x16.mo_caffe.LabelMapItem\"\x87\x01\n\x0eNormalizedBBox\x12\x0c\n\x04xmin\x18\x01 \x01(\x02\x12\x0c\n\x04ymin\x18\x02 \x01(\x02\x12\x0c\n\x04xmax\x18\x03 \x01(\x02\x12\x0c\n\x04ymax\x18\x04 \x01(\x02\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x11\n\tdifficult\x18\x06 \x01(\x08\x12\r\n\x05score\x18\x07 \x01(\x02\x12\x0c\n\x04size\x18\x08 \x01(\x02\"\xad\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 
\x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x45\n\rvariance_norm\x18\x08 \x01(\x0e\x32&.mo_caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\x12\x0c\n\x04\x66ile\x18\t \x01(\t\x12\x10\n\x08\x64iag_val\x18\n \x03(\x02\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\xed\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12(\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x13.mo_caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12!\n\x05state\x18\x06 \x01(\x0b\x32\x12.mo_caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cprofile_info\x18\t \x01(\x08:\x05\x66\x61lse\x12\x18\n\x0cprofile_iter\x18\n \x01(\x05:\x02\x35\x30\x12\x1a\n\x0eprofile_warmup\x18\x0b \x01(\x05:\x02\x31\x30\x12\'\n\x05layer\x18\x64 \x03(\x0b\x32\x18.mo_caffe.LayerParameter\x12*\n\x06layers\x18\x02 \x03(\x0b\x32\x1a.mo_caffe.V1LayerParameter\"\xf4\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12)\n\tnet_param\x18\x19 \x01(\x0b\x32\x16.mo_caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12/\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x16.mo_caffe.NetParameter\x12.\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x16.mo_caffe.NetParameter\x12\'\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x12.mo_caffe.NetState\x12&\n\ntest_state\x18\x1b \x03(\x0b\x32\x12.mo_caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x17\n\x0fplateau_winsize\x18* \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12N\n\x0fsnapshot_format\x18% \x01(\x0e\x32(.mo_caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12>\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32$.mo_caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12>\n\x0bsolver_type\x18\x1e \x01(\x0e\x32$.mo_caffe.SolverParameter.SolverType:\x03SGD\x12\x1f\n\x11layer_wise_reduce\x18) \x01(\x08:\x04true\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"\xa8\x01\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12$\n\x07history\x18\x03 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 
\x01(\x05:\x01\x30\x12\x1b\n\x0cminimum_loss\x18\x05 \x01(\x02:\x05\x31\x65+38\x12\x1a\n\x0fiter_last_event\x18\x06 \x01(\x05:\x01\x30\"Q\n\x08NetState\x12$\n\x05phase\x18\x01 \x01(\x0e\x32\x0f.mo_caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"v\n\x0cNetStateRule\x12\x1e\n\x05phase\x18\x01 \x01(\x0e\x32\x0f.mo_caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xad\x02\n\x1bSpatialTransformerParameter\x12\x1e\n\x0etransform_type\x18\x01 \x01(\t:\x06\x61\x66\x66ine\x12\x1e\n\x0csampler_type\x18\x02 \x01(\t:\x08\x62ilinear\x12\x10\n\x08output_H\x18\x03 \x01(\x05\x12\x10\n\x08output_W\x18\x04 \x01(\x05\x12\x1b\n\rto_compute_dU\x18\x05 \x01(\x08:\x04true\x12\x11\n\ttheta_1_1\x18\x06 \x01(\x01\x12\x11\n\ttheta_1_2\x18\x07 \x01(\x01\x12\x11\n\ttheta_1_3\x18\x08 \x01(\x01\x12\x11\n\ttheta_2_1\x18\t \x01(\x01\x12\x11\n\ttheta_2_2\x18\n \x01(\x01\x12\x11\n\ttheta_2_3\x18\x0b \x01(\x01\x12\x1b\n\x0c\x64\x65_transform\x18\x0c \x01(\x08:\x05\x66\x61lse\"(\n\x12PowerFileParameter\x12\x12\n\nshift_file\x18\x01 \x01(\t\"5\n\x0fSTLossParameter\x12\x10\n\x08output_H\x18\x01 \x02(\x05\x12\x10\n\x08output_W\x18\x02 \x02(\x05\"%\n\x10LocLossParameter\x12\x11\n\tthreshold\x18\x01 \x02(\x01\"\xa6\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\nshare_mode\x18\x02 \x01(\x0e\x32 .mo_caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xf4#\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1e\n\x05phase\x18\n \x01(\x0e\x32\x0f.mo_caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\"\n\x05param\x18\x06 
\x03(\x0b\x32\x13.mo_caffe.ParamSpec\x12\"\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12\'\n\x07include\x18\x08 \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\'\n\x07\x65xclude\x18\t \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12:\n\x0ftransform_param\x18\x64 \x01(\x0b\x32!.mo_caffe.TransformationParameter\x12+\n\nloss_param\x18\x65 \x01(\x0b\x32\x17.mo_caffe.LossParameter\x12\x33\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x1b.mo_caffe.AccuracyParameter\x12/\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x19.mo_caffe.ArgMaxParameter\x12\x37\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x1c.mo_caffe.BatchNormParameter\x12,\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x17.mo_caffe.BiasParameter\x12I\n\x19\x63hannel_permutation_param\x18\x92? \x01(\x0b\x32%.mo_caffe.ChannelPermutationParameter\x12/\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x19.mo_caffe.ConcatParameter\x12\x42\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\".mo_caffe.ContrastiveLossParameter\x12\x39\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1e.mo_caffe.ConvolutionParameter\x12,\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x17.mo_caffe.CropParameter\x12\x39\n\x11\x63tc_decoder_param\x18\x95\x01 \x01(\x0b\x32\x1d.mo_caffe.CTCDecoderParameter\x12\x33\n\x0e\x63tc_loss_param\x18\x94\x01 \x01(\x0b\x32\x1a.mo_caffe.CTCLossParameter\x12+\n\ndata_param\x18k \x01(\x0b\x32\x17.mo_caffe.DataParameter\x12\x31\n\rdropout_param\x18l \x01(\x0b\x32\x1a.mo_caffe.DropoutParameter\x12\x36\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x1c.mo_caffe.DummyDataParameter\x12\x31\n\reltwise_param\x18n \x01(\x0b\x32\x1a.mo_caffe.EltwiseParameter\x12*\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x16.mo_caffe.ELUParameter\x12.\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x18.mo_caffe.EmbedParameter\x12)\n\texp_param\x18o \x01(\x0b\x32\x16.mo_caffe.ExpParameter\x12\x32\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x1a.mo_caffe.FlattenParameter\x12*\n\tgrn_param\x18\xd5\x01 
\x01(\x0b\x32\x16.mo_caffe.GRNParameter\x12\x34\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x1b.mo_caffe.HDF5DataParameter\x12\x38\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\x12\x36\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x1c.mo_caffe.HingeLossParameter\x12\x36\n\x10image_data_param\x18s \x01(\x0b\x32\x1c.mo_caffe.ImageDataParameter\x12<\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1f.mo_caffe.InfogainLossParameter\x12<\n\x13inner_product_param\x18u \x01(\x0b\x32\x1f.mo_caffe.InnerProductParameter\x12.\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x18.mo_caffe.InputParameter\x12*\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x16.mo_caffe.LogParameter\x12)\n\tlrn_param\x18v \x01(\x0b\x32\x16.mo_caffe.LRNParameter\x12\x38\n\x11memory_data_param\x18w \x01(\x0b\x32\x1d.mo_caffe.MemoryDataParameter\x12)\n\tmvn_param\x18x \x01(\x0b\x32\x16.mo_caffe.MVNParameter\x12\x36\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x1c.mo_caffe.ParameterParameter\x12\x31\n\rpooling_param\x18y \x01(\x0b\x32\x1a.mo_caffe.PoolingParameter\x12\x32\n\rpermute_param\x18\x9a\x01 \x01(\x0b\x32\x1a.mo_caffe.PermuteParameter\x12-\n\x0bpower_param\x18z \x01(\x0b\x32\x18.mo_caffe.PowerParameter\x12.\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x18.mo_caffe.PReLUParameter\x12\x30\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x19.mo_caffe.PythonParameter\x12\x36\n\x0frecurrent_param\x18\x92\x01 \x01(\x0b\x32\x1c.mo_caffe.RecurrentParameter\x12\x36\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x1c.mo_caffe.ReductionParameter\x12+\n\nrelu_param\x18{ \x01(\x0b\x32\x17.mo_caffe.ReLUParameter\x12\x32\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x1a.mo_caffe.ReshapeParameter\x12\x32\n\rreverse_param\x18\x93\x01 \x01(\x0b\x32\x1a.mo_caffe.ReverseParameter\x12.\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x18.mo_caffe.ScaleParameter\x12\x31\n\rsigmoid_param\x18| \x01(\x0b\x32\x1a.mo_caffe.SigmoidParameter\x12\x31\n\rsoftmax_param\x18} 
\x01(\x0b\x32\x1a.mo_caffe.SoftmaxParameter\x12*\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x16.mo_caffe.SPPParameter\x12-\n\x0bslice_param\x18~ \x01(\x0b\x32\x18.mo_caffe.SliceParameter\x12+\n\ntanh_param\x18\x7f \x01(\x0b\x32\x17.mo_caffe.TanHParameter\x12\x36\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x1c.mo_caffe.ThresholdParameter\x12,\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x17.mo_caffe.TileParameter\x12\x39\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1d.mo_caffe.WindowDataParameter\x12\x38\n\x08st_param\x18\x96\x01 \x01(\x0b\x32%.mo_caffe.SpatialTransformerParameter\x12\x31\n\rst_loss_param\x18\x97\x01 \x01(\x0b\x32\x19.mo_caffe.STLossParameter\x12\x37\n\x10power_file_param\x18\x98\x01 \x01(\x0b\x32\x1c.mo_caffe.PowerFileParameter\x12\x33\n\x0eloc_loss_param\x18\x99\x01 \x01(\x0b\x32\x1a.mo_caffe.LocLossParameter\x12\x34\n\x0eproposal_param\x18\xc9\x01 \x01(\x0b\x32\x1b.mo_caffe.ProposalParameter\x12P\n\x1d\x63osine_similarity_batch_param\x18\xca\x01 \x01(\x0b\x32(.mo_caffe.CosineSimilarityBatchParameter\x12\x45\n\x0erss_loss_param\x18\xcb\x01 \x01(\x0b\x32,.mo_caffe.RandomSamplingSoftmaxLossParameter\x12\x31\n\nnorm_param\x18\xcc\x01 \x01(\x0b\x32\x1c.mo_caffe.NormalizeParameter\x12\x39\n\x11roi_warping_param\x18\xcd\x01 \x01(\x0b\x32\x1d.mo_caffe.ROIWarpingParameter\x12=\n\x13psroi_pooling_param\x18\xcf\x01 \x01(\x0b\x32\x1f.mo_caffe.PSROIPoolingParameter\x12\x39\n\x11roi_pooling_param\x18\xd0\x01 \x01(\x0b\x32\x1d.mo_caffe.ROIPoolingParameter\x12>\n\x14smooth_l1_loss_param\x18\xd1\x01 \x01(\x0b\x32\x1f.mo_caffe.SmoothL1LossParameter\x12\x46\n\x18\x62ox_annotator_ohem_param\x18\xd2\x01 \x01(\x0b\x32#.mo_caffe.BoxAnnotatorOHEMParameter\x12\x43\n\x16\x64\x65tection_output_param\x18\xd3\x01 \x01(\x0b\x32\".mo_caffe.DetectionOutputParameter\x12\x35\n\x0fprior_box_param\x18\xd4\x01 \x01(\x0b\x32\x1b.mo_caffe.PriorBoxParameter\x12\x39\n\x11region_yolo_param\x18\xd6\x01 \x01(\x0b\x32\x1d.mo_caffe.RegionYoloParameter\x12\x37\n\x10reorg_yolo_param\x18\xd7\x01 
\x01(\x0b\x32\x1c.mo_caffe.ReorgYoloParameter\x12.\n\x0brelu6_param\x18\xd8\x01 \x01(\x0b\x32\x18.mo_caffe.ReLU6Parameter\x12\x30\n\x0cinterp_param\x18\xd9\x01 \x01(\x0b\x32\x19.mo_caffe.InterpParameter\x12<\n\x12\x61ugmentation_param\x18\xda\x01 \x01(\x0b\x32\x1f.mo_caffe.AugmentationParameter\x12:\n\x11\x63orrelation_param\x18\xdb\x01 \x01(\x0b\x32\x1e.mo_caffe.CorrelationParameter\x12\x34\n\x0eresample_param\x18\xdc\x01 \x01(\x0b\x32\x1b.mo_caffe.ResampleParameter\x12\x35\n\x0f\x66low_warp_param\x18\xdd\x01 \x01(\x0b\x32\x1b.mo_caffe.FlowWarpParameter\x12.\n\x0b\x61\x63\x63um_param\x18\xde\x01 \x01(\x0b\x32\x18.mo_caffe.AccumParameter\x12?\n\x14\x63oeff_schedule_param\x18\xdf\x01 \x01(\x0b\x32 .mo_caffe.CoeffScheduleParameter\x12\x41\n\x15shuffle_channel_param\x18\xe0\x01 \x01(\x0b\x32!.mo_caffe.ShuffleChannelParameter\"\x90\x01\n\x0fInterpParameter\x12\x11\n\x06height\x18\x01 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bzoom_factor\x18\x03 \x01(\x05:\x01\x31\x12\x18\n\rshrink_factor\x18\x04 \x01(\x05:\x01\x31\x12\x12\n\x07pad_beg\x18\x05 \x01(\x05:\x01\x30\x12\x12\n\x07pad_end\x18\x06 \x01(\x05:\x01\x30\"n\n\"RandomSamplingSoftmaxLossParameter\x12 \n\x13random_sampling_num\x18\x01 \x01(\x05:\x03\x31\x30\x30\x12&\n\x16random_sampling_policy\x18\x02 \x01(\t:\x06random\"\xc8\x01\n\x11ProposalParameter\x12\x17\n\x0b\x66\x65\x61t_stride\x18\x01 \x01(\r:\x02\x31\x36\x12\x15\n\tbase_size\x18\x02 \x01(\r:\x02\x31\x36\x12\x14\n\x08min_size\x18\x03 \x01(\r:\x02\x31\x36\x12\r\n\x05ratio\x18\x04 \x03(\x02\x12\r\n\x05scale\x18\x05 \x03(\x02\x12\x1a\n\x0cpre_nms_topn\x18\x06 \x01(\r:\x04\x36\x30\x30\x30\x12\x1a\n\rpost_nms_topn\x18\x07 \x01(\r:\x03\x33\x30\x30\x12\x17\n\nnms_thresh\x18\x08 \x01(\x02:\x03\x30.7\"\x95\x01\n\x12NormalizeParameter\x12\x1c\n\x0e\x61\x63ross_spatial\x18\x01 \x01(\x08:\x04true\x12/\n\x0cscale_filler\x18\x02 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x1c\n\x0e\x63hannel_shared\x18\x03 
\x01(\x08:\x04true\x12\x12\n\x03\x65ps\x18\x04 \x01(\x02:\x05\x31\x65-10\"!\n\x10PermuteParameter\x12\r\n\x05order\x18\x01 \x03(\r\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xb4\x02\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12G\n\rnormalization\x18\x03 \x01(\x0e\x32).mo_caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\x12\x1f\n\x14pre_fixed_normalizer\x18\x04 \x01(\x02:\x01\x31\x12$\n\x15weight_by_label_freqs\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63lass_weighting\x18\x06 \x03(\x02\"Q\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\r\n\tPRE_FIXED\x10\x03\x12\x08\n\x04NONE\x10\x04\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"D\n\x18\x43hannelPermutationAction\x12\x0c\n\x04\x63han\x18\x01 \x02(\r\x12\x0c\n\x04\x63opy\x18\x02 \x01(\r\x12\x0c\n\x04\x66ill\x18\x03 \x01(\x02\"\x9a\x01\n\x1b\x43hannelPermutationParameter\x12\x32\n\x06\x61\x63tion\x18\x01 \x03(\x0b\x32\".mo_caffe.ChannelPermutationAction\x12\x12\n\nnum_output\x18\x10 \x02(\r\x12\x1f\n\x10inplace_possible\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x07version\x18\x12 \x01(\x05:\x01\x30\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 
\x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"J\n\x19\x42oxAnnotatorOHEMParameter\x12\x13\n\x0broi_per_img\x18\x01 \x02(\r\x12\x18\n\x0cignore_label\x18\x02 \x01(\x05:\x02-1\"`\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12)\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\x85\x04\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12\x30\n\rweight_filler\x18\x07 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12>\n\x06\x65ngine\x18\x0f \x01(\x0e\x32%.mo_caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"A\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\x12\x0f\n\x07\x64imsize\x18\x03 \x03(\r\"P\n\x13\x43TCDecoderParameter\x12\x17\n\x0b\x62lank_index\x18\x01 \x01(\x05:\x02-1\x12 \n\x12\x63tc_merge_repeated\x18\x02 
\x01(\x08:\x04true\"\xb2\x01\n\x10\x43TCLossParameter\x12\x17\n\x0coutput_delay\x18\x01 \x01(\x05:\x01\x30\x12\x17\n\x0b\x62lank_index\x18\x02 \x01(\x05:\x02-1\x12+\n\x1cpreprocess_collapse_repeated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12 \n\x12\x63tc_merge_repeated\x18\x04 \x01(\x08:\x04true\x12\x1d\n\x12loss_calculation_t\x18\x05 \x01(\x05:\x01\x30\"\xa7\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x34\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x1a.mo_caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\"[\n\x1eNonMaximumSuppressionParameter\x12\x1a\n\rnms_threshold\x18\x01 \x01(\x02:\x03\x30.3\x12\r\n\x05top_k\x18\x02 \x01(\x05\x12\x0e\n\x03\x65ta\x18\x03 \x01(\x02:\x01\x31\"\x99\x04\n\x0fResizeParameter\x12\x0f\n\x04prob\x18\x01 \x01(\x02:\x01\x31\x12@\n\x0bresize_mode\x18\x02 \x01(\x0e\x32%.mo_caffe.ResizeParameter.Resize_mode:\x04WARP\x12\x11\n\x06height\x18\x03 \x01(\r:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\r:\x01\x30\x12\x17\n\x0cheight_scale\x18\x08 \x01(\r:\x01\x30\x12\x16\n\x0bwidth_scale\x18\t \x01(\r:\x01\x30\x12>\n\x08pad_mode\x18\x05 \x01(\x0e\x32\".mo_caffe.ResizeParameter.Pad_mode:\x08\x43ONSTANT\x12\x11\n\tpad_value\x18\x06 \x03(\x02\x12:\n\x0binterp_mode\x18\x07 
\x03(\x0e\x32%.mo_caffe.ResizeParameter.Interp_mode\"G\n\x0bResize_mode\x12\x08\n\x04WARP\x10\x01\x12\x12\n\x0e\x46IT_SMALL_SIZE\x10\x02\x12\x1a\n\x16\x46IT_LARGE_SIZE_AND_PAD\x10\x03\":\n\x08Pad_mode\x12\x0c\n\x08\x43ONSTANT\x10\x01\x12\x0c\n\x08MIRRORED\x10\x02\x12\x12\n\x0eREPEAT_NEAREST\x10\x03\"I\n\x0bInterp_mode\x12\n\n\x06LINEAR\x10\x01\x12\x08\n\x04\x41REA\x10\x02\x12\x0b\n\x07NEAREST\x10\x03\x12\t\n\x05\x43UBIC\x10\x04\x12\x0c\n\x08LANCZOS4\x10\x05\"\xdb\x01\n\x13SaveOutputParameter\x12\x18\n\x10output_directory\x18\x01 \x01(\t\x12\x1a\n\x12output_name_prefix\x18\x02 \x01(\t\x12\x15\n\routput_format\x18\x03 \x01(\t\x12\x16\n\x0elabel_map_file\x18\x04 \x01(\t\x12\x16\n\x0ename_size_file\x18\x05 \x01(\t\x12\x16\n\x0enum_test_image\x18\x06 \x01(\r\x12/\n\x0cresize_param\x18\x07 \x01(\x0b\x32\x19.mo_caffe.ResizeParameter\"\x9d\x04\n\x18\x44\x65tectionOutputParameter\x12\x13\n\x0bnum_classes\x18\x01 \x01(\r\x12\x1c\n\x0eshare_location\x18\x02 \x01(\x08:\x04true\x12\x1e\n\x13\x62\x61\x63kground_label_id\x18\x03 \x01(\x05:\x01\x30\x12;\n\tnms_param\x18\x04 \x01(\x0b\x32(.mo_caffe.NonMaximumSuppressionParameter\x12\x38\n\x11save_output_param\x18\x05 \x01(\x0b\x32\x1d.mo_caffe.SaveOutputParameter\x12?\n\tcode_type\x18\x06 \x01(\x0e\x32$.mo_caffe.PriorBoxParameter.CodeType:\x06\x43ORNER\x12)\n\x1avariance_encoded_in_target\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x16\n\nkeep_top_k\x18\x07 \x01(\x05:\x02-1\x12\x1c\n\x14\x63onfidence_threshold\x18\t \x01(\x02\x12\x18\n\tvisualize\x18\n \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x13visualize_threshold\x18\x0b \x01(\x02\x12\x11\n\tsave_file\x18\x0c \x01(\t\x12\x17\n\x0binput_width\x18\r \x01(\x05:\x02-1\x12\x18\n\x0cinput_height\x18\x0e \x01(\x05:\x02-1\x12\x18\n\nnormalized\x18\x0f \x01(\x08:\x04true\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa6\x01\n\x12\x44ummyDataParameter\x12.\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x19.mo_caffe.FillerParameter\x12\"\n\x05shape\x18\x06 
\x03(\x0b\x32\x13.mo_caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa8\x01\n\x10\x45ltwiseParameter\x12<\n\toperation\x18\x01 \x01(\x0e\x32$.mo_caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xb2\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x04 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"a\n\x12HingeLossParameter\x12\x33\n\x04norm\x18\x01 \x01(\x0e\x32!.mo_caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 
\x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xd1\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"4\n\x0eInputParameter\x12\"\n\x05shape\x18\x01 \x03(\x0b\x32\x13.mo_caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xbe\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12G\n\x0bnorm_region\x18\x04 \x01(\x0e\x32!.mo_caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x36\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1d.mo_caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\x1f\n\x0cGRNParameter\x12\x0f\n\x04\x62ias\x18\x01 \x01(\x02:\x01\x31\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"8\n\x12ParameterParameter\x12\"\n\x05shape\x18\x01 
\x01(\x0b\x32\x13.mo_caffe.BlobShape\"\xc1\x03\n\x10PoolingParameter\x12\x38\n\x04pool\x18\x01 \x01(\x0e\x32%.mo_caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12:\n\x06\x65ngine\x18\x0b \x01(\x0e\x32!.mo_caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x17\n\tceil_mode\x18\r \x01(\x08:\x04true\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xd4\x02\n\x11PriorBoxParameter\x12\x10\n\x08min_size\x18\x01 \x03(\x02\x12\x10\n\x08max_size\x18\x02 \x03(\x02\x12\x14\n\x0c\x61spect_ratio\x18\x03 \x03(\x02\x12\x12\n\x04\x66lip\x18\x04 \x01(\x08:\x04true\x12\x13\n\x04\x63lip\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x08variance\x18\x06 \x03(\x02\x12\x10\n\x08img_size\x18\x07 \x01(\r\x12\r\n\x05img_h\x18\x08 \x01(\r\x12\r\n\x05img_w\x18\t \x01(\r\x12\x0c\n\x04step\x18\n \x01(\x02\x12\x0e\n\x06step_h\x18\x0b \x01(\x02\x12\x0e\n\x06step_w\x18\x0c \x01(\x02\x12\x13\n\x06offset\x18\r \x01(\x02:\x03\x30.5\x12\r\n\x05width\x18\x0e \x03(\x02\x12\x0e\n\x06height\x18\x0f \x03(\x02\"8\n\x08\x43odeType\x12\n\n\x06\x43ORNER\x10\x01\x12\x0f\n\x0b\x43\x45NTER_SIZE\x10\x02\x12\x0f\n\x0b\x43ORNER_SIZE\x10\x03\"V\n\x15PSROIPoolingParameter\x12\x15\n\rspatial_scale\x18\x01 \x02(\x02\x12\x12\n\noutput_dim\x18\x02 
\x02(\x05\x12\x12\n\ngroup_size\x18\x03 \x02(\x05\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xc6\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 \x01(\r:\x01\x30\x12\x30\n\rweight_filler\x18\x02 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xb0\x01\n\x12ReductionParameter\x12@\n\toperation\x18\x01 \x01(\x0e\x32(.mo_caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x90\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x37\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1e.mo_caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\x1e\n\x0eReLU6Parameter\x12\x0c\n\x01n\x18\x01 \x01(\x02:\x01\x36\"]\n\x10ReshapeParameter\x12\"\n\x05shape\x18\x01 \x01(\x0b\x32\x13.mo_caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"#\n\x10ReverseParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x30\"Y\n\x13ROIPoolingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"]\n\x17ROIWarpingTestParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"Y\n\x13ROIWarpingParameter\x12\x13\n\x08pooled_h\x18\x01 
\x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"\xab\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12)\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12.\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"{\n\x10SigmoidParameter\x12:\n\x06\x65ngine\x18\x01 \x01(\x0e\x32!.mo_caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\")\n\x15SmoothL1LossParameter\x12\x10\n\x05sigma\x18\x01 \x01(\x02:\x01\x31\"\x8c\x01\n\x10SoftmaxParameter\x12:\n\x06\x65ngine\x18\x01 \x01(\x0e\x32!.mo_caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"u\n\rTanHParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.mo_caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 
\x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xf1\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x34\n\x04pool\x18\x02 \x01(\x0e\x32!.mo_caffe.SPPParameter.PoolMethod:\x03MAX\x12\x36\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1d.mo_caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xcc\x14\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\'\n\x07include\x18 \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\'\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\x32\n\x04type\x18\x05 \x01(\x0e\x32$.mo_caffe.V1LayerParameter.LayerType\x12\"\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12\x41\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32\'.mo_caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x33\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x1b.mo_caffe.AccuracyParameter\x12/\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x19.mo_caffe.ArgMaxParameter\x12/\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x19.mo_caffe.ConcatParameter\x12\x42\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\".mo_caffe.ContrastiveLossParameter\x12\x39\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1e.mo_caffe.ConvolutionParameter\x12+\n\ndata_param\x18\x0b \x01(\x0b\x32\x17.mo_caffe.DataParameter\x12\x31\n\rdropout_param\x18\x0c \x01(\x0b\x32\x1a.mo_caffe.DropoutParameter\x12\x36\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x1c.mo_caffe.DummyDataParameter\x12\x31\n\reltwise_param\x18\x18 \x01(\x0b\x32\x1a.mo_caffe.EltwiseParameter\x12)\n\texp_param\x18) \x01(\x0b\x32\x16.mo_caffe.ExpParameter\x12\x34\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x1b.mo_caffe.HDF5DataParameter\x12\x38\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\x12\x36\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x1c.mo_caffe.HingeLossParameter\x12\x36\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x1c.mo_caffe.ImageDataParameter\x12<\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1f.mo_caffe.InfogainLossParameter\x12<\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1f.mo_caffe.InnerProductParameter\x12)\n\tlrn_param\x18\x12 \x01(\x0b\x32\x16.mo_caffe.LRNParameter\x12\x38\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1d.mo_caffe.MemoryDataParameter\x12)\n\tmvn_param\x18\" \x01(\x0b\x32\x16.mo_caffe.MVNParameter\x12\x31\n\rpooling_param\x18\x13 
\x01(\x0b\x32\x1a.mo_caffe.PoolingParameter\x12-\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x18.mo_caffe.PowerParameter\x12+\n\nrelu_param\x18\x1e \x01(\x0b\x32\x17.mo_caffe.ReLUParameter\x12\x31\n\rsigmoid_param\x18& \x01(\x0b\x32\x1a.mo_caffe.SigmoidParameter\x12\x31\n\rsoftmax_param\x18\' \x01(\x0b\x32\x1a.mo_caffe.SoftmaxParameter\x12-\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x18.mo_caffe.SliceParameter\x12+\n\ntanh_param\x18% \x01(\x0b\x32\x17.mo_caffe.TanHParameter\x12\x35\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x1c.mo_caffe.ThresholdParameter\x12\x38\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1d.mo_caffe.WindowDataParameter\x12:\n\x0ftransform_param\x18$ \x01(\x0b\x32!.mo_caffe.TransformationParameter\x12+\n\nloss_param\x18* \x01(\x0b\x32\x17.mo_caffe.LossParameter\x12)\n\x05layer\x18\x01 \x01(\x0b\x32\x1a.mo_caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x8c\x08\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x38\n\x04pool\x18\x0b \x01(\x0e\x32%.mo_caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 
\x01(\x08:\x05\x66\x61lse\x12\"\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x39\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"Z\n\x0ePReLUParameter\x12)\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse\"\x86\x01\n\x13RegionYoloParameter\x12\x11\n\x06\x63oords\x18\x01 \x01(\x05:\x01\x34\x12\x13\n\x07\x63lasses\x18\x02 \x01(\x05:\x02\x32\x30\x12\x0e\n\x03num\x18\x03 \x01(\x05:\x01\x31\x12\x18\n\ndo_softmax\x18\x04 \x01(\x08:\x04true\x12\x0f\n\x07\x61nchors\x18\x05 \x03(\x02\x12\x0c\n\x04mask\x18\x06 \x03(\x05\"\'\n\x12ReorgYoloParameter\x12\x11\n\x06stride\x18\x01 \x01(\x05:\x01\x31\"\xcf\x01\n\x18RandomGeneratorParameter\x12\x1a\n\trand_type\x18\x01 \x01(\t:\x07uniform\x12\x12\n\x03\x65xp\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x0f\n\x04mean\x18\x04 \x01(\x02:\x01\x30\x12\x11\n\x06spread\x18\x05 \x01(\x02:\x01\x30\x12\x0f\n\x04prob\x18\x06 \x01(\x02:\x01\x31\x12\x1c\n\x0e\x61pply_schedule\x18\x07 \x01(\x08:\x04true\x12\x19\n\ndiscretize\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nmultiplier\x18\t 
\x01(\x02:\x01\x31\"`\n\x16\x43oeffScheduleParameter\x12\x14\n\thalf_life\x18\x01 \x01(\x02:\x01\x31\x12\x18\n\rinitial_coeff\x18\x02 \x01(\x02:\x01\x31\x12\x16\n\x0b\x66inal_coeff\x18\x03 \x01(\x02:\x01\x31\"\xde\x07\n\x11\x41ugmentationCoeff\x12\x11\n\x06mirror\x18\x01 \x01(\x02:\x01\x30\x12\r\n\x02\x64x\x18\x02 \x01(\x02:\x01\x30\x12\r\n\x02\x64y\x18\x03 \x01(\x02:\x01\x30\x12\x10\n\x05\x61ngle\x18\x04 \x01(\x02:\x01\x30\x12\x11\n\x06zoom_x\x18\x05 \x01(\x02:\x01\x31\x12\x11\n\x06zoom_y\x18\x06 \x01(\x02:\x01\x31\x12\x10\n\x05gamma\x18\x64 \x01(\x02:\x01\x31\x12\x15\n\nbrightness\x18\x65 \x01(\x02:\x01\x30\x12\x13\n\x08\x63ontrast\x18\x66 \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor1\x18g \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor2\x18h \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor3\x18i \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean0\x18\n \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean1\x18\x0b \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean2\x18\x0c \x01(\x02:\x01\x31\x12\x16\n\x0b\x61\x64\x64_nomean0\x18\r \x01(\x02:\x01\x30\x12\x16\n\x0b\x61\x64\x64_nomean1\x18\x0e \x01(\x02:\x01\x30\x12\x16\n\x0b\x61\x64\x64_nomean2\x18\x0f \x01(\x02:\x01\x30\x12\x17\n\x0cmult_nomean0\x18\x10 \x01(\x02:\x01\x31\x12\x17\n\x0cmult_nomean1\x18\x11 \x01(\x02:\x01\x31\x12\x17\n\x0cmult_nomean2\x18\x12 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean0\x18\x13 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean1\x18\x14 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean2\x18\x15 \x01(\x02:\x01\x31\x12\x18\n\radd_withmean0\x18\x16 \x01(\x02:\x01\x30\x12\x18\n\radd_withmean1\x18\x17 \x01(\x02:\x01\x30\x12\x18\n\radd_withmean2\x18\x18 \x01(\x02:\x01\x30\x12\x19\n\x0emult_withmean0\x18\x19 \x01(\x02:\x01\x31\x12\x19\n\x0emult_withmean1\x18\x1a \x01(\x02:\x01\x31\x12\x19\n\x0emult_withmean2\x18\x1b \x01(\x02:\x01\x31\x12\x14\n\tlmult_pow\x18\x1c \x01(\x02:\x01\x31\x12\x14\n\tlmult_add\x18\x1d \x01(\x02:\x01\x30\x12\x15\n\nlmult_mult\x18\x1e \x01(\x02:\x01\x31\x12\x14\n\tcol_angle\x18\x1f 
\x01(\x02:\x01\x30\x12\x15\n\nfog_amount\x18& \x01(\x02:\x01\x30\x12\x13\n\x08\x66og_size\x18\' \x01(\x02:\x01\x30\x12\x1c\n\x11motion_blur_angle\x18( \x01(\x02:\x01\x30\x12\x1b\n\x10motion_blur_size\x18) \x01(\x02:\x01\x30\x12\x17\n\x0cshadow_angle\x18* \x01(\x02:\x01\x30\x12\x1a\n\x0fshadow_distance\x18+ \x01(\x02:\x01\x30\x12\x1a\n\x0fshadow_strength\x18, \x01(\x02:\x01\x30\x12\x10\n\x05noise\x18- \x01(\x02:\x01\x30\"\xcc\x10\n\x15\x41ugmentationParameter\x12\x15\n\ncrop_width\x18! \x01(\r:\x01\x30\x12\x16\n\x0b\x63rop_height\x18\" \x01(\r:\x01\x30\x12\x19\n\x0fwrite_augmented\x18\x02 \x01(\t:\x00\x12\x1b\n\x0emax_multiplier\x18\x03 \x01(\x02:\x03\x32\x35\x35\x12\"\n\x13\x61ugment_during_test\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0erecompute_mean\x18\x05 \x01(\r:\x01\x30\x12\x14\n\nwrite_mean\x18\x06 \x01(\t:\x00\x12\x1c\n\x0emean_per_pixel\x18\x07 \x01(\x08:\x04true\x12\x0c\n\x04mean\x18\x12 \x03(\x02\x12\x11\n\x04mode\x18\x08 \x01(\t:\x03\x61\x64\x64\x12\x16\n\x0b\x62ottomwidth\x18P \x01(\r:\x01\x30\x12\x17\n\x0c\x62ottomheight\x18Q \x01(\r:\x01\x30\x12\x0e\n\x03num\x18R \x01(\r:\x01\x30\x12\x18\n\x10\x63hromatic_eigvec\x18S \x03(\x02\x12\x32\n\x06mirror\x18\n \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\ttranslate\x18\x0b \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x32\n\x06rotate\x18\x0c \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x30\n\x04zoom\x18\r \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07squeeze\x18\x0e \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x37\n\x0btranslate_x\x18\x0f \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x37\n\x0btranslate_y\x18\x10 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05gamma\x18# \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nbrightness\x18$ \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x63ontrast\x18% \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05\x63olor\x18& 
\x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tlmult_pow\x18\x14 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nlmult_mult\x18\x15 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tlmult_add\x18\x16 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07sat_pow\x18\x17 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08sat_mult\x18\x18 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07sat_add\x18\x19 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07\x63ol_pow\x18\x1a \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x63ol_mult\x18\x1b \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07\x63ol_add\x18\x1c \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08ladd_pow\x18\x1d \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tladd_mult\x18\x1e \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08ladd_add\x18\x1f \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\ncol_rotate\x18 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nfog_amount\x18\x64 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x66og_size\x18\x65 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12=\n\x11motion_blur_angle\x18\x66 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12<\n\x10motion_blur_size\x18g \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x38\n\x0cshadow_angle\x18h \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12;\n\x0fshadow_distance\x18i \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12;\n\x0fshadow_strength\x18j \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05noise\x18k \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\"\x85\x01\n\x11\x46lowWarpParameter\x12\x43\n\nfill_value\x18\x01 
\x01(\x0e\x32).mo_caffe.FlowWarpParameter.FillParameter:\x04ZERO\"+\n\rFillParameter\x12\x08\n\x04ZERO\x10\x01\x12\x10\n\x0cNOT_A_NUMBER\x10\x02\"\xb6\x02\n\x14\x43orrelationParameter\x12\x0e\n\x03pad\x18\x02 \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x03 \x01(\r\x12\x18\n\x10max_displacement\x18\x04 \x01(\r\x12\x13\n\x08stride_1\x18\x05 \x01(\r:\x01\x31\x12\x13\n\x08stride_2\x18\x06 \x01(\r:\x01\x31\x12\x1b\n\x10single_direction\x18\x08 \x01(\x05:\x01\x30\x12\x15\n\x06\x64o_abs\x18\x07 \x01(\x08:\x05\x66\x61lse\x12R\n\x10\x63orrelation_type\x18\x0f \x01(\x0e\x32..mo_caffe.CorrelationParameter.CorrelationType:\x08MULTIPLY\"-\n\x0f\x43orrelationType\x12\x0c\n\x08MULTIPLY\x10\x00\x12\x0c\n\x08SUBTRACT\x10\x01\"\xdc\x01\n\x11ResampleParameter\x12\x17\n\tantialias\x18\x04 \x01(\x08:\x04true\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\x12>\n\x04type\x18\x03 \x01(\x0e\x32(.mo_caffe.ResampleParameter.ResampleType:\x06LINEAR\x12\x11\n\x06\x66\x61\x63tor\x18\x05 \x01(\x02:\x01\x31\"<\n\x0cResampleType\x12\x0b\n\x07NEAREST\x10\x01\x12\n\n\x06LINEAR\x10\x02\x12\t\n\x05\x43UBIC\x10\x03\x12\x08\n\x04\x41REA\x10\x04\"z\n\x0e\x41\x63\x63umParameter\x12\x15\n\ntop_height\x18\x01 \x01(\r:\x01\x30\x12\x14\n\ttop_width\x18\x02 \x01(\r:\x01\x30\x12\x1c\n\x11size_divisible_by\x18\x03 \x01(\r:\x01\x30\x12\x1d\n\x0ehave_reference\x18\x04 \x01(\x08:\x05\x66\x61lse\"(\n\x17ShuffleChannelParameter\x12\r\n\x05group\x18\x01 \x02(\r*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@@ -40,8 +40,8 @@ _PHASE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=26632,
- serialized_end=26660,
+ serialized_start=26741,
+ serialized_end=26769,
)
_sym_db.RegisterEnumDescriptor(_PHASE)
@@ -209,8 +209,8 @@ _LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=9437,
- serialized_end=9518,
+ serialized_start=9504,
+ serialized_end=9585,
)
_sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE)
@@ -235,8 +235,8 @@ _CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE)
@@ -257,8 +257,8 @@ _DATAPARAMETER_DB = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=11442,
- serialized_end=11469,
+ serialized_start=11509,
+ serialized_end=11536,
)
_sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB)
@@ -283,8 +283,8 @@ _RESIZEPARAMETER_RESIZE_MODE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=11896,
- serialized_end=11967,
+ serialized_start=11963,
+ serialized_end=12034,
)
_sym_db.RegisterEnumDescriptor(_RESIZEPARAMETER_RESIZE_MODE)
@@ -309,8 +309,8 @@ _RESIZEPARAMETER_PAD_MODE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=11969,
- serialized_end=12027,
+ serialized_start=12036,
+ serialized_end=12094,
)
_sym_db.RegisterEnumDescriptor(_RESIZEPARAMETER_PAD_MODE)
@@ -343,8 +343,8 @@ _RESIZEPARAMETER_INTERP_MODE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=12029,
- serialized_end=12102,
+ serialized_start=12096,
+ serialized_end=12169,
)
_sym_db.RegisterEnumDescriptor(_RESIZEPARAMETER_INTERP_MODE)
@@ -369,8 +369,8 @@ _ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=13217,
- serialized_end=13256,
+ serialized_start=13284,
+ serialized_end=13323,
)
_sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP)
@@ -391,8 +391,8 @@ _HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=13800,
- serialized_end=13822,
+ serialized_start=13867,
+ serialized_end=13889,
)
_sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM)
@@ -413,8 +413,8 @@ _LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=14704,
- serialized_end=14757,
+ serialized_start=14771,
+ serialized_end=14824,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION)
@@ -439,8 +439,8 @@ _LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE)
@@ -465,8 +465,8 @@ _POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=15448,
- serialized_end=15494,
+ serialized_start=15515,
+ serialized_end=15561,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD)
@@ -491,8 +491,8 @@ _POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE)
@@ -517,8 +517,8 @@ _PRIORBOXPARAMETER_CODETYPE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=15898,
- serialized_end=15954,
+ serialized_start=15965,
+ serialized_end=16021,
)
_sym_db.RegisterEnumDescriptor(_PRIORBOXPARAMETER_CODETYPE)
@@ -547,8 +547,8 @@ _REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=16474,
- serialized_end=16527,
+ serialized_start=16541,
+ serialized_end=16594,
)
_sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP)
@@ -573,8 +573,8 @@ _RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE)
@@ -599,8 +599,8 @@ _SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE)
@@ -625,8 +625,8 @@ _SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE)
@@ -651,8 +651,8 @@ _TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE)
@@ -677,8 +677,8 @@ _SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=15448,
- serialized_end=15494,
+ serialized_start=15515,
+ serialized_end=15561,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD)
@@ -703,8 +703,8 @@ _SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=10798,
- serialized_end=10841,
+ serialized_start=10865,
+ serialized_end=10908,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE)
@@ -877,8 +877,8 @@ _V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=20453,
- serialized_end=21053,
+ serialized_start=20520,
+ serialized_end=21120,
)
_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE)
@@ -925,8 +925,8 @@ _V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=15448,
- serialized_end=15494,
+ serialized_start=15515,
+ serialized_end=15561,
)
_sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD)
@@ -947,8 +947,8 @@ _FLOWWARPPARAMETER_FILLPARAMETER = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=25927,
- serialized_end=25970,
+ serialized_start=25994,
+ serialized_end=26037,
)
_sym_db.RegisterEnumDescriptor(_FLOWWARPPARAMETER_FILLPARAMETER)
@@ -969,8 +969,8 @@ _CORRELATIONPARAMETER_CORRELATIONTYPE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=26238,
- serialized_end=26283,
+ serialized_start=26305,
+ serialized_end=26350,
)
_sym_db.RegisterEnumDescriptor(_CORRELATIONPARAMETER_CORRELATIONTYPE)
@@ -999,8 +999,8 @@ _RESAMPLEPARAMETER_RESAMPLETYPE = _descriptor.EnumDescriptor(
],
containing_type=None,
options=None,
- serialized_start=26446,
- serialized_end=26506,
+ serialized_start=26513,
+ serialized_end=26573,
)
_sym_db.RegisterEnumDescriptor(_RESAMPLEPARAMETER_RESAMPLETYPE)
@@ -2987,6 +2987,13 @@ _LAYERPARAMETER = _descriptor.Descriptor(
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
+ _descriptor.FieldDescriptor(
+ name='shuffle_channel_param', full_name='mo_caffe.LayerParameter.shuffle_channel_param', index=89,
+ number=224, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
],
extensions=[
],
@@ -2999,7 +3006,7 @@ _LAYERPARAMETER = _descriptor.Descriptor(
oneofs=[
],
serialized_start=3844,
- serialized_end=8373,
+ serialized_end=8440,
)
@@ -3063,8 +3070,8 @@ _INTERPPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=8376,
- serialized_end=8520,
+ serialized_start=8443,
+ serialized_end=8587,
)
@@ -3100,8 +3107,8 @@ _RANDOMSAMPLINGSOFTMAXLOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=8522,
- serialized_end=8632,
+ serialized_start=8589,
+ serialized_end=8699,
)
@@ -3179,8 +3186,8 @@ _PROPOSALPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=8635,
- serialized_end=8835,
+ serialized_start=8702,
+ serialized_end=8902,
)
@@ -3230,8 +3237,8 @@ _NORMALIZEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=8838,
- serialized_end=8987,
+ serialized_start=8905,
+ serialized_end=9054,
)
@@ -3260,8 +3267,8 @@ _PERMUTEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=8989,
- serialized_end=9022,
+ serialized_start=9056,
+ serialized_end=9089,
)
@@ -3332,8 +3339,8 @@ _TRANSFORMATIONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9025,
- serialized_end=9207,
+ serialized_start=9092,
+ serialized_end=9274,
)
@@ -3398,8 +3405,8 @@ _LOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9210,
- serialized_end=9518,
+ serialized_start=9277,
+ serialized_end=9585,
)
@@ -3442,8 +3449,8 @@ _ACCURACYPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9520,
- serialized_end=9596,
+ serialized_start=9587,
+ serialized_end=9663,
)
@@ -3486,8 +3493,8 @@ _ARGMAXPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9598,
- serialized_end=9675,
+ serialized_start=9665,
+ serialized_end=9742,
)
@@ -3530,8 +3537,8 @@ _CHANNELPERMUTATIONACTION = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9677,
- serialized_end=9745,
+ serialized_start=9744,
+ serialized_end=9812,
)
@@ -3581,8 +3588,8 @@ _CHANNELPERMUTATIONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9748,
- serialized_end=9902,
+ serialized_start=9815,
+ serialized_end=9969,
)
@@ -3618,8 +3625,8 @@ _CONCATPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9904,
- serialized_end=9961,
+ serialized_start=9971,
+ serialized_end=10028,
)
@@ -3662,8 +3669,8 @@ _BATCHNORMPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=9963,
- serialized_end=10069,
+ serialized_start=10030,
+ serialized_end=10136,
)
@@ -3699,8 +3706,8 @@ _BOXANNOTATOROHEMPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=10071,
- serialized_end=10145,
+ serialized_start=10138,
+ serialized_end=10212,
)
@@ -3743,8 +3750,8 @@ _BIASPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=10147,
- serialized_end=10243,
+ serialized_start=10214,
+ serialized_end=10310,
)
@@ -3780,8 +3787,8 @@ _CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=10245,
- serialized_end=10321,
+ serialized_start=10312,
+ serialized_end=10388,
)
@@ -3930,8 +3937,8 @@ _CONVOLUTIONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=10324,
- serialized_end=10841,
+ serialized_start=10391,
+ serialized_end=10908,
)
@@ -3974,8 +3981,8 @@ _CROPPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=10843,
- serialized_end=10908,
+ serialized_start=10910,
+ serialized_end=10975,
)
@@ -4011,8 +4018,8 @@ _CTCDECODERPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=10910,
- serialized_end=10990,
+ serialized_start=10977,
+ serialized_end=11057,
)
@@ -4069,8 +4076,8 @@ _CTCLOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=10993,
- serialized_end=11171,
+ serialized_start=11060,
+ serialized_end=11238,
)
@@ -4163,8 +4170,8 @@ _DATAPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=11174,
- serialized_end=11469,
+ serialized_start=11241,
+ serialized_end=11536,
)
@@ -4207,8 +4214,8 @@ _NONMAXIMUMSUPPRESSIONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=11471,
- serialized_end=11562,
+ serialized_start=11538,
+ serialized_end=11629,
)
@@ -4296,8 +4303,8 @@ _RESIZEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=11565,
- serialized_end=12102,
+ serialized_start=11632,
+ serialized_end=12169,
)
@@ -4368,8 +4375,8 @@ _SAVEOUTPUTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=12105,
- serialized_end=12324,
+ serialized_start=12172,
+ serialized_end=12391,
)
@@ -4496,8 +4503,8 @@ _DETECTIONOUTPUTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=12327,
- serialized_end=12868,
+ serialized_start=12394,
+ serialized_end=12935,
)
@@ -4526,8 +4533,8 @@ _DROPOUTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=12870,
- serialized_end=12916,
+ serialized_start=12937,
+ serialized_end=12983,
)
@@ -4591,8 +4598,8 @@ _DUMMYDATAPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=12919,
- serialized_end=13085,
+ serialized_start=12986,
+ serialized_end=13152,
)
@@ -4636,8 +4643,8 @@ _ELTWISEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13088,
- serialized_end=13256,
+ serialized_start=13155,
+ serialized_end=13323,
)
@@ -4666,8 +4673,8 @@ _ELUPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13258,
- serialized_end=13290,
+ serialized_start=13325,
+ serialized_end=13357,
)
@@ -4724,8 +4731,8 @@ _EMBEDPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13293,
- serialized_end=13471,
+ serialized_start=13360,
+ serialized_end=13538,
)
@@ -4768,8 +4775,8 @@ _EXPPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13473,
- serialized_end=13541,
+ serialized_start=13540,
+ serialized_end=13608,
)
@@ -4805,8 +4812,8 @@ _FLATTENPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13543,
- serialized_end=13600,
+ serialized_start=13610,
+ serialized_end=13667,
)
@@ -4849,8 +4856,8 @@ _HDF5DATAPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13602,
- serialized_end=13681,
+ serialized_start=13669,
+ serialized_end=13748,
)
@@ -4879,8 +4886,8 @@ _HDF5OUTPUTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13683,
- serialized_end=13723,
+ serialized_start=13750,
+ serialized_end=13790,
)
@@ -4910,8 +4917,8 @@ _HINGELOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13725,
- serialized_end=13822,
+ serialized_start=13792,
+ serialized_end=13889,
)
@@ -5017,8 +5024,8 @@ _IMAGEDATAPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=13825,
- serialized_end=14104,
+ serialized_start=13892,
+ serialized_end=14171,
)
@@ -5047,8 +5054,8 @@ _INFOGAINLOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14106,
- serialized_end=14145,
+ serialized_start=14173,
+ serialized_end=14212,
)
@@ -5112,8 +5119,8 @@ _INNERPRODUCTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14148,
- serialized_end=14357,
+ serialized_start=14215,
+ serialized_end=14424,
)
@@ -5142,8 +5149,8 @@ _INPUTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14359,
- serialized_end=14411,
+ serialized_start=14426,
+ serialized_end=14478,
)
@@ -5186,8 +5193,8 @@ _LOGPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14413,
- serialized_end=14481,
+ serialized_start=14480,
+ serialized_end=14548,
)
@@ -5253,8 +5260,8 @@ _LRNPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14484,
- serialized_end=14802,
+ serialized_start=14551,
+ serialized_end=14869,
)
@@ -5283,8 +5290,8 @@ _GRNPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14804,
- serialized_end=14835,
+ serialized_start=14871,
+ serialized_end=14902,
)
@@ -5334,8 +5341,8 @@ _MEMORYDATAPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14837,
- serialized_end=14927,
+ serialized_start=14904,
+ serialized_end=14994,
)
@@ -5378,8 +5385,8 @@ _MVNPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=14929,
- serialized_end=15029,
+ serialized_start=14996,
+ serialized_end=15096,
)
@@ -5408,8 +5415,8 @@ _PARAMETERPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=15031,
- serialized_end=15087,
+ serialized_start=15098,
+ serialized_end=15154,
)
@@ -5524,8 +5531,8 @@ _POOLINGPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=15090,
- serialized_end=15539,
+ serialized_start=15157,
+ serialized_end=15606,
)
@@ -5568,8 +5575,8 @@ _POWERPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=15541,
- serialized_end=15611,
+ serialized_start=15608,
+ serialized_end=15678,
)
@@ -5697,8 +5704,8 @@ _PRIORBOXPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=15614,
- serialized_end=15954,
+ serialized_start=15681,
+ serialized_end=16021,
)
@@ -5741,8 +5748,8 @@ _PSROIPOOLINGPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=15956,
- serialized_end=16042,
+ serialized_start=16023,
+ serialized_end=16109,
)
@@ -5792,8 +5799,8 @@ _PYTHONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16044,
- serialized_end=16147,
+ serialized_start=16111,
+ serialized_end=16214,
)
@@ -5850,8 +5857,8 @@ _RECURRENTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16150,
- serialized_end=16348,
+ serialized_start=16217,
+ serialized_end=16415,
)
@@ -5895,8 +5902,8 @@ _REDUCTIONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16351,
- serialized_end=16527,
+ serialized_start=16418,
+ serialized_end=16594,
)
@@ -5933,8 +5940,8 @@ _RELUPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16530,
- serialized_end=16674,
+ serialized_start=16597,
+ serialized_end=16741,
)
@@ -5963,8 +5970,8 @@ _RELU6PARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16676,
- serialized_end=16706,
+ serialized_start=16743,
+ serialized_end=16773,
)
@@ -6007,8 +6014,8 @@ _RESHAPEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16708,
- serialized_end=16801,
+ serialized_start=16775,
+ serialized_end=16868,
)
@@ -6037,8 +6044,8 @@ _REVERSEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16803,
- serialized_end=16838,
+ serialized_start=16870,
+ serialized_end=16905,
)
@@ -6081,8 +6088,8 @@ _ROIPOOLINGPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16840,
- serialized_end=16929,
+ serialized_start=16907,
+ serialized_end=16996,
)
@@ -6125,8 +6132,8 @@ _ROIWARPINGTESTPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=16931,
- serialized_end=17024,
+ serialized_start=16998,
+ serialized_end=17091,
)
@@ -6169,8 +6176,8 @@ _ROIWARPINGPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17026,
- serialized_end=17115,
+ serialized_start=17093,
+ serialized_end=17182,
)
@@ -6227,8 +6234,8 @@ _SCALEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17118,
- serialized_end=17289,
+ serialized_start=17185,
+ serialized_end=17356,
)
@@ -6258,8 +6265,8 @@ _SIGMOIDPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17291,
- serialized_end=17414,
+ serialized_start=17358,
+ serialized_end=17481,
)
@@ -6302,8 +6309,8 @@ _SLICEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17416,
- serialized_end=17492,
+ serialized_start=17483,
+ serialized_end=17559,
)
@@ -6332,8 +6339,8 @@ _SMOOTHL1LOSSPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17494,
- serialized_end=17535,
+ serialized_start=17561,
+ serialized_end=17602,
)
@@ -6370,8 +6377,8 @@ _SOFTMAXPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17538,
- serialized_end=17678,
+ serialized_start=17605,
+ serialized_end=17745,
)
@@ -6401,8 +6408,8 @@ _TANHPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17680,
- serialized_end=17797,
+ serialized_start=17747,
+ serialized_end=17864,
)
@@ -6438,8 +6445,8 @@ _TILEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17799,
- serialized_end=17846,
+ serialized_start=17866,
+ serialized_end=17913,
)
@@ -6468,8 +6475,8 @@ _THRESHOLDPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17848,
- serialized_end=17890,
+ serialized_start=17915,
+ serialized_end=17957,
)
@@ -6582,8 +6589,8 @@ _WINDOWDATAPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=17893,
- serialized_end=18214,
+ serialized_start=17960,
+ serialized_end=18281,
)
@@ -6628,8 +6635,8 @@ _SPPPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=18217,
- serialized_end=18458,
+ serialized_start=18284,
+ serialized_end=18525,
)
@@ -6954,8 +6961,8 @@ _V1LAYERPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=18461,
- serialized_end=21097,
+ serialized_start=18528,
+ serialized_end=21164,
)
@@ -7244,8 +7251,8 @@ _V0LAYERPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=21100,
- serialized_end=22136,
+ serialized_start=21167,
+ serialized_end=22203,
)
@@ -7281,8 +7288,8 @@ _PRELUPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=22138,
- serialized_end=22228,
+ serialized_start=22205,
+ serialized_end=22295,
)
@@ -7346,8 +7353,8 @@ _REGIONYOLOPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=22231,
- serialized_end=22365,
+ serialized_start=22298,
+ serialized_end=22432,
)
@@ -7376,8 +7383,8 @@ _REORGYOLOPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=22367,
- serialized_end=22406,
+ serialized_start=22434,
+ serialized_end=22473,
)
@@ -7455,8 +7462,8 @@ _RANDOMGENERATORPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=22409,
- serialized_end=22616,
+ serialized_start=22476,
+ serialized_end=22683,
)
@@ -7499,8 +7506,8 @@ _COEFFSCHEDULEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=22618,
- serialized_end=22714,
+ serialized_start=22685,
+ serialized_end=22781,
)
@@ -7816,8 +7823,8 @@ _AUGMENTATIONCOEFF = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=22717,
- serialized_end=23707,
+ serialized_start=22784,
+ serialized_end=23774,
)
@@ -8161,8 +8168,8 @@ _AUGMENTATIONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=23710,
- serialized_end=25834,
+ serialized_start=23777,
+ serialized_end=25901,
)
@@ -8192,8 +8199,8 @@ _FLOWWARPPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=25837,
- serialized_end=25970,
+ serialized_start=25904,
+ serialized_end=26037,
)
@@ -8272,8 +8279,8 @@ _CORRELATIONPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=25973,
- serialized_end=26283,
+ serialized_start=26040,
+ serialized_end=26350,
)
@@ -8331,8 +8338,8 @@ _RESAMPLEPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=26286,
- serialized_end=26506,
+ serialized_start=26353,
+ serialized_end=26573,
)
@@ -8382,8 +8389,38 @@ _ACCUMPARAMETER = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=26508,
- serialized_end=26630,
+ serialized_start=26575,
+ serialized_end=26697,
+)
+
+
+_SHUFFLECHANNELPARAMETER = _descriptor.Descriptor(
+ name='ShuffleChannelParameter',
+ full_name='mo_caffe.ShuffleChannelParameter',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='group', full_name='mo_caffe.ShuffleChannelParameter.group', index=0,
+ number=1, type=13, cpp_type=3, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=26699,
+ serialized_end=26739,
)
_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE
@@ -8494,6 +8531,7 @@ _LAYERPARAMETER.fields_by_name['resample_param'].message_type = _RESAMPLEPARAMET
_LAYERPARAMETER.fields_by_name['flow_warp_param'].message_type = _FLOWWARPPARAMETER
_LAYERPARAMETER.fields_by_name['accum_param'].message_type = _ACCUMPARAMETER
_LAYERPARAMETER.fields_by_name['coeff_schedule_param'].message_type = _COEFFSCHEDULEPARAMETER
+_LAYERPARAMETER.fields_by_name['shuffle_channel_param'].message_type = _SHUFFLECHANNELPARAMETER
_NORMALIZEPARAMETER.fields_by_name['scale_filler'].message_type = _FILLERPARAMETER
_LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE
_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER
@@ -8740,6 +8778,7 @@ DESCRIPTOR.message_types_by_name['FlowWarpParameter'] = _FLOWWARPPARAMETER
DESCRIPTOR.message_types_by_name['CorrelationParameter'] = _CORRELATIONPARAMETER
DESCRIPTOR.message_types_by_name['ResampleParameter'] = _RESAMPLEPARAMETER
DESCRIPTOR.message_types_by_name['AccumParameter'] = _ACCUMPARAMETER
+DESCRIPTOR.message_types_by_name['ShuffleChannelParameter'] = _SHUFFLECHANNELPARAMETER
DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE
BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict(
@@ -9456,6 +9495,13 @@ AccumParameter = _reflection.GeneratedProtocolMessageType('AccumParameter', (_me
))
_sym_db.RegisterMessage(AccumParameter)
+ShuffleChannelParameter = _reflection.GeneratedProtocolMessageType('ShuffleChannelParameter', (_message.Message,), dict(
+ DESCRIPTOR = _SHUFFLECHANNELPARAMETER,
+ __module__ = 'mo_caffe_pb2'
+ # @@protoc_insertion_point(class_scope:mo_caffe.ShuffleChannelParameter)
+ ))
+_sym_db.RegisterMessage(ShuffleChannelParameter)
+
_BLOBSHAPE.fields_by_name['dim'].has_options = True
_BLOBSHAPE.fields_by_name['dim']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
diff --git a/model-optimizer/mo/front/caffe/proto/generate_caffe_pb2.py b/model-optimizer/mo/front/caffe/proto/generate_caffe_pb2.py
index 941f65c1f..2a86db4d1 100644
--- a/model-optimizer/mo/front/caffe/proto/generate_caffe_pb2.py
+++ b/model-optimizer/mo/front/caffe/proto/generate_caffe_pb2.py
@@ -22,12 +22,8 @@ import sys
def shell(cmd, env=None, cwd=None):
- kwargs = dict(cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- if sys.platform.startswith('linux') or sys.platform == 'darwin':
- cmd = ['/bin/bash', '-c', "".join(cmd)]
- else:
- kwargs.update({'shell': True})
- print('Running: "{}"'.format(''.join(cmd)))
+ kwargs = dict(cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
+ print('Running: "{}"'.format(' '.join(cmd)))
p = subprocess.Popen(cmd, **kwargs)
(stdout, stderr) = p.communicate()
return p.returncode, stdout, stderr
@@ -42,21 +38,19 @@ def get_cli_parser():
def build_proto(proto_file_path, python_path):
- retcode, out, err = shell('protoc -h')
+ retcode, out, err = shell(['protoc', '-h'])
if retcode:
print(err)
return 1
if not (os.path.exists(proto_file_path) and os.path.isfile(proto_file_path)):
print('File {} does not exist'.format(proto_file_path))
return 1
- if not os.path.exists(proto_file_path):
- os.makedirs(python_path)
proto_path = os.path.split(proto_file_path)[0]
if not proto_path:
proto_path = os.getcwd()
proto_file = os.path.split(proto_file_path)[1]
- command = 'protoc {} --python_out={}'.format(proto_file, python_path)
+ command = ['protoc', proto_file, '--python_out={}'.format(python_path)]
retcode, out, err = shell(command, cwd=proto_path)
@@ -78,5 +72,8 @@ if __name__ == "__main__":
argv = get_cli_parser().parse_args()
proto_file_path = argv.input_proto
python_path = argv.output
+ if not os.path.exists(python_path):
+ print("Output directory {} does not exist".format(python_path))
+ sys.exit(1)
status = build_proto(proto_file_path, python_path)
exit(status)
diff --git a/model-optimizer/mo/front/caffe/proto/mo_caffe.proto b/model-optimizer/mo/front/caffe/proto/mo_caffe.proto
index 852fccfca..82f83a5d2 100644
--- a/model-optimizer/mo/front/caffe/proto/mo_caffe.proto
+++ b/model-optimizer/mo/front/caffe/proto/mo_caffe.proto
@@ -534,6 +534,9 @@ message LayerParameter {
optional FlowWarpParameter flow_warp_param = 221;
optional AccumParameter accum_param = 222;
optional CoeffScheduleParameter coeff_schedule_param = 223;
+
+ // for Shufflenet v2
+ optional ShuffleChannelParameter shuffle_channel_param= 224;
}
message InterpParameter {
@@ -2078,3 +2081,7 @@ message AccumParameter {
optional uint32 size_divisible_by = 3 [default = 0]; // Upscales to the minimal size divisible by the given number
optional bool have_reference = 4 [ default = false ];
}
+
+message ShuffleChannelParameter {
+ required uint32 group = 1;
+}
diff --git a/model-optimizer/mo/front/common/layout.py b/model-optimizer/mo/front/common/layout.py
index 2f368dc26..6da786138 100644
--- a/model-optimizer/mo/front/common/layout.py
+++ b/model-optimizer/mo/front/common/layout.py
@@ -14,9 +14,10 @@
limitations under the License.
"""
+import logging as log
+
import numpy as np
-from mo.graph.graph import Node
from mo.utils.error import Error
nchw_to_nhwc_permute = np.array([0, 2, 3, 1], dtype=np.int64)
diff --git a/model-optimizer/mo/front/common/partial_infer/elemental.py b/model-optimizer/mo/front/common/partial_infer/elemental.py
index adbf6ec03..99adb1ff9 100644
--- a/model-optimizer/mo/front/common/partial_infer/elemental.py
+++ b/model-optimizer/mo/front/common/partial_infer/elemental.py
@@ -17,9 +17,11 @@
def single_output_infer(node, shape_infer, value_infer=None):
node.out_node(0).shape = shape_infer(node)
- if value_infer is not None:
- node.out_node(0).value = value_infer(node)
+ if value_infer is not None and \
+ 'value' in node.in_node() and \
+ node.in_node().value is not None:
+ node.out_node(0).value = value_infer(node)
def copy_shape_infer(node):
"""
diff --git a/model-optimizer/mo/front/common/partial_infer/expand_dims.py b/model-optimizer/mo/front/common/partial_infer/expand_dims.py
index bf0803bd6..50ac4f06f 100644
--- a/model-optimizer/mo/front/common/partial_infer/expand_dims.py
+++ b/model-optimizer/mo/front/common/partial_infer/expand_dims.py
@@ -50,9 +50,7 @@ def tf_expand_dims_infer(node):
if input_node.value is not None:
output_node.value = np.array(np.reshape(input_node.value, output_node.shape))
- node['axis'] = 0
- node['num_axes'] = -1
node['dim'] = output_node.shape
- PermuteAttrs.create_permute_attrs(node, attrs=[('axis','output:0'), ('dim','output:0')])
+ PermuteAttrs.create_permute_attrs(node, attrs=[('dim', 'output:0')])
diff --git a/model-optimizer/mo/front/common/partial_infer/flatten.py b/model-optimizer/mo/front/common/partial_infer/flatten.py
deleted file mode 100644
index 223f53d86..000000000
--- a/model-optimizer/mo/front/common/partial_infer/flatten.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import logging as log
-
-import numpy as np
-
-from mo.front.caffe.extractors.utils import get_canonical_axis_index
-
-
-def flatten_infer(node):
- """
- Infers shape of flatten node as it is done in Caffe.
- Output shape: [Batch is the same, Production of other dims]
- Args:
- node: graph flatten node
-
- """
- input_shape = node.in_node(0).shape
- if input_shape is None:
- return
-
- # TODO: Should check that input_shape[1:] part doesn't contain -1 elements
- axis = get_canonical_axis_index(input_shape, node.axis)
- end_axis = node.end_axis if node.has('end_axis') else -1
- end_axis = get_canonical_axis_index(input_shape, end_axis)
- prod_axes = np.prod(input_shape[axis: end_axis + 1])
- node.out_node(0).shape = np.array([*input_shape[0: axis], prod_axes, *input_shape[end_axis + 1:]], dtype=np.int64)
- log.debug('input_shape: {}, output_shape: {}'.format(input_shape, node.out_node().shape))
-
diff --git a/model-optimizer/mo/front/common/partial_infer/inner_product.py b/model-optimizer/mo/front/common/partial_infer/inner_product.py
index b1d7189b5..765363b2f 100644
--- a/model-optimizer/mo/front/common/partial_infer/inner_product.py
+++ b/model-optimizer/mo/front/common/partial_infer/inner_product.py
@@ -52,20 +52,13 @@ def onnx_matmul_infer(node):
if len(node.in_nodes()) != 2:
raise Error("Wrong number of input nodes for {} node. Should be 2 instead of {}".format(node.name,
len(node.in_nodes())))
+ input_0_shape = node.in_node(0).shape
+ input_1_shape = node.in_node(1).shape
- input_node = node.in_node(0)
- weights_node = node.in_node(1)
+ input_shapes = [node.in_node(port).shape for port in node.in_nodes()]
+ max_len = max([len(shape) for shape in input_shapes])
+ new_input_shapes = [np.concatenate([np.ones(max_len - len(input_shapes[i])), input_shapes[i]], axis=0)
+ for i in range(len(input_shapes))]
- input_shape = input_node.shape
- weights_shape = weights_node.shape
-
- if len(weights_shape) > 2:
- raise Error("MatMul {} with weights shape != 2 is not supported".format(node.name))
-
- mark_input_bins(node)
- assign_dims_to_weights(weights_node, None, 0, 1, 2)
- PermuteAttrs.set_permutation(weights_node, node, PermuteAttrs.Permutation(perm=int64_array([1, 0]),
- inv=int64_array([0, 1])))
-
- node['out-size'] = weights_shape[1]
- node.out_node().shape = np.array([*input_shape[0:-1], weights_shape[1]])
+ node.out_node().shape = np.concatenate([np.maximum(*[shape[0:-2] for shape in new_input_shapes]),
+ [input_0_shape[-2], input_1_shape[-1]]], axis=0)
diff --git a/model-optimizer/mo/front/common/partial_infer/matmul.py b/model-optimizer/mo/front/common/partial_infer/matmul.py
index f776a1274..157402c9c 100644
--- a/model-optimizer/mo/front/common/partial_infer/matmul.py
+++ b/model-optimizer/mo/front/common/partial_infer/matmul.py
@@ -18,16 +18,44 @@ import logging as log
import numpy as np
+from mo.front.common.partial_infer.utils import int64_array
+from mo.ops.op import PermuteAttrs
from mo.utils.error import Error
def tf_matmul_infer(node):
assert (len(node.in_nodes()) == 2)
- shapes = [node.in_node(i).shape for i in range(2)]
+
+ shapes = [node.in_node(i).shape.copy() for i in range(2)]
log.debug('matmul shapes: {}'.format(shapes))
- if node.transpose_a or node.transpose_b or any(s is None or len(s) < 2 for s in shapes):
+ if any(s is None or len(s) < 2 for s in shapes):
log.error("MatMul wasn't able to infer shape")
return
+
+ if node.transpose_a:
+ if not node.in_node(0).has_valid('value'):
+ log.error("MatMul wasn't able to infer shape")
+ return
+ else:
+ perm = np.array(range(len(node.in_node(0).shape)), dtype=np.int64)
+ perm[-1], perm[-2] = perm[-2], perm[-1]
+ inv = PermuteAttrs.get_inverse_permutation(perm)
+ permutation = PermuteAttrs.Permutation(perm=perm, inv=int64_array(inv))
+ PermuteAttrs.set_permutation(node.in_node(0), node, permutation)
+ shapes[0] = shapes[0][perm]
+
+ if node.transpose_b:
+ if not node.in_node(1).has_valid('value'):
+ log.error("MatMul wasn't able to infer shape")
+ return
+ else:
+ perm = np.array(range(len(node.in_node(1).shape)), dtype=np.int64)
+ perm[-1], perm[-2] = perm[-2], perm[-1]
+ inv = PermuteAttrs.get_inverse_permutation(perm)
+ permutation = PermuteAttrs.Permutation(perm=perm, inv=int64_array(inv))
+ PermuteAttrs.set_permutation(node.in_node(1), node, permutation)
+ shapes[1] = shapes[1][perm]
+
if any(shapes[0][:-2] != shapes[1][:-2]) or shapes[0][-1] != shapes[1][-2]:
log.error("MatMul wasn't able to infer shape because input dimensions are not compatible")
return
@@ -49,7 +77,6 @@ def tf_matmul_infer(node):
log.debug('matmul shape: {}'.format(node.out_node().shape))
-
def onnx_gemm_infer(node):
assert (len(node.in_nodes()) == 3)
shapeA = node.in_node(0).shape
@@ -60,16 +87,15 @@ def onnx_gemm_infer(node):
if shapeA.size > 2 and node.transpose_a:
raise Error(
- 'ONNX Gemm operation do not support {}dimensional input with set transA key'.format(shapeA.size))
+ 'ONNX Gemm operation do not support {} dimensional input with set transA key'.format(shapeA.size))
# apply transposes and broadcasts
if node.transpose_a:
- shapeA = shapeA[[1,0]]
+ shapeA = shapeA[[1, 0]]
if node.transpose_b:
- shapeB = shapeB[[1,0]]
+ shapeB = shapeB[[1, 0]]
if node.broadcast_c and shapeC.size == 1:
shapeC = np.array([shapeA[0], shapeC[0]])
node.out_node().shape = shapeC
return
-
diff --git a/model-optimizer/mo/front/common/partial_infer/reduce.py b/model-optimizer/mo/front/common/partial_infer/reduce.py
index 74fdd40e9..627badced 100644
--- a/model-optimizer/mo/front/common/partial_infer/reduce.py
+++ b/model-optimizer/mo/front/common/partial_infer/reduce.py
@@ -26,6 +26,9 @@ def tf_reduce_infer(node, op=None):
if input_shape is None or axis is None or input_shape.ndim != 1 or axis.ndim > 1:
return
output_shape = np.array(input_shape)
+ if len(axis.shape) == 0: # fix since np.delete deprecate negative idxs
+ axis = axis.reshape([1])
+ axis[axis < 0] += output_shape.shape[0]
if node.keep_dims:
output_shape[axis] = 1
else:
@@ -34,4 +37,4 @@ def tf_reduce_infer(node, op=None):
if op is not None and node.in_node(0).value is not None:
node.out_node(0).value = np.array([op(node.in_node(0).value, (*axis,))],
dtype=node.in_node(0).value.dtype) # TODO extend to multi-dimensional axis
- log.debug("value: {}".format(node.out_node(0).value))
+ log.debug("value: {}".format(node.out_node(0).value)) \ No newline at end of file
diff --git a/model-optimizer/mo/front/common/partial_infer/reshape.py b/model-optimizer/mo/front/common/partial_infer/reshape.py
index db7cc98c9..ae616022f 100644
--- a/model-optimizer/mo/front/common/partial_infer/reshape.py
+++ b/model-optimizer/mo/front/common/partial_infer/reshape.py
@@ -14,11 +14,9 @@
limitations under the License.
"""
-import logging as log
-
-import numpy as np
-
+from mo.front.common.partial_infer.utils import int64_array
from mo.ops.op import PermuteAttrs
+from mo.utils.error import Error
def tf_reshape_shape_infer(node):
@@ -32,12 +30,9 @@ def tf_reshape_shape_infer(node):
input_shape = node.in_node(0).shape
reshape_output = node.in_node(1).value if len(node.in_nodes()) > 1 else node.dim
- # In case if Reshape operation was created with two inputs and dim attr wasn't set, we set in automatically
- if not node.has_valid('dim'):
- node['dim'] = np.array(reshape_output, dtype=np.int64)
-
if node.in_node(0).shape is None:
return None
+
total = 1
for index, i in enumerate(input_shape):
total *= i
@@ -65,11 +60,16 @@ def tf_reshape_shape_infer(node):
out_shape_total *= i
if total != out_shape_total:
- log.error(
+ raise Error(
"Number of elements in input {} and output {} of reshape node {} mismatch".format(input_shape, output_shape,
node.name))
- return None
PermuteAttrs.create_permute_attrs(node, attrs=[('dim', 'output:0')])
- return np.array(output_shape, dtype=np.int64)
+ output_shape = int64_array(output_shape)
+
+ # In case if Reshape operation was created with two inputs and dim attr wasn't set, we set in automatically
+ if not node.has_valid('dim'):
+ node['dim'] = output_shape
+
+ return output_shape
diff --git a/model-optimizer/mo/front/common/partial_infer/slice.py b/model-optimizer/mo/front/common/partial_infer/slice.py
index cac47d45c..bf23763fe 100644
--- a/model-optimizer/mo/front/common/partial_infer/slice.py
+++ b/model-optimizer/mo/front/common/partial_infer/slice.py
@@ -71,7 +71,6 @@ def tf_strided_slice_infer(node):
new_axis_mask.append(False)
value = node.in_node(0).value if node.in_node(0).value is not None else np.zeros(shape)
-
# fix for the warning: "FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated use
# `arr[tuple(seq)]` instead of `arr[seq]`"
value = value[tuple(slice_idx)]
diff --git a/model-optimizer/mo/front/common/partial_infer/split.py b/model-optimizer/mo/front/common/partial_infer/split.py
index 24bf678b4..ff8abb8eb 100644
--- a/model-optimizer/mo/front/common/partial_infer/split.py
+++ b/model-optimizer/mo/front/common/partial_infer/split.py
@@ -19,7 +19,7 @@ import logging as log
import numpy as np
from mo.ops.op import PermuteAttrs
-from mo.utils.error import Error
+from mo.graph.graph import Node
def part_sizes_to_indices(part_sizes: list):
@@ -39,13 +39,13 @@ def part_sizes_to_indices(part_sizes: list):
return np.array(indices)
-def split(input, node, outputs, axis, part_sizes):
+def split(input_data_node: Node, node: Node, axis: int, part_sizes: list):
"""
Partial inference of generic split node.
Args:
@input: input tensor node, subject to split
- @outputs: output tensor nodes where we put inferred output shapes
+ @node: node of one of the Split types
@axis: split dimension index
@part_sizes: a NumPy array with sizes of all pieces that we split to
@@ -54,18 +54,14 @@ def split(input, node, outputs, axis, part_sizes):
"""
- if input.shape is None:
- return
-
- if len(outputs) != len(part_sizes):
- log.error('Number of outputs do not match the number of parts with sizes.')
+ if input_data_node.shape is None:
return
# normalize axis
if axis < 0:
- axis = input.shape.size + axis
+ axis = input_data_node.shape.size + axis
- if axis < 0 or axis >= input.shape.size:
+ if axis < 0 or axis >= input_data_node.shape.size:
log.error('Model is incorrect: axis for split node is out of range')
return
@@ -77,64 +73,67 @@ def split(input, node, outputs, axis, part_sizes):
if undef_indices.size == 1:
undef_index = undef_indices[0]
part_sizes[undef_index] = 0
- deduced_dim = input.shape[axis] - np.add.reduce(part_sizes)
+ deduced_dim = input_data_node.shape[axis] - np.add.reduce(part_sizes)
if deduced_dim < 0:
- log.error(
- 'Just deduced dimension for the split has negative value that means that split input shape and desired parts are not compatible')
+ log.error('Just deduced dimension for the split has negative value that means that split input shape and '
+ 'desired parts are not compatible')
return
all_parts_size = np.add.reduce(part_sizes)
- if all_parts_size != input.shape[axis]:
- log.error("input.shape[{}] = {} != {} = sum of all parts in part_sizes".format(axis, input.shape[axis],
+ if all_parts_size != input_data_node.shape[axis]:
+ log.error("input.shape[{}] = {} != {} = sum of all parts in part_sizes".format(axis,
+ input_data_node.shape[axis],
all_parts_size))
return
- for i, part_size in enumerate(part_sizes):
- shape = input.shape.copy()
- shape[axis] = part_size
- outputs[i].shape = shape
+ splitted = None
+ if input_data_node.value is not None:
+ splitted = np.split(input_data_node.value, part_sizes_to_indices(part_sizes), axis)
- if input.value is not None:
- splitted = np.split(input.value, part_sizes_to_indices(part_sizes), axis)
- # log.debug("splitted = {}".format(splitted))
- for i, part in enumerate(splitted):
- outputs[i].value = part
- # log.debug('outputs[i].value.shape = {}, outputs[i].shape = {}'.format(outputs[i].value.shape, outputs[i].shape))
- assert all(outputs[i].value.shape == outputs[i].shape)
+ # not all outputs from the split could be used so it is necessary to iterate over output edges and infer shape for
+ # necessary nodes only
+ for _, dst, edge_attrs in node.graph.out_edges(node.id, data=True):
+ out_port = edge_attrs['out']
+ out_node = node.out_node(out_port)
+
+ new_out_shape = input_data_node.shape.copy()
+ new_out_shape[axis] = part_sizes[out_port]
+ node.out_node(out_port).shape = new_out_shape
+ if splitted is not None:
+ out_node.value = splitted[out_port]
+ assert all(out_node.value.shape == out_node.shape)
assert not node.has_valid('axis') or node.axis == axis
node.axis = axis
- # WARNING: != 4 is supposed to work for NHWC to NCHW translation only; if other global permutations happen this will fail
+ # WARNING: != 4 is supposed to work for NHWC to NCHW translation only.
+ # if other global permutations happen this will fail
# TODO: redesign it to have this logic built in NHWC to NCHW translation pass; it requires
# additional attributes with layout to be propagated through the network
- if len(input.shape) != 4 and node.has_valid('dim_attrs') and 'axis' in node.dim_attrs:
- log.warning(
- 'Removed "axis" attribute from the scope of the model relayout pass because len(input.shape) == {} != 4 for node {}'.format(
- len(input.shape),
- node.name if node.has_valid('name') else '<UNKNOWN>'))
+ if len(input_data_node.shape) != 4 and node.has_valid('dim_attrs') and 'axis' in node.dim_attrs:
+ log.warning('Removed "axis" attribute from the scope of the model relayout pass because len(input.shape) == {} '
+ '!= 4 for node {}'.format(len(input_data_node.shape), node.soft_get('name')))
node.dim_attrs.remove('axis')
assert 'axis' not in node.dim_attrs
+ log.debug('output shapes after split: {}'.format([v.shape for k, v in node.out_nodes().items()]))
def tf_split_infer(node):
"""
Partial infer of split node similar to Split op of TF.
"""
-
- if len(node.in_nodes()) == 1:
- return True
-
- # Two inputs: [split_dim, input)
- assert (len(node.in_nodes()) == 2)
+ # Two inputs: [split_dim, input]
+ assert len(node.in_nodes()) == 2, 'Node "{}" must have exactly two inputs'.format(node.soft_get('name'))
split_dim = node.in_node(0).value
if split_dim is None:
log.error('split_dim value for node {} is None. Cannot do shape inference.')
return
- assert split_dim.ndim == 0
+
+ assert split_dim.ndim == 0, 'The split dimension for node "{}" must be a scalar.'.format(node.soft_get('name'))
split_dim = split_dim.item()
input = node.in_node(1)
- if split_dim is None or input.shape is None:
+ if input.shape is None:
+ log.error('Input shape for node {} is not defined'.format(node.soft_get('name')))
return
log.debug('input shape for split: {}, should be split along {} dim'.format(input.shape, split_dim))
@@ -145,42 +144,36 @@ def tf_split_infer(node):
log.error("split_dim cannot be evenly divided by a given number of parts")
return
- outputs = node.out_nodes()
# split_dim is a numpy array, axis is split_dim[0]
- log.debug(
- 'split_dim_size = {}, node.num_split = {}, div = {}, typeof div = {}'.format(split_dim_size, node.num_split,
- split_dim_size / node.num_split,
- type(
- split_dim_size / node.num_split)))
- split(input, node, [outputs[i] for i in range(len(outputs))], split_dim,
- [int(split_dim_size / node.num_split)] * node.num_split)
- log.debug('output shapes after split: {}'.format([v.shape for k, v in outputs.items()]))
+ log.debug('split_dim_size = {}, node.num_split = {}, div = {}, typeof div = {}'.format(
+ split_dim_size, node.num_split, split_dim_size / node.num_split, type(split_dim_size / node.num_split)))
+ split(input, node, split_dim, [int(split_dim_size / node.num_split)] * node.num_split)
node.graph.remove_edge(node.in_node(0).id, node.id)
node['input_port'] = 1
PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:1')])
-def tf_split_v_infer(node):
+def tf_split_v_infer(node: Node):
"""
Partial infer of split node similar to SplitV op of TF.
"""
if len(node.in_nodes()) == 1 and not (node.has_valid('axis') and node.has_valid('size_splits')):
- return True
+ return
if len(node.in_nodes()) == 3 and (node.has_valid('axis') or node.has_valid('size_splits')):
- return True
+ return
# Three inputs: [input, size_splits, split_dim)
- if len(node.in_nodes())==3 :
+ if len(node.in_nodes()) == 3:
split_dim = node.in_node(2).value
assert split_dim.ndim == 0
split_dim = split_dim.item()
size_splits = node.in_node(1).value
node.graph.remove_edge(node.in_node(1).id, node.id)
node.graph.remove_edge(node.in_node(2).id, node.id)
- else :
+ else:
split_dim = node.axis
size_splits = node.size_splits
@@ -189,21 +182,19 @@ def tf_split_v_infer(node):
return
input = node.in_node(0)
-
- log.debug(
- 'split_dim = {}, input.shape = {}, size_splits.value = {}'.format(split_dim, input.shape, size_splits))
-
- if split_dim is None or input.shape is None or size_splits is None:
+ if input.shape is None or size_splits is None:
+ log.error('input shape or size of splits are not defined for node {}'.format(node.soft_get('name')))
return
- outputs = node.out_nodes()
+ log.debug('split_dim = {}, input.shape = {}, size_splits.value = {}'.format(split_dim, input.shape, size_splits))
+
# split_dim is a numpy array, axis is split_dim
- split(input, node, [outputs[i] for i in range(len(outputs))], split_dim, size_splits)
- log.debug('output shapes after split: {}'.format([v.shape for k, v in outputs.items()]))
-
- PermuteAttrs.create_permute_attrs(node, attrs=[('axis','input:0')])
+ split(input, node, split_dim, size_splits)
+
+ PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
-def tf_unpack_infer(node):
+
+def tf_unpack_infer(node: Node):
if len(node.in_nodes()) != 1:
log.debug('Unpack node "{}" must have one input.'.format(node.name))
return
@@ -229,9 +220,5 @@ def tf_unpack_infer(node):
log.error("split_dim cannot be evenly divided by a given number of parts")
return
- outputs = node.out_nodes()
- split(node.in_node(), node, [outputs[i] for i in range(len(outputs))], split_dim,
- [int(split_dim_size / node.num_split)] * node.num_split)
-
+ split(node.in_node(), node, split_dim, [int(split_dim_size / node.num_split)] * node.num_split)
# node shapes will be squeezed in the separate pass
- log.debug('output shapes after split: {}'.format([v.shape for k, v in outputs.items()]))
diff --git a/model-optimizer/mo/front/common/partial_infer/squeeze.py b/model-optimizer/mo/front/common/partial_infer/squeeze.py
index 351a391be..574ba85c5 100644
--- a/model-optimizer/mo/front/common/partial_infer/squeeze.py
+++ b/model-optimizer/mo/front/common/partial_infer/squeeze.py
@@ -13,30 +13,68 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+import logging as log
import numpy as np
-from mo.front.caffe.extractors.utils import get_canonical_axis_index
+from mo.front.caffe.extractors.utils import get_canonical_axis_index
+from mo.front.common.layout import get_height_dim, get_width_dim, get_depth_dim
+from mo.front.common.partial_infer.utils import int64_array
from mo.ops.op import PermuteAttrs
+from mo.utils.error import Error
+
+
+def is_spatial_squeeze(layout: str, input_shape: np.ndarray, squeeze_dims: np.ndarray):
+ """
+ Checks that the squeeze operation removes all spatial dimensions.
+ :param layout: graph layout.
+ :param input_shape: numpy array with input shape.
+ :param squeeze_dims: numpy array with dims to squeeze.
+ :return: result of the check.
+ """
+ if len(input_shape) < 4 or len(input_shape) > 5:
+ return False
+ spatial_dims = [get_height_dim(layout, len(input_shape)), get_width_dim(layout, len(input_shape))]
+ if len(input_shape) == 5:
+ spatial_dims.append(get_depth_dim(layout, len(input_shape)))
+ for dim in spatial_dims:
+ if input_shape[dim] != 1:
+ log.debug('The reshape from "{}" with squeezed dims "{}" is not a spatial squeeze'.format(input_shape,
+ squeeze_dims))
+ return False
+ if len(squeeze_dims) != len(spatial_dims):
+ log.debug('The reshape from "{}" with squeezed dims "{}" is not a spatial squeeze'.format(input_shape,
+ squeeze_dims))
+ return False
+ log.debug('The reshape from "{}" with squeezed dims "{}" is not a spatial squeeze'.format(input_shape,
+ squeeze_dims))
+ return True
def tf_squeeze_infer(node):
if node.squeeze_dims is None:
# TODO: implement; there is no implementation now because no test
return
+
real_squeeze_dims = []
- shape = node.in_node().shape
- if shape is None:
+ input_shape = node.in_node().shape
+ if input_shape is None:
return
# UGLY
- shape = shape.copy()
+ output_shape = input_shape.copy()
for n in node.squeeze_dims:
- if shape[n] == 1:
- real_squeeze_dims.append(get_canonical_axis_index(shape, n))
- shape = np.delete(shape, real_squeeze_dims)
- node.out_node().shape = shape
- node['dim'] = shape
+ if output_shape[n] == 1:
+ real_squeeze_dims.append(get_canonical_axis_index(output_shape, n))
+ else:
+ raise Error('Trying to squeeze dimension not equal to 1 for node "{}"'.format(node.soft_get('name')))
+
+ output_shape = np.delete(output_shape, real_squeeze_dims)
+ node.out_node().shape = output_shape
+
+ if is_spatial_squeeze(node.graph.graph['layout'], input_shape, output_shape):
+ output_shape = int64_array([0, -1])
+ node['dim'] = output_shape
if node.in_node().value is not None:
- node.out_node().value = np.array(np.reshape(node.in_node().value, shape))
+ node.out_node().value = np.array(np.reshape(node.in_node().value, output_shape))
- PermuteAttrs.create_permute_attrs(node, attrs =[('dim','output:0')]) \ No newline at end of file
+ PermuteAttrs.create_permute_attrs(node, attrs=[('dim', 'output:0')])
diff --git a/model-optimizer/mo/front/common/partial_infer/up_sampling.py b/model-optimizer/mo/front/common/partial_infer/up_sampling.py
deleted file mode 100644
index 8a2dc6a76..000000000
--- a/model-optimizer/mo/front/common/partial_infer/up_sampling.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import numpy as np
-
-
-def up_sampling_infer(node):
- if node.scale is None:
- return
- input_shape = node.in_node(0).shape
- batch = input_shape[0]
- channel = input_shape[1]
- y = input_shape[2] * node.scale
- x = input_shape[3] * node.scale
- node.out_node(0).shape = np.array([batch, channel, y, x])
diff --git a/model-optimizer/mo/front/common/partial_infer/utils.py b/model-optimizer/mo/front/common/partial_infer/utils.py
index dbb76a5cd..0056a0a57 100644
--- a/model-optimizer/mo/front/common/partial_infer/utils.py
+++ b/model-optimizer/mo/front/common/partial_infer/utils.py
@@ -24,21 +24,17 @@ def int64_array(l: list):
def float_array(l: list):
- return np.array(l, dtype=np.int64)
+ return np.array(l, dtype=np.float64)
def mark_input_bins(node, names=('weights', 'biases'), start_port: int = 1):
"""
Preparing necessary attributes for edges at input ports starting from start_port.
- It is applicable for convolution and other operations that has constant inputs which
+ It is applicable for convolution and other operations that have constant inputs which
are intended to be dumped as IE IR bin file.
"""
- nports = len(node.in_nodes())
- for i, name in enumerate(names):
- port = i + start_port
- if port >= nports:
- break
- if node.in_node(port).value is not None:
+ for port, name in enumerate(names, start=start_port):
+ if port in node.in_nodes() and node.in_node(port).has_valid('value'):
node.in_edge(port)['bin'] = name
diff --git a/model-optimizer/mo/front/common/register_custom_ops.py b/model-optimizer/mo/front/common/register_custom_ops.py
index 89607f8e3..1172bf303 100644
--- a/model-optimizer/mo/front/common/register_custom_ops.py
+++ b/model-optimizer/mo/front/common/register_custom_ops.py
@@ -53,7 +53,6 @@ def check_for_duplicates(extractors_collection: dict):
Check if extractors_collection has case-insensitive duplicates, if it does,
raise exception with information about duplicates
"""
- assert extractors_collection, 'Extractors collection can not be empty.'
# Check if extractors_collection is a normal form, that is it doesn't have case-insensitive duplicates
duplicates, keys = find_case_insensitive_duplicates(extractors_collection)
if len(duplicates) > 0:
diff --git a/model-optimizer/mo/front/extractor.py b/model-optimizer/mo/front/extractor.py
index bf555187f..6ba1ea4c4 100644
--- a/model-optimizer/mo/front/extractor.py
+++ b/model-optimizer/mo/front/extractor.py
@@ -15,6 +15,8 @@
"""
import ast
import logging as log
+from collections import defaultdict
+from copy import copy
import networkx as nx
import numpy as np
@@ -27,8 +29,6 @@ from mo.utils.error import Error
from mo.utils.graph import dfs
from mo.utils.unsupported_ops import UnsupportedOps
from mo.utils.utils import refer_to_faq_msg
-from collections import defaultdict
-from copy import copy
def restore_edges(graph: nx.DiGraph, get_edges: callable):
@@ -473,6 +473,7 @@ def update_ie_fields(attrs: dict, ir_version = None):
ir_version_mapping = {
# Default behaviour is IR V3 attributes
None: ir_v3_attrs,
+ 4: ir_v3_attrs,
3: ir_v3_attrs,
2: ir_v2_attrs
}
@@ -884,15 +885,9 @@ def add_output_ops(graph: nx.MultiDiGraph, user_defined_outputs: dict, inputs: d
return sinks
-def set_is_input_true(graph: nx.MultiDiGraph, placeholders: list):
+def set_is_input(graph: nx.MultiDiGraph, placeholders: list, is_input: bool):
for placeholder in placeholders:
- graph.node[placeholder]['is_input'] = True
-
-
-def set_is_input_false(graph: nx.MultiDiGraph):
- for node, data in list(graph.nodes(data=True)):
- if 'op' in data and data['op'] == 'Placeholder':
- graph.node[node]['is_input'] = False
+ graph.node[placeholder]['is_input'] = is_input
def check_input(graph: nx.MultiDiGraph, node_name: str):
@@ -912,120 +907,240 @@ def split_node_in_port(node_id: str):
node_name = separator.join(parts[1:])
try:
port = int(parts[0])
- return (node_name, port)
+ return node_name, port
except ValueError as err:
log.warning('Didn\'t recognize port:node format for "{}" because port is not an integer.'.format(
node_id))
- return (node_id, None)
-
-
-def add_input_op(graph: nx.MultiDiGraph, node_id: str, port: int = 0, data: bool = False, shape=None):
- # we import it here because Op imports add_attrs_props and update_ie_fields from this file
+ return node_id, None
+
+
+def add_input_op_input_port_without_data(graph: nx.MultiDiGraph, node_id: str, input_op, edge_attrs: dict):
+ input_node = input_op.create_node()
+ graph.add_edge(input_node.id, node_id, **edge_attrs)
+ log.debug('Input: {} for node {}'.format(input_node.id, node_id))
+ log.debug("Add edge from {} to {}".format(input_node.id, node_id))
+ return input_node.id
+
+
+def add_input_op_input_port_with_data(graph: nx.MultiDiGraph, node_id: str, input_op, edge_attrs: dict):
+ input_data_node = input_op.create_node_with_data()
+ input_node = input_data_node.in_node()
+ graph.add_edge(input_data_node.id, node_id, **edge_attrs)
+ update_ie_fields(graph.node[input_node.id])
+ log.debug('Input: {} for node {}'.format(input_node.id, node_id))
+ log.debug("Add edge from {} to {}".format(input_node.id, input_data_node.id))
+ log.debug("Add edge from {} to {}".format(input_data_node.id, node_id))
+ return input_node.id
+
+
+def add_input_op_output_port_without_data(graph: nx.MultiDiGraph, node_id: str, input_op, port: int):
+ input_node = input_op.create_node()
+ # In this case it can be more than one out edge from one port and we should iterate over all output edges
+ for _, out_node, attrs in graph.out_edges(node_id, data=True):
+ if attrs['out'] == port:
+ # new out port = 0
+ attrs = attrs.copy()
+ attrs['out'] = 0
+ graph.add_edge(input_node.id, out_node, **attrs)
+ log.debug('Input: {} for node {} output port {}'.format(input_node.id, node_id, port))
+ log.debug("Add edge from {} to {}".format(input_node.id, out_node))
+ return input_node.id
+
+
+def add_input_op_output_port_with_data(graph: nx.MultiDiGraph, node_id: str, input_op, port: int):
+ # we assume that after op always data node
+ data_node = Node(graph, node_id).out_node(port)
+ assert data_node.has_valid('kind') and data_node.kind == 'data'
+ input_op.create_node_with_data(data_nodes=data_node)
+ input_node = data_node.in_node()
+ update_ie_fields(graph.node[input_node.id])
+ log.debug('Input: {} for node {}'.format(input_node.id, node_id))
+ log.debug("Add edge from {} to {}".format(input_node.id, node_id))
+ return input_node.id
+
+
+def add_input_op(graph: nx.MultiDiGraph, node_id: str, port: int = 0, data: bool = False, shape=None,
+ is_out_port: bool = False):
+ """
+ This function adds Input node to node with id==node_id to specified port (in or out defined with is_out_port).
+ :param graph: graph to operate on.
+ :param node_id: node_id for node to which we should add new input.
+ :param port: number of port of node_id node for adding input node.
+ :param data: flag that define whether data nodes is needed or not.
+ :param shape: shape for new input node.
+ :param is_out_port: flag that define whether port is output port or not.
+ :return: id of new Input operation
+ """
+ # We import it here because Op imports add_attrs_props and update_ie_fields from this file
from mo.ops.input import Input
- input = Input(graph, dict(shape=shape, initial_node_name=node_id, name='{}/placeholder_port_{}'.format(node_id, port)))
+ port_type = '_out' if is_out_port else ''
+ input_op = Input(graph, dict(shape=shape, initial_node_name=node_id,
+ name='{}/placeholder{}_port_{}'.format(node_id, port_type, port)))
edge_attrs = {'in': port, 'out': 0, 'in_attrs': ['in'], 'out_attrs': ['out'],
'fw_tensor_debug_info': [(Node(graph, node_id).soft_get('name'), port)],
'data_attrs': ['fw_tensor_debug_info']}
if not data:
- input_node = input.create_node()
- graph.add_edge(input_node.id, node_id, **edge_attrs)
- log.debug('Input: {} for node {}'.format(input_node.id, node_id))
- log.debug("Add edge from {} to {}".format(node_id, input_node.id))
- return input_node.id
+ if is_out_port:
+ new_input_id = add_input_op_output_port_without_data(graph=graph, node_id=node_id, input_op=input_op,
+ port=port)
+ else:
+ new_input_id = add_input_op_input_port_without_data(graph=graph, node_id=node_id, input_op=input_op,
+ edge_attrs=edge_attrs)
else:
- input_data_node = input.create_node_with_data()
- input = input_data_node.in_node()
- graph.add_edge(input_data_node.id, node_id, **edge_attrs)
- update_ie_fields(graph.node[input.id])
- log.debug('Input: {} for node {}'.format(input.id, node_id))
- log.debug("Add edge from {} to {}".format(input.id, input_data_node.id))
- log.debug("Add edge from {} to {}".format(input_data_node.id, node_id))
- return input.id
+ if is_out_port:
+ new_input_id = add_input_op_output_port_with_data(graph=graph, node_id=node_id, input_op=input_op,
+ port=port)
+ else:
+ new_input_id = add_input_op_input_port_with_data(graph=graph, node_id=node_id, input_op=input_op,
+ edge_attrs=edge_attrs)
+ return new_input_id
+
+
+def add_input_ops_helper_before_infer_input_port(graph: nx.MultiDiGraph, smart_node: Node, port: int, node_id: str,
+ shape: np.array, inputs: list, edges_to_remove: list):
+ n_inputs = len(smart_node.in_nodes())
+ if n_inputs > 1 and port is None:
+ raise Error(
+ 'Node {} has more than 1 input and input shapes were provided. Try not to provide input'
+ ' shapes or specify input port with port:node notation, where port is an integer. '
+ '{}'.format(smart_node.soft_get('name'), refer_to_faq_msg(30)))
+ port = port if port is not None else 0
+ edges_to_remove.append((smart_node.in_node(port).id, smart_node.id))
+ inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=False,
+ shape=shape))
+
+
+def add_input_ops_helper_after_infer_input_port(graph: nx.MultiDiGraph, smart_node: Node, port:int, node_id: str,
+ inputs: list, edges_to_remove: list):
+ n_inputs = len(smart_node.in_nodes())
+ if n_inputs > 1 and port is not None and port != 0:
+ raise Error(
+ 'Input port > 0 in --input is not supported if --input_shape is not provided. Node:'
+ ' "{}". Omit port index and all input ports will be replaced by placeholders. '
+ 'Or provide --input_shape. ' + refer_to_faq_msg(31), node_id)
+ port = port if port is not None else 0
+ in_node = smart_node.in_node(port)
+ shape = in_node['shape'] if 'shape' in in_node else None
+ if shape is None:
+ raise Error('Shape for tensor "{}" is not defined. Can not proceed.' + refer_to_faq_msg(41),
+ in_node.soft_get('name'))
+ inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=True,
+ shape=shape.copy()))
+ edges_to_remove.append((in_node.id, node_id))
+
+
+def add_input_ops_helper_before_infer_output_port(graph: nx.MultiDiGraph, port:int, node_id: str,
+ shape: np.array, inputs: list, edges_to_remove: list):
+ for u, v, edge_attrs in graph.out_edges(node_id, data=True):
+ if edge_attrs['out'] == port:
+ edges_to_remove.append((u, v)) # we need to remove all edges from this port
+ inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=False,
+ shape=shape, is_out_port=True))
+
+def add_input_ops_helper_after_infer_output_port(graph: nx.MultiDiGraph, smart_node: Node, port:int, node_id: str,
+ inputs: list, edges_to_remove: list):
+ out_node = smart_node.out_node(port)
+ shape = out_node['shape'] if 'shape' in out_node else None
+ if shape is None:
+ raise Error('Shape for tensor "{}" is not defined. Can not proceed.' + refer_to_faq_msg(41),
+ out_node.soft_get('name'))
+ inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=True,
+ shape=shape.copy(), is_out_port=True))
+ edges_to_remove.append((node_id, out_node.id))
def add_input_ops(graph: nx.MultiDiGraph, user_defined_inputs: dict, before_infer: bool):
+ """
+    This function adds user-defined input operations.
+ For cutting without port:
+ Op_1 -> Op_2 -> output, user_defined_inputs = {'Op_2': {'shape':[1, 2]}} =>
+ Op_1, New_input (op=Placeholder, shape=[1, 2]) -> Op_2 -> output
+
+ For cutting with input port:
+ Op_1 -> Op_2 -> output, user_defined_inputs = {'Op_2': {'shape':[1, 2], 'in': 0}} =>
+ Op_1, New_input (op=Placeholder, shape=[1, 2]) -> Op_2 -> output
+
+ For cutting with output port:
+ Op_1 -> Op_2 -> output, user_defined_inputs = {'Op_2': {'shape':[1, 2], 'out': 0}} =>
+ Op_1 -> Op_2, New_input (op=Placeholder, shape=[1, 2]) -> output
+
+    In the before_infer=False case, data nodes are also added to these schemes.
+ """
inputs = []
- set_is_input_false(graph)
+ set_is_input(graph, get_nodes_with_attributes(graph, op='Placeholder'), False)
if user_defined_inputs is None:
inputs = get_nodes_with_attributes(graph, op='Placeholder')
else:
# cutting the net by inputs
assert isinstance(user_defined_inputs, dict)
- for key, values in user_defined_inputs.items():
- for value in values:
- if 'out' in value:
- raise Error(
- 'Cutting the net by output ports of nodes is forbidden. Can not cut the edge from output port '
- '{} of node {}'.format(value['out'], key))
-
edges_to_remove = []
for node_id in user_defined_inputs:
for port_and_shape_info in user_defined_inputs[node_id]:
if 'added' in port_and_shape_info and port_and_shape_info['added']:
continue
+
+ is_out_port = 'out' in port_and_shape_info # by default we assume input port or input node without port
shape = port_and_shape_info['shape'] if 'shape' in port_and_shape_info else None
- port = port_and_shape_info['in'] if 'in' in port_and_shape_info else None
smart_node = Node(graph, node_id)
- n_inputs = len(smart_node.in_nodes())
- # specific Placeholder cases
+
+ # Common port index check
+ if is_out_port:
+ port = port_and_shape_info['out'] # we check that 'out' in port_and_shape_info earlier
+ if port is None:
+ raise Error('Output port for input node {} should be specified, it cannot be None!'.format(
+ node_id
+ ))
+ if port is not None and port not in smart_node.out_nodes():
+ raise Error('Output port index {} is out of number of available output ports for node "{}". ' +
+ refer_to_faq_msg(29), port, node_id)
+ else:
+ port = port_and_shape_info['in'] if 'in' in port_and_shape_info else None
+ if port is not None and port not in smart_node.in_nodes():
+ raise Error('Input port index {} is out of number of available input ports for node "{}". ' +
+ refer_to_faq_msg(29), port, node_id)
+
+ # specific Placeholder case
if smart_node.op == 'Placeholder':
if port is not None:
- raise Error('Placeholder node "{}" doesn\'t have input port, but input port {} was provided. ' +
- refer_to_faq_msg(28), node_id, port)
+ raise Error(
+ 'Placeholder node "{}" doesn\'t have input port, but input port {} was provided. ' +
+ refer_to_faq_msg(28), node_id, port)
if shape is not None:
graph.node[node_id]['shape'] = shape
inputs.append(node_id)
port_and_shape_info['added'] = True
continue
- # common port index check
- if port is not None and port >= n_inputs:
- raise Error('Port index {} is out of number of available input ports for node "{}". ' +
- refer_to_faq_msg(29), port, n_inputs)
+
if before_infer:
if shape is None:
continue
- # we cut with shapes provided by user and there is no need to wait till infer
- if n_inputs > 1 and port is None:
- raise Error('Node {} has more than 1 input and input shapes were provided. Try not to provide input'
- ' shapes or specify input port with port:node notation, where port is an integer. '
- '{}'.format(smart_node.soft_get('name'), refer_to_faq_msg(30)))
- if port is None:
- assert n_inputs == 1
- port = 0
- edges_to_remove = [(smart_node.in_node(port).id, smart_node.id)]
- inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=False, shape=shape))
- port_and_shape_info['added'] = True
+ # We cut with shapes provided by user and there is no need to wait till infer
+ if is_out_port:
+ add_input_ops_helper_before_infer_output_port(graph, port, node_id, shape, inputs,
+ edges_to_remove)
+ else:
+ add_input_ops_helper_before_infer_input_port(graph, smart_node, port, node_id, shape, inputs,
+ edges_to_remove)
else:
-
- # we cut after infer and
- if n_inputs > 1 and port is not None and port != 0:
- raise Error('Input port > 0 in --input is not supported if --input_shape is not provided. Node:'
- ' "{}". Omit port index and all input ports will be replaced by placeholders. '
- 'Or provide --input_shape. ' + refer_to_faq_msg(31), node_id)
- for first, second, edge_attrs in list(graph.in_edges(node_id, data=True)):
- if graph.node[first]['value'] is not None:
- continue
- if port is not None and edge_attrs['in'] != port:
- continue
- shape = graph.node[first]['shape'].copy()
- if shape is None:
- raise Error('Shape for tensor "{}" is not defined. Can not proceed. ' + refer_to_faq_msg(41),
- first)
- port = port if port is not None else edge_attrs['in']
- inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=True, shape=shape))
- port_and_shape_info['added'] = True
- edges_to_remove.append((first, second))
- graph.remove_edges_from(edges_to_remove)
- edges_to_remove = []
+ # We cut after infer and we need inferred shapes in nodes
+ if is_out_port:
+ add_input_ops_helper_after_infer_output_port(graph, smart_node, port, node_id, inputs,
+ edges_to_remove)
+ else:
+ add_input_ops_helper_after_infer_input_port(graph, smart_node, port, node_id, inputs,
+ edges_to_remove)
+ port_and_shape_info['added'] = True
+ graph.remove_edges_from(edges_to_remove)
# if len(inputs) == 0, shapes were not provided for all nodes in input-cut request,
# we didn't cut inputs before infer, so this check is useless and invalid
if len(inputs):
- set_is_input_true(graph, inputs)
+ set_is_input(graph, inputs, True)
# Check if there are inputs that are not listed in user_defined_inputs and are needed to calculate outputs
outputs = get_nodes_with_attributes(graph, is_output=True)
+ visited = set()
for output_name in outputs:
- reverse_dfs(graph, output_name, check_input)
+ reverse_dfs(graph, output_name, check_input, visited)
return inputs
diff --git a/model-optimizer/mo/front/kaldi/extractor.py b/model-optimizer/mo/front/kaldi/extractor.py
index 2d4e9e1cd..f0e3b3b11 100644
--- a/model-optimizer/mo/front/kaldi/extractor.py
+++ b/model-optimizer/mo/front/kaldi/extractor.py
@@ -21,39 +21,27 @@ from mo.utils.utils import refer_to_faq_msg
def node_pb_arg(pb_extractor):
- return lambda node: pb_extractor(node.pb)
+ return lambda node: pb_extractor(node.parameters)
-kaldi_type_extractors = {
- # Data Layers
- 'globalinput': node_pb_arg(lambda x: dict(op='Placeholder', type='Input',
- infer=lambda node: single_output_infer(node, lambda n: n.shape))),
-
- # Utility Layers
- 'softmax': node_pb_arg(lambda _: dict(op='SoftMax', type='SoftMax', infer=copy_shape_infer)),
-}
+kaldi_type_extractors = {}
def common_kaldi_fields(node: Node) -> dict:
- pb = node.pb if node.pb else node
- layer_type = pb.type
+ layer_type = node.op
return {
'kind': 'op',
- 'name': pb.name,
- 'type': layer_type,
+ 'name': node.id,
'op': layer_type,
# generic code relies on op; it should be overridden by specific op extractor
'infer': None,
- 'precision': 'FP32' # TODO use real precision derived from the model
+ 'precision': 'FP32'
}
def kaldi_extractor(node: Node) -> (bool, dict):
- if node.has_valid('op') and node.op == 'Identity':
- return True, {}
result = common_kaldi_fields(node)
-
- layer_type = result['type'].lower()
+ layer_type = result['op']
if layer_type not in kaldi_type_extractors:
raise Error('Found unsupported layer {}. '.format(node.id) +
'Model Optimizer does not support this layer type: {}. '.format(layer_type) +
diff --git a/model-optimizer/mo/front/kaldi/extractors/add_shift_ext.py b/model-optimizer/mo/front/kaldi/extractors/add_shift_ext.py
new file mode 100644
index 000000000..ff5dff90b
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/add_shift_ext.py
@@ -0,0 +1,38 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.utils import read_binary_vector, read_learning_info
+from mo.ops.scale_shift import ScaleShiftOp
+
+
+class AddShiftFrontExtractor(FrontExtractorOp):
+ op = 'addshift'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ pb = node.parameters
+ read_learning_info(pb)
+ biases = read_binary_vector(pb)
+ bias_term = True
+ mapping_rule = {'bias_term': bias_term}
+ embed_input(mapping_rule, 1, 'weights', np.ones(biases.shape))
+ embed_input(mapping_rule, 2, 'biases', biases)
+ ScaleShiftOp.update_node_stat(node, mapping_rule)
+ return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/memory_ext.py b/model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py
index 58bcf43cd..7900639b2 100644
--- a/model-optimizer/mo/front/kaldi/extractors/memory_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py
@@ -13,21 +13,17 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor
+from mo.front.kaldi.utils import read_learning_info
+from mo.graph.graph import Node
-class MemoryFrontExtractor(FrontExtractorOp):
- op = 'memory'
+class AffineComponentFrontExtractor(FrontExtractorOp):
+ op = 'affinecomponent'
enabled = True
@staticmethod
- def extract(node):
- mapping_rule = {
- 'id': node.pb.id,
- 'index': node.pb.index,
- 'size': node.pb.size
- }
- Op.get_op_class_by_name('Memory').update_node_stat(node, mapping_rule)
- return __class__.enabled
+ def extract(node: Node):
+ read_learning_info(node.parameters)
+ return FixedAffineComponentFrontExtractor.extract(node)
diff --git a/model-optimizer/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py b/model-optimizer/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py
new file mode 100644
index 000000000..70a8c41f9
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py
@@ -0,0 +1,29 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor
+from mo.front.kaldi.utils import read_learning_info
+from mo.graph.graph import Node
+
+
+# Distinct class name: the generic AffineComponentFrontExtractor already lives in
+# affine_component_ext.py; duplicating it here is misleading and collision-prone.
+class AffineComponentPreconditionedOnlineFrontExtractor(FrontExtractorOp):
+    op = 'affinecomponentpreconditionedonline'
+    enabled = True
+
+    @staticmethod
+    def extract(node: Node):
+        read_learning_info(node.parameters)
+        return FixedAffineComponentFrontExtractor.extract(node)
diff --git a/model-optimizer/mo/front/kaldi/extractors/affine_transform_ext.py b/model-optimizer/mo/front/kaldi/extractors/affine_transform_ext.py
index c335d92ac..8175fb108 100644
--- a/model-optimizer/mo/front/kaldi/extractors/affine_transform_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/affine_transform_ext.py
@@ -13,10 +13,10 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
-from mo.front.caffe.extractors.utils import weights_biases
+from mo.front.caffe.extractors.utils import embed_input
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.front.kaldi.utils import read_binary_matrix, read_binary_vector, read_learning_info
+from mo.ops.inner_product import InnerProduct
class AffineTransformFrontExtractor(FrontExtractorOp):
@@ -25,11 +25,17 @@ class AffineTransformFrontExtractor(FrontExtractorOp):
@staticmethod
def extract(node):
+ pb = node.parameters
+ read_learning_info(pb)
+ weights, weights_shape = read_binary_matrix(pb)
+ biases = read_binary_vector(pb)
+
mapping_rule = {
- 'out-size': node.pb.num_output,
+ 'out-size': weights_shape[0],
'layout': 'NCHW'
}
- mapping_rule.update(weights_biases(node.pb.bias_term, node.pb))
+ embed_input(mapping_rule, 1, 'weights', weights)
+ embed_input(mapping_rule, 2, 'biases', biases)
- Op.get_op_class_by_name('FullyConnected').update_node_stat(node, mapping_rule)
+ InnerProduct.update_node_stat(node, mapping_rule)
return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/concat_ext.py b/model-optimizer/mo/front/kaldi/extractors/concat_ext.py
index b0cd82214..9299c7c2d 100644
--- a/model-optimizer/mo/front/kaldi/extractors/concat_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/concat_ext.py
@@ -15,7 +15,7 @@
"""
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.ops.concat import Concat
class ConcatFrontExtractor(FrontExtractorOp):
@@ -25,8 +25,7 @@ class ConcatFrontExtractor(FrontExtractorOp):
@staticmethod
def extract(node):
mapping_rule = {
- 'axis': node.pb.axis
+ 'axis': 1
}
-
- Op.get_op_class_by_name('Concat').update_node_stat(node, mapping_rule)
+ Concat.update_node_stat(node, mapping_rule)
return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/convolution_ext.py b/model-optimizer/mo/front/kaldi/extractors/convolution_ext.py
deleted file mode 100644
index 16998d83e..000000000
--- a/model-optimizer/mo/front/kaldi/extractors/convolution_ext.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import copy
-
-import numpy as np
-
-from mo.front.caffe.extractors.utils import weights_biases
-from mo.front.common.extractors.utils import layout_attrs
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.graph.graph import Node
-from mo.ops.convolution import Convolution
-from mo.ops.op import Op
-
-
-class Convolution1DFrontExtractor(FrontExtractorOp):
- op = 'convolution'
- enabled = True
-
- @staticmethod
- def extract(node: Node) -> bool:
- params = node.pb
- mapping_rule = {
- 'output': params.output,
- 'patch_stride': params.patch_stride,
- 'bias_term': None,
- 'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
- 'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
- 'dilation': int64_array([1, 1, 1, 1]),
- 'kernel': int64_array([1, 1, 1, params.kernel]),
- 'stride': int64_array([1, 1, 1, params.stride]),
- 'kernel_spatial': int64_array([1, params.kernel]),
- 'input_feature_channel': 1,
- 'output_feature_channel': 0,
- 'kernel_spatial_idx': [2,3],
- 'group': 1,
- 'reshape_kernel': True,
- }
- mapping_rule.update(layout_attrs())
- mapping_rule.update(weights_biases(params.bias_term, params))
- if len(params.blobs) > 1 and len(params.blobs[1]) > 0:
- mapping_rule['bias_addable'] = True
- else:
- mapping_rule['bias_addable'] = False
-
- Op.get_op_class_by_name('Convolution').update_node_stat(node, mapping_rule)
- return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/convolutional_1d_component_ext.py b/model-optimizer/mo/front/kaldi/extractors/convolutional_1d_component_ext.py
new file mode 100644
index 000000000..d77eeb33e
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/convolutional_1d_component_ext.py
@@ -0,0 +1,96 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.common.extractors.utils import layout_attrs
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.loader.utils import read_token_value, collect_until_whitespace, find_next_tag
+from mo.front.kaldi.utils import read_learning_info, read_binary_matrix, read_binary_vector
+from mo.graph.graph import Node
+from mo.ops.convolution import Convolution
+from mo.utils.error import Error
+from mo.utils.utils import refer_to_faq_msg
+
+
+class ConvolutionalComponentFrontExtractor(FrontExtractorOp):
+    op = 'convolutional1dcomponent'  # Naming like in Kaldi
+    enabled = True
+
+    @staticmethod
+    def extract(node: Node) -> bool:
+        """
+        Extract 1D convolution parameters from node.parameters.
+        node.parameters is a file-descriptor-like binary stream.
+        :param node: Convolution node
+        :return: True if extraction succeeded
+        """
+        pb = node.parameters
+        read_learning_info(pb)
+
+        kernel = read_token_value(pb, b'<PatchDim>')
+        stride = read_token_value(pb, b'<PatchStep>')
+        patch_stride = read_token_value(pb, b'<PatchStride>')
+
+        token = find_next_tag(pb)
+        # <AppendedConv> is optional; default to False so the attribute is always defined
+        appended_conv = token == '<AppendedConv>'
+        if appended_conv:
+            token = find_next_tag(pb)
+        if token != '<FilterParams>':
+            raise Error('Can not load token {} from Kaldi model'.format(token) +
+                        refer_to_faq_msg(94))
+        collect_until_whitespace(pb)
+        weights, weights_shape = read_binary_matrix(pb)
+
+        collect_until_whitespace(pb)
+        biases = read_binary_vector(pb)
+
+        if (patch_stride - kernel) % stride != 0:
+            raise Error(
+                'Kernel size and stride does not correspond to `patch_stride` attribute of Convolution layer. ' +
+                refer_to_faq_msg(93))
+
+        output = biases.shape[0]
+        if weights_shape[0] != output:
+            raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' +
+                        refer_to_faq_msg(93))
+
+        mapping_rule = {
+            'output': output,
+            'patch_stride': patch_stride,
+            'bias_term': None,
+            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
+            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
+            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
+            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
+            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
+            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
+            'input_feature_channel': 1,
+            'output_feature_channel': 0,
+            'kernel_spatial_idx': [2, 3],
+            'group': 1,
+            'reshape_kernel': True,
+            'appended_conv': appended_conv
+        }
+
+        mapping_rule.update(layout_attrs())
+        embed_input(mapping_rule, 1, 'weights', weights)
+        embed_input(mapping_rule, 2, 'biases', biases)
+
+        mapping_rule['bias_addable'] = len(biases) > 0
+
+        Convolution.update_node_stat(node, mapping_rule)
+        return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/convolutional_component_ext.py b/model-optimizer/mo/front/kaldi/extractors/convolutional_component_ext.py
new file mode 100644
index 000000000..21a1e3350
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/convolutional_component_ext.py
@@ -0,0 +1,88 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.common.extractors.utils import layout_attrs
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.loader.utils import read_token_value, collect_until_whitespace
+from mo.front.kaldi.utils import read_learning_info, read_binary_matrix, read_binary_vector
+from mo.graph.graph import Node
+from mo.ops.convolution import Convolution
+from mo.utils.error import Error
+from mo.utils.utils import refer_to_faq_msg
+
+
+class ConvolutionalComponentFrontExtractor(FrontExtractorOp):
+ op = 'convolutionalcomponent' # Naming like in Kaldi
+ enabled = True
+
+ @staticmethod
+ def extract(node: Node) -> bool:
+ """
+ Extract conv parameters from node.parameters.
+ node.parameters like file descriptor object.
+ :param node: Convolution node
+ :return:
+ """
+ pb = node.parameters
+ kernel = read_token_value(pb, b'<PatchDim>')
+ stride = read_token_value(pb, b'<PatchStep>')
+ patch_stride = read_token_value(pb, b'<PatchStride>')
+
+ read_learning_info(pb)
+
+ collect_until_whitespace(pb)
+ weights, weights_shape = read_binary_matrix(pb)
+
+ collect_until_whitespace(pb)
+ biases = read_binary_vector(pb)
+
+ if (patch_stride - kernel) % stride != 0:
+ raise Error(
+ 'Kernel size and stride does not correspond to `patch_stride` attribute of Convolution layer. ' +
+ refer_to_faq_msg(93))
+
+ output = biases.shape[0]
+ if weights_shape[0] != output:
+ raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' +
+ refer_to_faq_msg(93))
+
+ mapping_rule = {
+ 'output': output,
+ 'patch_stride': patch_stride,
+ 'bias_term': None,
+ 'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
+ 'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
+ 'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
+ 'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
+ 'stride': np.array([1, 1, 1, stride], dtype=np.int64),
+ 'kernel_spatial': np.array([1, kernel], dtype=np.int64),
+ 'input_feature_channel': 1,
+ 'output_feature_channel': 0,
+ 'kernel_spatial_idx': [2, 3],
+ 'group': 1,
+ 'reshape_kernel': True,
+ }
+
+ mapping_rule.update(layout_attrs())
+ embed_input(mapping_rule, 1, 'weights', weights)
+ embed_input(mapping_rule, 2, 'biases', biases)
+
+ mapping_rule['bias_addable'] = len(biases) > 0
+
+ Convolution.update_node_stat(node, mapping_rule)
+ return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/inner_product_ext.py b/model-optimizer/mo/front/kaldi/extractors/copy_ext.py
index 0ec3f5726..3348ef14c 100644
--- a/model-optimizer/mo/front/kaldi/extractors/inner_product_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/copy_ext.py
@@ -14,22 +14,27 @@
limitations under the License.
"""
-from mo.front.caffe.extractors.utils import weights_biases
-from mo.front.common.extractors.utils import layout_attrs
+import numpy as np
+
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.front.kaldi.loader.utils import read_binary_integer32_token, read_blob
+from mo.ops.permute import Permute
-class InnerProductFrontExtractor(FrontExtractorOp):
- op = 'fullyconnected'
+class CopyFrontExtractor(FrontExtractorOp):
+ op = 'copy'
enabled = True
@staticmethod
def extract(node):
- mapping_rule = {
- 'out-size': node.pb.num_output
+ pb = node.parameters
+ weights_size = read_binary_integer32_token(pb)
+ weights = read_blob(pb, weights_size, dtype=np.int32) - 1
+ attrs = {
+ 'infer': copy_shape_infer
}
- mapping_rule.update(layout_attrs())
- mapping_rule.update(weights_biases(node.pb.bias_term, node.pb))
- Op.get_op_class_by_name('FullyConnected').update_node_stat(node, mapping_rule)
+ embed_input(attrs, 1, 'indexes', weights)
+ Permute.update_node_stat(node, attrs)
return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/fixed_affine_component_ext.py b/model-optimizer/mo/front/kaldi/extractors/fixed_affine_component_ext.py
new file mode 100644
index 000000000..eee267f16
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/fixed_affine_component_ext.py
@@ -0,0 +1,51 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.loader.utils import find_next_tag, read_placeholder
+from mo.front.kaldi.utils import read_binary_matrix, read_binary_vector
+from mo.ops.inner_product import InnerProduct
+from mo.utils.error import Error
+
+
+class FixedAffineComponentFrontExtractor(FrontExtractorOp):
+ op = 'fixedaffinecomponent'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ pb = node.parameters
+ tag = find_next_tag(pb)
+ if tag != '<LinearParams>':
+ raise Error('FixedAffineComponent must contain LinearParams')
+ read_placeholder(pb, 1)
+ weights, weights_shape = read_binary_matrix(pb)
+ tag = find_next_tag(pb)
+ read_placeholder(pb, 1)
+ if tag != '<BiasParams>':
+ raise Error('FixedAffineComponent must contain BiasParams')
+ biases = read_binary_vector(pb)
+
+ mapping_rule = {
+ 'out-size': weights_shape[0],
+ 'layout': 'NCHW'
+ }
+ embed_input(mapping_rule, 1, 'weights', weights)
+ embed_input(mapping_rule, 2, 'biases', biases)
+
+ InnerProduct.update_node_stat(node, mapping_rule)
+ return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/lstm_projected_streams_ext.py b/model-optimizer/mo/front/kaldi/extractors/lstm_projected_streams_ext.py
new file mode 100644
index 000000000..09e8061d3
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/lstm_projected_streams_ext.py
@@ -0,0 +1,68 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+from extensions.ops.lstm_cell import LSTMCell
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.loader.utils import collect_until_token, collect_until_whitespace, get_uint32
+from mo.front.kaldi.utils import read_binary_matrix, read_binary_vector
+
+
+class LSTMProjectedStreamsFrontExtractor(FrontExtractorOp):
+ op = 'lstmprojectedstreams'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ clip_value = 50
+ pb = node.parameters
+ res = collect_until_whitespace(pb)
+ if res == b'<CellClip>':
+ clip_value = get_uint32(pb.read(4))
+ collect_until_token(pb, b'FM')
+ gifo_x_weights, gifo_x_weights_shape = read_binary_matrix(pb, False)
+ gifo_r_weights, gifo_r_weights_shape = read_binary_matrix(pb)
+ gifo_biases = read_binary_vector(pb)
+ input_gate_weights = read_binary_vector(pb)
+ forget_gate_weights = read_binary_vector(pb)
+ output_gate_weights = read_binary_vector(pb)
+
+ projection_weights, projection_weights_shape = read_binary_matrix(pb)
+
+ mapping_rule = {'gifo_x_weights_shape': gifo_x_weights_shape,
+ 'gifo_r_weights_shape': gifo_r_weights_shape,
+ 'projection_weights_shape': projection_weights_shape,
+ 'clip_value': clip_value
+ }
+
+ embed_input(mapping_rule, 1, 'gifo_x_weights', gifo_x_weights)
+ embed_input(mapping_rule, 2, 'gifo_r_weights', gifo_r_weights)
+ embed_input(mapping_rule, 3, 'gifo_biases', gifo_biases)
+ embed_input(mapping_rule, 4, 'input_gate_weights', input_gate_weights)
+ embed_input(mapping_rule, 5, 'forget_gate_weights', forget_gate_weights)
+ embed_input(mapping_rule, 6, 'output_gate_weights', output_gate_weights)
+ embed_input(mapping_rule, 7, 'projection_weights', projection_weights)
+
+ LSTMCell.update_node_stat(node, mapping_rule)
+ return __class__.enabled
+
+
+class LSTMProjectedFrontExtractor(FrontExtractorOp):
+ op = 'lstmprojected'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ return LSTMProjectedStreamsFrontExtractor.extract(node)
diff --git a/model-optimizer/mo/front/kaldi/extractors/max_pooling_ext.py b/model-optimizer/mo/front/kaldi/extractors/max_pooling_ext.py
new file mode 100644
index 000000000..0e38dd33e
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/max_pooling_ext.py
@@ -0,0 +1,60 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+
+from mo.front.common.extractors.utils import layout_attrs
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.loader.utils import read_token_value, collect_until_whitespace, collect_until_token, \
+ read_binary_integer32_token, find_next_tag, read_placeholder
+from mo.ops.pooling import Pooling
+from mo.utils.error import Error
+
+
+class MaxPoolingComponentFrontExtractor(FrontExtractorOp):
+ op = 'maxpoolingcomponent'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ pb = node.parameters
+ collect_until_token(pb, b'<PoolSize>')
+ kernel = read_binary_integer32_token(pb)
+ tag = find_next_tag(pb)
+ if tag == '<PoolStep>':
+ read_placeholder(pb, 1)
+ stride = read_binary_integer32_token(pb)
+ pool_step = stride
+ pool_stride = read_token_value(pb, b'<PoolStride>')
+ elif tag == '<PoolStride>':
+ stride = 1
+ pool_step = None
+ read_placeholder(pb, 1)
+ pool_stride = read_binary_integer32_token(pb)
+ else:
+ raise Error('Can not extract parameters for {}'.format(node))
+
+ mapping_rule = {
+ 'window': np.array([1, 1, 1, kernel], dtype=np.int64),
+ 'stride': np.array([1, 1, stride, stride], dtype=np.int64),
+ 'pool_stride': pool_stride,
+ 'pool_step': pool_step,
+ 'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
+ 'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
+ 'pool_method': 'max',
+ }
+ mapping_rule.update(layout_attrs())
+ Pooling.update_node_stat(node, mapping_rule)
+ return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py b/model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py
new file mode 100644
index 000000000..4d1e9e930
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py
@@ -0,0 +1,43 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.caffe.extractors.utils import embed_input
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.loader.utils import read_binary_integer32_token, collect_until_token
+from mo.ops.scale_shift import ScaleShiftOp
+
+
+class NormalizeComponentFrontExtractor(FrontExtractorOp):
+ op = 'normalizecomponent'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ pb = node.parameters
+ collect_until_token(pb, b'<Dim>')
+ dim = read_binary_integer32_token(pb)
+ target_rms = 1
+ d_scaled = dim * target_rms ** 2
+ in_norm = np.zeros([dim], np.float64)
+ in_norm += 1.0 / d_scaled
+ in_norm = np.maximum(in_norm, 2. ** (-66))
+ in_norm = np.power(in_norm, -0.5)
+ attrs = {}
+ embed_input(attrs, 1, 'weights', in_norm)
+ ScaleShiftOp.update_node_stat(node, attrs)
+ return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/pooling_ext.py b/model-optimizer/mo/front/kaldi/extractors/pooling_ext.py
deleted file mode 100644
index 44c64a32d..000000000
--- a/model-optimizer/mo/front/kaldi/extractors/pooling_ext.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from mo.front.common.extractors.utils import layout_attrs
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
-
-
-class PoolingFrontExtractor(FrontExtractorOp):
- op = 'pooling'
- enabled = True
-
- @staticmethod
- def extract(node):
- mapping_rule = {
- 'window': int64_array([1, 1, 1, node.pb.kernel]),
- 'stride': int64_array([1, 1, node.pb.stride, node.pb.stride]),
- 'pool_stride': node.pb.pool_stride,
- 'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
- 'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
- }
- mapping_rule.update(layout_attrs())
- Op.get_op_class_by_name('Pooling').update_node_stat(node, mapping_rule)
- return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/clamp_ext.py b/model-optimizer/mo/front/kaldi/extractors/rectified_linear_component_ext.py
index 80157db80..713db4beb 100644
--- a/model-optimizer/mo/front/kaldi/extractors/clamp_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/rectified_linear_component_ext.py
@@ -15,19 +15,14 @@
"""
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.ops.relu import ReLU
-class ClampFrontExtractor(FrontExtractorOp):
- op = 'clamp'
+class RectifiedLinearComponentFrontExtractor(FrontExtractorOp):
+ op = 'rectifiedlinearcomponent'
enabled = True
@staticmethod
def extract(node):
- mapping_rule = {
- 'min': node.pb.min,
- 'max': node.pb.max,
- }
-
- Op.get_op_class_by_name('Clamp').update_node_stat(node, mapping_rule)
+ ReLU.update_node_stat(node, {})
return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/scale_shift.py b/model-optimizer/mo/front/kaldi/extractors/rescale_ext.py
index 5ade29505..459e5582e 100644
--- a/model-optimizer/mo/front/kaldi/extractors/scale_shift.py
+++ b/model-optimizer/mo/front/kaldi/extractors/rescale_ext.py
@@ -14,19 +14,23 @@
limitations under the License.
"""
-from mo.front.caffe.extractors.utils import weights_biases
+from mo.front.caffe.extractors.utils import embed_input
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.front.kaldi.loader.utils import collect_until_whitespace, read_binary_integer32_token
+from mo.front.kaldi.utils import read_binary_vector, read_learning_info
+from mo.ops.scale_shift import ScaleShiftOp
-class ScaleShiftFrontExtractor(FrontExtractorOp):
- op = 'scaleshift'
+class RescaleFrontExtractor(FrontExtractorOp):
+ op = 'rescale'
enabled = True
@staticmethod
def extract(node):
+ pb = node.parameters
+ read_learning_info(pb)
+ weights = read_binary_vector(pb)
mapping_rule = {}
- mapping_rule.update(weights_biases(node.pb.bias_term, node.pb))
- # update the attributes of the node
- Op.get_op_class_by_name('ScaleShift').update_node_stat(node, mapping_rule)
+ embed_input(mapping_rule, 1, 'weights', weights)
+ ScaleShiftOp.update_node_stat(node, mapping_rule)
return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/reshape.py b/model-optimizer/mo/front/kaldi/extractors/reshape.py
deleted file mode 100644
index cf47f67ab..000000000
--- a/model-optimizer/mo/front/kaldi/extractors/reshape.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-import numpy as np
-
-from mo.front.common.partial_infer.reshape import tf_reshape_shape_infer
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.graph.graph import Node
-from mo.ops.op import Op
-from mo.ops.reshape import Reshape
-
-
-class ReshapeFrontExtractor(FrontExtractorOp):
- op = 'reshape'
- enabled = True
-
- @staticmethod
- def extract(node):
- mapping_rule = {
- 'dim': node.pb.dim if hasattr(node.pb, 'dim') else None,
- 'axis': node.pb.axis,
- 'num_axes': node.pb.num_axes,
- 'infer': ReshapeFrontExtractor.infer
- }
- Op.get_op_class_by_name('Reshape').update_node_stat(node, mapping_rule)
- return __class__.enabled
-
- @staticmethod
- def infer(node: Node):
- in_node = node.in_node().in_node() # prev_layer_node -> data -> this_node
- input_shape = node.in_node().shape
- # Kaldi Reshape hugely depends on the layers that precedes or succeeds
- # Convolution/Pooling layers. Therefore there are 4 cases with different
- # partial inference.
- batch = input_shape[0]
- if in_node.type == 'Convolution' or in_node.type == 'Pooling':
- output_spatial = int64_array([batch, np.prod(input_shape[1:])])
- return ReshapeFrontExtractor.set_shape_and_dim(node, output_spatial)
- # Supports ONLY NCHW and NH layouts
- spatial_shape = input_shape[1]
- if input_shape.shape == (4,):
- spatial_shape = input_shape[2:3]
- out_node = node.out_node().out_node()
- if out_node.type == 'Convolution':
- output_spatial = int64_array([batch, int(np.ceil(spatial_shape / out_node.patch_stride)), 1, out_node.patch_stride])
- return ReshapeFrontExtractor.set_shape_and_dim(node, output_spatial)
- elif out_node.type == 'Pooling':
- output_spatial = int64_array([batch, out_node.pool_stride, 1, int(np.ceil(spatial_shape / out_node.pool_stride))])
- return ReshapeFrontExtractor.set_shape_and_dim(node, output_spatial)
-
- @staticmethod
- def set_shape_and_dim(node: Node, reshape_dim):
- Reshape.update_node_stat(node, {'dim': reshape_dim})
- node.out_node().shape = reshape_dim
-
diff --git a/model-optimizer/mo/front/kaldi/extractors/sigmoid_ext.py b/model-optimizer/mo/front/kaldi/extractors/sigmoid_ext.py
index 31f284574..a68ad4f3f 100644
--- a/model-optimizer/mo/front/kaldi/extractors/sigmoid_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/sigmoid_ext.py
@@ -18,7 +18,7 @@ from mo.front.extractor import FrontExtractorOp
from mo.ops.activation import Activation
-class Sigmoid(FrontExtractorOp):
+class SigmoidFrontExtractor(FrontExtractorOp):
op = 'sigmoid'
enabled = True
diff --git a/model-optimizer/mo/front/kaldi/extractors/slice_ext.py b/model-optimizer/mo/front/kaldi/extractors/slice_ext.py
index 1acfb9bba..4235c0dbc 100644
--- a/model-optimizer/mo/front/kaldi/extractors/slice_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/slice_ext.py
@@ -13,9 +13,12 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+import numpy as np
+
from mo.front.common.partial_infer.slice import caffe_slice_infer
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.front.kaldi.loader.utils import read_binary_integer32_token, read_blob
+from mo.ops.slice import Slice
class SliceFrontExtractor(FrontExtractorOp):
@@ -24,12 +27,15 @@ class SliceFrontExtractor(FrontExtractorOp):
@staticmethod
def extract(node):
+ pb = node.parameters
+ num_slice_points = read_binary_integer32_token(pb)
mapping_rule = {
- 'axis': node.pb.axis if hasattr(node.pb, 'axis') else 1,
- 'slice_point': node.pb.slice_point,
+ 'axis': 1,
+ 'slice_point': read_blob(pb, num_slice_points, np.int32),
'batch_dims': 0,
'spatial_dims': 1,
'infer': caffe_slice_infer
}
- Op.get_op_class_by_name('Slice').update_node_stat(node, mapping_rule)
+ node.parameters.close()
+ Slice.update_node_stat(node, mapping_rule)
return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/eltwise_ext.py b/model-optimizer/mo/front/kaldi/extractors/softmax_ext.py
index b44fe70e3..da9f0a1eb 100644
--- a/model-optimizer/mo/front/kaldi/extractors/eltwise_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/softmax_ext.py
@@ -13,21 +13,25 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
+from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.ops.softmax import Softmax
-class EltwiseFrontExtractor(FrontExtractorOp):
- op = 'eltwise'
+class SoftmaxComponentFrontExtractor(FrontExtractorOp):
+ op = 'softmaxcomponent'
enabled = True
@staticmethod
def extract(node):
- mapping_rule = {
- 'operation': node.pb.operation,
- }
- # update the attributes of the node
- Op.get_op_class_by_name('Eltwise').update_node_stat(node, mapping_rule)
- return __class__.enabled
+ return SoftmaxFrontExtractor.extract(node)
+
+
+class SoftmaxFrontExtractor(FrontExtractorOp):
+ op = 'softmax'
+ enabled = True
+ @staticmethod
+ def extract(node):
+ Softmax.update_node_stat(node, {'infer': copy_shape_infer})
+ return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/splice_component_ext.py b/model-optimizer/mo/front/kaldi/extractors/splice_component_ext.py
new file mode 100644
index 000000000..47cbc23f6
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/extractors/splice_component_ext.py
@@ -0,0 +1,53 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+
+from extensions.ops.splice import Splice
+from mo.front.extractor import FrontExtractorOp
+from mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, read_binary_integer32_token, \
+ collect_until_whitespace
+from mo.front.kaldi.utils import read_binary_vector
+from mo.utils.error import Error
+
+
+class SpliceFrontExtractor(FrontExtractorOp):
+ op = 'splicecomponent'
+ enabled = True
+
+ @staticmethod
+ def extract(node):
+ pb = node.parameters
+ mapping_rule = {
+ 'context': list()
+ }
+ tag = find_next_tag(pb)
+ if tag == '<LeftContext>':
+ read_placeholder(pb, 1)
+ l_context = read_binary_integer32_token(pb)
+ tag = find_next_tag(pb)
+ if tag != '<RightContext>':
+ raise Error('Unknown token {} in SpliceComponent node {}'.format(tag, node.id))
+ read_placeholder(pb, 1)
+ r_context = read_binary_integer32_token(pb)
+ for i in range(-l_context, r_context + 1):
+ mapping_rule['context'].append(i)
+ elif tag == '<Context>':
+ collect_until_whitespace(pb)
+ mapping_rule['context'] = read_binary_vector(pb, False, dtype=np.int32)
+ else:
+ raise Error('Unknown token {} in SpliceComponent node {}'.format(tag, node.id))
+ Splice.update_node_stat(node, mapping_rule)
+ return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/split.py b/model-optimizer/mo/front/kaldi/extractors/split.py
deleted file mode 100644
index 6e85cc889..000000000
--- a/model-optimizer/mo/front/kaldi/extractors/split.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-from mo.front.common.partial_infer.slice import caffe_slice_infer
-from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
-
-
-class SplitFrontExtractor(FrontExtractorOp):
- op = 'split'
- enabled = True
-
- @staticmethod
- def extract(node):
- mapping_rule = {
- 'axis': node.pb.axis if node.pb.axis else 1,
- 'num_split': node.pb.num_split,
- }
- Op.get_op_class_by_name('Split').update_node_stat(node, mapping_rule)
- return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/extractors/activation_ext.py b/model-optimizer/mo/front/kaldi/extractors/tanh_component_ext.py
index 3125387b0..e67f9c410 100644
--- a/model-optimizer/mo/front/kaldi/extractors/activation_ext.py
+++ b/model-optimizer/mo/front/kaldi/extractors/tanh_component_ext.py
@@ -15,18 +15,14 @@
"""
from mo.front.extractor import FrontExtractorOp
-from mo.ops.op import Op
+from mo.ops.activation import Activation
-class ActivationFrontExtractor(FrontExtractorOp):
- op = 'activation'
+class TanhFrontExtractor(FrontExtractorOp):
+ op = 'tanhcomponent'
enabled = True
@staticmethod
def extract(node):
- mapping_rule = {
- 'operation': node.pb.operation
- }
-
- Op.get_op_class_by_name('Activation').update_node_stat(node, mapping_rule)
+ Activation.update_node_stat(node, {'operation': 'tanh'})
return __class__.enabled
diff --git a/model-optimizer/mo/front/kaldi/loader.py b/model-optimizer/mo/front/kaldi/loader.py
deleted file mode 100644
index 7638a6c5a..000000000
--- a/model-optimizer/mo/front/kaldi/loader.py
+++ /dev/null
@@ -1,544 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-import os
-from io import IOBase
-
-from mo.front.kaldi.extractor import common_kaldi_fields
-from mo.front.kaldi.utils import get_uint32, get_uint16, KaldiNode
-from mo.graph.graph import unique_id, Node
-from mo.utils.error import Error
-
-import networkx as nx
-import numpy as np
-
-from mo.utils.utils import refer_to_faq_msg
-
-
-def read_placeholder(file_desc):
- """
- Placeholder is like: |FW | or |FV | - they take 3 spaces and appear before a matrix or a vector respectively
- :param file_path:
- :return:
- """
- file_desc.read(3)
-
-
-def read_binary_matrix(file_desc, skip: bool = False):
- if not skip:
- read_placeholder(file_desc)
- rows_number = read_binary_integer_token(file_desc)
- cols_number = read_binary_integer_token(file_desc)
- # to compare: ((float *)a->buffer())[10]
- return read_blob(file_desc, rows_number * cols_number), (rows_number, cols_number)
-
-
-def read_binary_vector(file_desc):
- read_placeholder(file_desc)
- elements_number = read_binary_integer_token(file_desc)
- return read_blob(file_desc, elements_number)
-
-
-def collect_until_token(f, token):
- while True:
- # usually there is the following structure <CellDim> DIM<ClipGradient> VALUEFM
- res = collect_until_whitespace(f)
- if res[-2:] == token:
- return
-
-
-class KaldiLayer:
- def __init__(self, f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style=False,
- type='General'):
- self.f = f
- self.graph = graph
- self.type = type
- self.layer_i = layer_i
- self.layer_o = layer_o
- self.layer_name = layer_name
- self.prev_layer_name = prev_layer_name
- self.is_switch_board_style = is_switch_board_style
- self.attrs = dict(type=type)
- self.weights = None
- self.biases = None
-
- def construct_sub_graph(self):
- return add_single_node(self.graph, self.layer_name, self.prev_layer_name, self.attrs, self.weights, self.biases)
-
- def load_build(self):
- return self.construct_sub_graph()
-
-
-class SigmoidKaldiLayer(KaldiLayer):
- def load_build(self):
- self.attrs.update({
- 'operation': 'sigmoid'
- })
- return self.construct_sub_graph()
-
-
-class AffineTransformKaldiLayer(KaldiLayer):
- def load_weights_biases_attrs(self):
- collect_until_token(self.f, b'FM')
- self.weights, weights_shape = read_binary_matrix(self.f, skip=True)
- self.biases = read_binary_vector(self.f)
- self.attrs = {
- 'num_output': self.layer_o,
- 'bias_term': True,
- 'weights_shape': weights_shape,
- 'type': 'AffineTransform'
- }
-
- def load_build(self):
- self.load_weights_biases_attrs()
- return self.construct_sub_graph()
-
-
-class LSTMProjectedStreamsKaldiLayer(KaldiLayer):
- def __init__(self, f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style=False,
- type='General'):
- super().__init__(f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style, type)
- self.clip_value = None
- self.gifo_x_weights = None
- self.gifo_r_weights = None
- self.gifo_biases = None
- self.input_gate_weights = None
- self.forget_gate_weights = None
- self.output_gate_weights = None
- self.projection_weights = None
- self.gifo_x_weights_shape = None
- self.gifo_r_weights_shape = None
- self.projection_weights_shape = None
-
- def load_weights_biases_attrs(self):
- self.clip_value = 1 if self.is_switch_board_style else 50
-
- if not self.is_switch_board_style:
- res = collect_until_whitespace(self.f) # <CellClip>
- if res == b'<CellClip>':
- self.clip_value = get_uint32(self.f.read(4))
-
- collect_until_token(self.f, b'FM')
-
- self.gifo_x_weights, self.gifo_x_weights_shape = read_binary_matrix(self.f, skip=True)
- self.gifo_r_weights, self.gifo_r_weights_shape = read_binary_matrix(self.f)
- self.gifo_biases = read_binary_vector(self.f)
- self.input_gate_weights = read_binary_vector(self.f)
- self.forget_gate_weights = read_binary_vector(self.f)
- self.output_gate_weights = read_binary_vector(self.f)
-
- if not self.is_switch_board_style:
- self.projection_weights, self.projection_weights_shape = read_binary_matrix(self.f)
-
- def load_build(self):
- self.load_weights_biases_attrs()
- return self.construct_sub_graph()
-
- def construct_sub_graph(self):
- self.attrs.update(dict(gifo_x_weights=self.gifo_x_weights, gifo_r_weights=self.gifo_r_weights,
- gifo_biases=self.gifo_biases, input_gate_weights=self.input_gate_weights,
- forget_gate_weights=self.forget_gate_weights,
- clip_value=self.clip_value,
- output_gate_weights=self.output_gate_weights,
- projection_weights=self.projection_weights,
- gifo_x_weights_shape=self.gifo_x_weights_shape,
- gifo_r_weights_shape=self.gifo_r_weights_shape,
- projection_weights_shape=self.projection_weights_shape,
- type='LSTMProjectedStreams'))
- return add_single_node(self.graph, self.layer_name, self.prev_layer_name, self.attrs, self.weights, self.biases)
-
-
-class ConvolutionKaldiLayer(KaldiLayer):
- def __init__(self, f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style=False):
- super().__init__(f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style, 'Convolution')
- self.kernel = None
- self.stride = None
- self.output = None
- self.weights_shape = None
- self.shape = None
- self.patch_stride = None
-
- def load_build(self):
- '''
- /* Prepare feature patches, the layout is:
- 274 |----------|----------|----------|---------| (in = spliced frames)
- 275 xxx xxx xxx xxx (x = selected elements)
- 276
- 277 xxx : patch dim
- 278 xxx
- 279 ^---: patch step
- 280 |----------| : patch stride
- 281
- 282 xxx-xxx-xxx-xxx : filter dim
- 283
- '''
- self.kernel = read_token_value(self.f, b'<PatchDim>')
- self.stride = read_token_value(self.f, b'<PatchStep>')
- self.patch_stride = read_token_value(self.f, b'<PatchStride>')
-
- if (self.patch_stride - self.kernel) % self.stride != 0:
- raise Error(
- 'Kernel size and stride does not correspond to `patch_stride` attribute of Convolution layer. ' +
- refer_to_faq_msg(93))
-
- do_loop = True
- while do_loop:
- self.f.read(1)
- first_char = self.f.read(1)
- self.f.seek(-2, os.SEEK_CUR)
- if first_char == b'L':
- read_token_value(self.f, b'<LearnRateCoef>')
- elif first_char == b'B':
- read_token_value(self.f, b'<BiasLearnRateCoef>')
- elif first_char == b'M':
- read_token_value(self.f, b'<MaxNorm>')
- elif first_char == b'!':
- read_token_value(self.f, b'<EndOfComponent>')
- do_loop = False
- else:
- do_loop = False
- self.load_weights_biases_attrs()
-
- self.output = self.biases.shape[0]
- if self.weights_shape[0] != self.output:
- raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' +
- refer_to_faq_msg(93))
- self.attrs.update({
- 'kernel': self.kernel,
- 'stride': self.stride,
- 'output': self.output,
- 'bias_term': True,
- 'patch_stride': self.patch_stride
- })
- return self.construct_sub_graph()
-
- def load_weights_biases_attrs(self):
- collect_until_whitespace(self.f)
- self.weights, self.weights_shape = read_binary_matrix(self.f)
- collect_until_whitespace(self.f)
- self.biases = read_binary_vector(self.f)
-
-
-class PoolingKaldiLayer(KaldiLayer):
- def __init__(self, f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style=False,
- pool_method='Max'):
- super().__init__(f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style, 'Pooling')
- self.pad = 0
- self.window = None
- self.pool_method = pool_method
- self.stride = None
-
- def load_build(self):
- self.window = read_token_value(self.f, b'<PoolSize>')
- self.stride = read_token_value(self.f, b'<PoolStep>')
- pool_stride = read_token_value(self.f, b'<PoolStride>')
-
- self.attrs.update({
- 'kernel': self.window,
- 'stride': self.stride,
- 'pool_stride': pool_stride,
- 'pool_method': self.pool_method
- })
- return self.construct_sub_graph()
-
-
-class ScaleShiftKaldiLayer(KaldiLayer):
- def __init__(self, f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style=False,
- weights=None):
- super().__init__(f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style, 'ScaleShift')
- self.weights = weights
- self.bias_term = False
-
- def load_build(self):
- if collect_until_whitespace(self.f) == b'<AddShift>':
- self.layer_o = read_binary_integer_token(self.f)
- self.layer_o = read_binary_integer_token(self.f)
- self.biases = read_binary_vector(self.f)
- self.bias_term = True
- self.attrs.update({'bias_term': self.bias_term})
- return self.construct_sub_graph()
-
-
-class RescaleKaldiLayer(KaldiLayer):
- def __init__(self, f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style=False):
- super().__init__(f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style, 'ScaleShift')
- self.weights = None
- self.bias_term = False
-
- def load_build(self):
- if self.f.read(1) == b'<':
- self.f.seek(-1, os.SEEK_CUR)
- read_token_value(self.f, b'<LearnRateCoef>')
- else:
- self.f.seek(-1, os.SEEK_CUR)
- self.weights = read_binary_vector(self.f)
- next_token = collect_until_whitespace(self.f)
- if next_token == b'<!EndOfComponent>':
- next_token = collect_until_whitespace(self.f)
- if next_token == b'<AddShift>':
- read_binary_integer_token(self.f) # input layer
- self.layer_o = read_binary_integer_token(self.f)
- if self.f.read(1) == b'<':
- self.f.seek(-1, os.SEEK_CUR)
- read_token_value(self.f, b'<LearnRateCoef>')
- else:
- self.f.seek(-1, os.SEEK_CUR)
- self.biases = read_binary_vector(self.f)
- self.bias_term = True
- self.attrs.update({'bias_term': self.bias_term})
- else:
- self.f.seek(-len(next_token), os.SEEK_CUR)
- return self.construct_sub_graph()
-
-
-class ParallelKaldiLayer(KaldiLayer):
- def __init__(self, f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style=False):
- super().__init__(f, graph, layer_i, layer_o, layer_name, prev_layer_name, is_switch_board_style, 'Parallel')
- self.output_nodes = []
- self.edge_attrs = {
- 'out': None,
- 'in': 0,
- 'name': None,
- 'fw_tensor_name': None,
- 'fw_tensor_port': None,
- 'in_attrs': ['in', 'name'],
- 'out_attrs': ['out', 'name'],
- 'data_attrs': ['fw_tensor_name', 'fw_tensor_port']
- }
-
- def load_build(self):
- nnet_count = read_token_value(self.f, b'<NestedNnetCount>')
- slice_id = add_single_node(self.graph, 'Slice', self.prev_layer_name,
- {'type': 'Slice', 'axis': 1, 'slice_point': []}, None, None)
- for i in range(nnet_count):
- read_token_value(self.f, b'<NestedNnet>')
- graph, shape = load_kaldi_nnet_model(self.f, None)
- input_nodes = [n for n in graph.nodes(data=True) if n[1]['type'] == 'GlobalInput']
- for input_node in input_nodes:
- shape_subgraph = input_node[1]['shape']
- if i != nnet_count - 1:
- self.graph.node[slice_id]['pb'].slice_point.append(shape_subgraph[1])
- graph.remove_node(input_node[0])
- mapping = {node: unique_id(self.graph, node) for node in graph.nodes(data=False) if node in self.graph}
- g = nx.relabel_nodes(graph, mapping)
- for val in mapping.values():
- g.node[val]['name'] = val
- self.graph.add_nodes_from(g.nodes(data=True))
- self.graph.add_edges_from(g.edges(data=True))
- sorted_nodes = tuple(nx.topological_sort(g))
- self.edge_attrs['out'] = i
- self.edge_attrs['name'] = sorted_nodes[0]
- self.edge_attrs['fw_tensor_name'] = slice_id
- self.edge_attrs['fw_tensor_port'] = sorted_nodes[0]
- self.graph.add_edge(slice_id, sorted_nodes[0], **self.edge_attrs)
- self.output_nodes.append(sorted_nodes[-1])
- end_token = collect_until_whitespace(self.f)
- if end_token != b'</ParallelComponent>':
- raise Error('Expected token `</ParallelComponent>`, has {}'.format(end_token) + refer_to_faq_msg(99))
- return self.construct_sub_graph()
-
- def construct_sub_graph(self):
- new_id = unique_id(self.graph, '{}_'.format('Concat'))
- layer = KaldiNode(new_id)
- layer.set_attrs(dict(axis=1))
- layer.type = 'Concat'
- self.graph.add_node(new_id, pb=layer, kind='op')
- self.graph.node[layer.name].update(common_kaldi_fields(Node(self.graph, layer.name)))
- self.edge_attrs['out'] = 0
- self.edge_attrs['name'] = layer.name
- self.edge_attrs['fw_tensor_port'] = layer.name
- for i, output_node in enumerate(self.output_nodes):
- self.edge_attrs['fw_tensor_name'] = output_node
- self.edge_attrs['in'] = i
- self.graph.add_edge(output_node, layer.name, **self.edge_attrs)
- return new_id
-
-
-def read_token_value(file, token: bytes = b'', value_type: type = np.uint32):
- getters = {
- np.uint32: read_binary_integer_token
- }
- current_token = collect_until_whitespace(file)
- if token != b'' and token != current_token:
- raise Error('Can not load token {} from Kaldi model'.format(token) +
- refer_to_faq_msg(94))
- return getters[value_type](file)
-
-
-def read_binary_integer_token(file_path):
- buffer_size = file_path.read(1)
- return get_uint32(file_path.read(buffer_size[0]))
-
-
-def collect_until_whitespace(file_path):
- res = b''
- while True:
- new_sym = file_path.read(1)
- if new_sym == b' ':
- break
- res += new_sym
- return res
-
-
-def read_blob(file_path, size):
- float_size = 4
- data = file_path.read(size * float_size)
- return np.fromstring(data, dtype='<f4')
-
-
-layer_weights_biases_attrs_getter = {
- 'affinetransform': AffineTransformKaldiLayer,
- 'sigmoid': lambda f, g, i, o, name, prev, style: KaldiLayer(f, g, i, o, name, prev, style, type='Sigmoid'),
- 'softmax': lambda f, g, i, o, name, prev, style: KaldiLayer(f, g, i, o, name, prev, style, type='SoftMax'),
- 'lstmprojectedstreams': LSTMProjectedStreamsKaldiLayer,
- 'lstmprojected': LSTMProjectedStreamsKaldiLayer,
- 'maxpoolingcomponent': PoolingKaldiLayer,
- 'convolutionalcomponent': ConvolutionKaldiLayer,
- 'rescale': RescaleKaldiLayer,
- 'parallelcomponent': ParallelKaldiLayer,
-}
-
-
-def add_single_node(graph, layer_name, prev_layer_name, attrs, weights, biases):
- new_id = unique_id(graph, '{}_'.format(layer_name))
-
- layer = KaldiNode(new_id)
- layer.set_weight(weights)
- layer.set_bias(biases)
- if attrs:
- layer.set_attrs(attrs)
-
- graph.add_node(layer.name, pb=layer, kind='op')
- graph.node[layer.name].update(common_kaldi_fields(Node(graph, layer.name)))
-
- edge_attrs = {
- 'out': 0,
- 'in': 0,
- 'name': layer.name,
- 'fw_tensor_debug_info': [(prev_layer_name, layer.name)], # debug anchor for a framework tensor name and port
- 'in_attrs': ['in', 'name'],
- 'out_attrs': ['out', 'name'],
- 'data_attrs': ['fw_tensor_debug_info']
- }
-
- graph.add_edge(prev_layer_name, layer.name, **edge_attrs)
-
- return new_id
-
-
-def find_first_tag(file):
- tag = b''
- while True:
- symbol = file.read(1)
- if tag == b'' and symbol != b'<':
- continue
- tag += symbol
- if symbol != b'>':
- continue
- return tag
-
-
-def find_first_component(file):
- while True:
- tag = find_first_tag(file)
- component_name = tag.decode('ascii').lower()
- if component_name[1:-1] in layer_weights_biases_attrs_getter.keys() or tag == b'</Nnet>' or tag == b'<EndOfComponent>':
- file.read(1) # read ' '
- return component_name
-
-
-def load_kaldi_nnet_model(nnet_path, check_sum: int = 16896):
- """
- Structure of the file is the following:
- magic-number(16896)<Nnet> <Next Layer Name> weights etc.
- :param nnet_path:
- :param check_sum:
- :return:
- """
- if isinstance(nnet_path, str):
- file = open(nnet_path, "rb")
- elif isinstance(nnet_path, IOBase):
- file = nnet_path
-
- # 1. check the file
- # first element is 16896<Nnet>
- if check_sum and get_uint16(file.read(2)) != check_sum:
- raise Error('File {} does not appear to be a Kaldi file (magic number does not match). ', nnet_path,
- refer_to_faq_msg(89)
- )
-
- while True:
- name = find_first_tag(file)
- if name == b'<Nnet>':
- file.read(1)
- break
- elif len(name) == 6:
- raise Error('Kaldi model should start with <Nnet> tag. ',
- refer_to_faq_msg(89))
- graph = nx.MultiDiGraph()
- input_name = 'Input'
- graph.add_node(input_name, pb=None, type='GlobalInput', name=input_name, shape=None, kind='op')
-
- prev_layer_name = input_name
- input_shapes = {}
-
- while True:
- """
- Typical structure of the layer
- <Layer> |Size of output value in bits|Actual value of output|Size of input value in bits|Actual value of input|\
- FM Matrix|FV Vector| </Layer>
- """
- layer_name = find_first_component(file)
- if layer_name == '</nnet>':
- break
- elif layer_name == '<!endofcomponent>':
- continue
- extracted_name = layer_name[1:-1]
-
- layer_o = read_binary_integer_token(file)
- layer_i = read_binary_integer_token(file)
-
- if prev_layer_name == 'Input':
- graph.node['Input']['shape'] = np.array([1, layer_i], dtype=np.int64)
-
- cls = layer_weights_biases_attrs_getter[extracted_name]
- cls_instance = cls(file, graph, layer_i, layer_o, extracted_name, prev_layer_name, False)
-
- prev_layer_name = cls_instance.load_build()
- return graph, input_shapes
-
-
-def read_counts_file(file_path):
- with open(file_path, 'r') as f:
- file_content = f.readlines()
- if len(file_content) > 1:
- raise Error('Expect counts file to be one-line file. ' +
- refer_to_faq_msg(90))
-
- counts_line = file_content[0].strip().replace('[', '').replace(']', '')
- try:
- counts = np.fromstring(counts_line, dtype=int, sep=' ')
- except TypeError:
- raise Error('Expect counts file to contain list of integers.' +
- refer_to_faq_msg(90))
- cutoff = 1.00000001e-10
- counts = [cutoff if count < cutoff else count for count in counts]
- scale = 1.0 / np.sum(counts)
- for idx, count in enumerate(counts):
- val = np.log(scale * count)
- if count == cutoff:
- val += np.iinfo(np.float32).max / 2
- counts[idx] = val
- return counts
diff --git a/model-optimizer/mo/front/kaldi/loader/__init__.py b/model-optimizer/mo/front/kaldi/loader/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/loader/__init__.py
diff --git a/model-optimizer/mo/front/kaldi/loader/loader.py b/model-optimizer/mo/front/kaldi/loader/loader.py
new file mode 100644
index 000000000..8bf9085a1
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/loader/loader.py
@@ -0,0 +1,215 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import io
+
+import numpy as np
+import struct
+from io import IOBase
+
+import networkx as nx
+import logging as log
+
+from mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, find_next_component, get_name_from_path, \
+ find_end_of_component, end_of_nnet_tag, read_binary_integer32_token, get_parameters, read_token_value, collect_until_token, \
+ create_edge_attrs
+from mo.graph.graph import unique_id, Node
+from mo.utils.error import Error
+from mo.utils.utils import refer_to_faq_msg
+
+
def read_counts_file(file_path):
    """
    Read a Kaldi counts file (a single line of integers, optionally wrapped in
    '[ ]') and convert it to log-scaled prior values.

    Zero/tiny counts are clipped to a small cutoff before the log and then
    offset by a huge constant, mirroring the pre-vectorized implementation.

    :param file_path: path to the counts file
    :return: numpy float32 array of log-scaled counts
    :raises Error: when the file has more than one line or is not numeric
    """
    with open(file_path, 'r') as f:
        file_content = f.readlines()
    if len(file_content) > 1:
        raise Error('Expect counts file to be one-line file. ' +
                    refer_to_faq_msg(90))

    counts_line = file_content[0].strip().replace('[', '').replace(']', '')
    try:
        # float32 (not int): the cutoff assigned below is fractional; an int
        # array would silently truncate it to 0 and np.log would produce -inf
        counts = np.fromstring(counts_line, dtype=np.float32, sep=' ')
    except TypeError:
        raise Error('Expect counts file to contain list of integers.' +
                    refer_to_faq_msg(90))
    cutoff = 1.00000001e-10
    cutoff_idxs = np.where(counts < cutoff)
    counts[cutoff_idxs] = cutoff
    scale = 1.0 / np.sum(counts)
    counts = np.log(counts * scale)
    # push the clipped entries far away, as the original per-element loop did
    counts[cutoff_idxs] += np.finfo(np.float32).max / 2
    return counts
+
+
def load_parallel_component(file_descr, graph: nx.MultiDiGraph, prev_layer_id):
    """
    Load ParallelComponent of the Kaldi model.
    ParallelComponent contains parallel nested networks.
    Slice is inserted before nested networks.
    Outputs of nested networks concatenate with layer Concat.

    :param file_descr: descriptor of the model file
    :param graph: graph with the topology.
    :param prev_layer_id: id of the input layers for parallel component layer
    :return: id of the concat layer - last layer of the parallel component layers
    """
    nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
    log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))

    # Slice node distributes the single input among the nested sub-networks.
    slice_id = unique_id(graph, prefix='Slice')
    graph.add_node(slice_id, parameters=None, op='slice', kind='op')

    slice_node = Node(graph, slice_id)
    graph.add_edge(prev_layer_id, slice_id, **create_edge_attrs(prev_layer_id, slice_id))
    slices_points = []

    outputs = []

    for i in range(nnet_count):
        read_token_value(file_descr, b'<NestedNnet>')
        collect_until_token(file_descr, b'<Nnet>')
        # Each nested network is parsed as a standalone nnet1 graph.
        g, shape = load_kalid_nnet1_model(file_descr, 'Nested_net_{}'.format(i))
        # NOTE(review): the lookup iterates the OUTER graph, not the nested `g`;
        # it works only because both graphs use the same 'Input' node id — confirm
        # whether `g.nodes(data=True)` was intended here.
        input_nodes = [n for n in graph.nodes(data=True) if n[1]['op'] == 'Input']
        if i != nnet_count - 1:
            # slice points: widths of all but the last nested network
            slices_points.append(shape[1])
        g.remove_node(input_nodes[0][0])
        # Rename clashing node ids before merging `g` into the main graph.
        mapping = {node: unique_id(graph, node) for node in g.nodes(data=False) if node in graph}
        g = nx.relabel_nodes(g, mapping)
        for val in mapping.values():
            g.node[val]['name'] = val
        graph.add_nodes_from(g.nodes(data=True))
        graph.add_edges_from(g.edges(data=True))
        sorted_nodes = tuple(nx.topological_sort(g))
        # i-th output port of Slice feeds the first node of the i-th nested net
        edge_attrs = create_edge_attrs(slice_id, sorted_nodes[0])
        edge_attrs['out'] = i
        graph.add_edge(slice_id, sorted_nodes[0], **edge_attrs)
        outputs.append(sorted_nodes[-1])
    # Serialize the slice points in the same binary layout the Kaldi extractors
    # expect: one size byte (4), the count, then the uint32 points.
    packed_sp = struct.pack("B", 4) + struct.pack("I", len(slices_points))
    for i in slices_points:
        packed_sp += struct.pack("I", i)
    slice_node.parameters = io.BytesIO(packed_sp)
    concat_id = unique_id(graph, prefix='Concat')
    graph.add_node(concat_id, parameters=None, op='concat', kind='op')
    for i, output in enumerate(outputs):
        # i-th input port of Concat consumes the i-th nested net's last node
        edge_attrs = create_edge_attrs(output, concat_id)
        edge_attrs['in'] = i
        graph.add_edge(output, concat_id, **edge_attrs)
    return concat_id
+
+
def load_kaldi_model(nnet_path):
    """
    Load a Kaldi model from a file path or an already-open binary stream.

    File layout: magic-number(16896)<Nnet> <Next Layer Name> weights etc.

    :param nnet_path: path to the model file or a binary file-like object
    :return: result of the format-specific loader
    """
    nnet_name = None
    if isinstance(nnet_path, str):
        file_desc = open(nnet_path, "rb")
        nnet_name = get_name_from_path(nnet_path)
    elif isinstance(nnet_path, IOBase):
        file_desc = nnet_path
    else:
        raise Error('Unsupported type of Kaldi model')

    # The first tag selects the model flavour: nnet1 vs nnet2.
    loaders = {
        '<Nnet>': load_kalid_nnet1_model,
        '<TransitionModel>': load_kalid_nnet2_model,
    }
    first_tag = find_next_tag(file_desc)
    if first_tag not in loaders:
        raise Error('Kaldi model should start with <Nnet> or <TransitionModel> tag. ',
                    refer_to_faq_msg(89))
    read_placeholder(file_desc, 1)  # skip the whitespace after the tag

    return loaders[first_tag](file_desc, nnet_name)
+
+
def load_kalid_nnet1_model(file_descr, name):
    """
    Build a NetworkX graph from a Kaldi nnet1 binary stream.

    Every supported component becomes one 'op' node whose raw payload bytes are
    stored in its 'parameters' attribute for later extraction.

    :param file_descr: binary descriptor positioned after the <Nnet> tag
    :param name: name of the created graph
    :return: (graph, input_shape); input_shape is np.array([1, layer_i]) of the
             first component, or [] if no component set it — TODO confirm
             callers handle the empty-list case
    """
    graph = nx.MultiDiGraph(name=name)

    # Synthetic input node; its shape is filled in when the first real
    # component reveals its input dimension.
    prev_layer_id = 'Input'
    graph.add_node(prev_layer_id, name=prev_layer_id, kind='op', op='Input', parameters=None)
    input_shape = []

    while True:
        component_type = find_next_component(file_descr)
        # end_of_nnet_tag.lower()[1:-1] == '/nnet' marks the end of the model
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        # output then input dimension precede each component's payload
        layer_o = read_binary_integer32_token(file_descr)
        layer_i = read_binary_integer32_token(file_descr)

        if component_type == 'parallelcomponent':
            # expanded in place into Slice + nested nets + Concat
            prev_layer_id = load_parallel_component(file_descr, graph, prev_layer_id)
            continue

        # remember the payload byte span; parsing happens later in extractors
        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        end_index -= len(end_tag)
        layer_id = unique_id(graph, prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index, end_index),
                       op=component_type,
                       kind='op',
                       layer_i=layer_i,
                       layer_o=layer_o)

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Input':
            # the first real component defines the model's input shape
            prev_node['shape'] = np.array([1, layer_i], dtype=np.int64)
            input_shape = np.array([1, layer_i], dtype=np.int64)
        graph.add_edge(prev_layer_id, layer_id, **create_edge_attrs(prev_layer_id, layer_id))
        prev_layer_id = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id, component_type))
    return graph, input_shape
+
+
def load_kalid_nnet2_model(file_descr, nnet_name):
    """
    Build a NetworkX graph from a Kaldi nnet2 binary stream.

    Every component becomes one 'op' node whose raw payload bytes are stored in
    its 'parameters' attribute for later extraction.

    :param file_descr: binary descriptor positioned before the <Nnet> tag
    :param nnet_name: name of the created graph
    :return: (graph, input_shape); input_shape is np.array([1, input_dim]) of
             the first component, or [] when the model has no components
    """
    graph = nx.MultiDiGraph(name=nnet_name)
    input_name = 'Input'
    graph.add_node(input_name, name=input_name, kind='op', op='Input', parameters=None, shape=None)

    prev_layer_id = input_name
    # Fix: initialize like the nnet1 loader does, so the return statement can
    # not hit UnboundLocalError when <NumComponents> is 0.
    input_shape = []

    collect_until_token(file_descr, b'<Nnet>')
    num_components = read_token_value(file_descr, b'<NumComponents>')
    log.debug('Network contains {} components'.format(num_components))
    collect_until_token(file_descr, b'<Components>')
    for _ in range(num_components):
        component_type = find_next_component(file_descr)

        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break
        # remember the payload byte span; parsing happens later in extractors
        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        layer_id = unique_id(graph, prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index, end_index),
                       op=component_type,
                       kind='op')

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Input':
            # the first component's <InputDim> defines the model input shape
            parameters = Node(graph, layer_id).parameters
            input_dim = read_token_value(parameters, b'<InputDim>')
            prev_node['shape'] = np.array([1, input_dim], dtype=np.int64)
            input_shape = np.array([1, input_dim], dtype=np.int64)
        graph.add_edge(prev_layer_id, layer_id, **create_edge_attrs(prev_layer_id, layer_id))
        prev_layer_id = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id, component_type))
    return graph, input_shape
diff --git a/model-optimizer/mo/front/kaldi/loader/utils.py b/model-optimizer/mo/front/kaldi/loader/utils.py
new file mode 100644
index 000000000..4dbba940d
--- /dev/null
+++ b/model-optimizer/mo/front/kaldi/loader/utils.py
@@ -0,0 +1,299 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import io
+
+import numpy as np
+import os
+import struct
+
+from mo.utils.error import Error
+from mo.utils.utils import refer_to_faq_msg
+
# Tag that closes a whole network in the Kaldi binary format.
end_of_nnet_tag = '</Nnet>'
# Tag that closes a single component.
end_of_component_tag = '<!EndOfComponent>'

# Lower-cased names of all Kaldi components the loader recognizes; tags whose
# name is not listed here are skipped by find_next_component().
supported_components = [
    'addshift',
    'affinecomponent',
    'affinetransform',
    'convolutional1dcomponent',
    'convolutionalcomponent',
    'copy',
    'fixedaffinecomponent',
    'lstmprojected',
    'lstmprojectedstreams',
    'maxpoolingcomponent',
    'parallelcomponent',
    'rescale',
    'sigmoid',
    'softmax',
    'softmaxcomponent',
    'splicecomponent',
    'tanhcomponent',
    'normalizecomponent',
    'affinecomponentpreconditionedonline',
    'rectifiedlinearcomponent'
]
+
+
def get_bool(s: bytes) -> bool:
    """
    Decode a single byte as a boolean.

    :param s: one-byte buffer holding the value
    :return: the decoded bool
    """
    value, = struct.unpack('?', s)
    return value
+
+
def get_uint16(s: bytes) -> int:
    """
    Decode two bytes as an unsigned 16-bit integer.

    :param s: two-byte buffer holding the value
    :return: the decoded integer
    """
    value, = struct.unpack('H', s)
    return value
+
+
def get_uint32(s: bytes) -> int:
    """
    Decode four bytes as an unsigned 32-bit integer.

    :param s: four-byte buffer holding the value
    :return: the decoded integer
    """
    value, = struct.unpack('I', s)
    return value
+
+
def get_uint64(s: bytes) -> int:
    """
    Decode eight bytes as an unsigned 64-bit integer.

    Uses the unsigned 'Q' format: the previous signed 'q' format misread values
    with the top bit set as negative numbers, contradicting this function's
    name and documented contract.

    :param s: eight-byte buffer holding the value
    :return: the decoded non-negative integer
    """
    return struct.unpack('Q', s)[0]
+
+
def read_binary_bool_token(file_desc: io.BufferedReader) -> bool:
    """
    Read the next boolean value from the stream.
    Advances the stream by exactly one byte.

    :param file_desc: file descriptor
    :return: the decoded boolean
    """
    return struct.unpack('?', file_desc.read(1))[0]
+
+
def read_binary_integer32_token(file_desc: io.BufferedReader) -> int:
    """
    Read a size-prefixed unsigned 32-bit value from the stream.
    Advances the stream by 5 bytes (1 size byte + 4 payload bytes).

    :param file_desc: file descriptor
    :return: the decoded integer
    """
    payload_width = file_desc.read(1)[0]  # first byte stores the payload size
    return struct.unpack('I', file_desc.read(payload_width))[0]
+
+
def read_binary_integer64_token(file_desc: io.BufferedReader) -> int:
    """
    Read a size-prefixed 64-bit value from the stream.
    Advances the stream by 9 bytes (1 size byte + 8 payload bytes).

    :param file_desc: file descriptor
    :return: the decoded integer
    """
    payload_width = file_desc.read(1)[0]  # first byte stores the payload size
    return struct.unpack('q', file_desc.read(payload_width))[0]
+
+
def find_next_tag(file_desc: io.BufferedReader) -> str:
    """
    Scan the stream for the next complete tag.

    Any '<' restarts tag collection; a tag that fails ascii decoding is
    discarded and the scan continues.

    :param file_desc: file descriptor
    :return: string like '<sometag>'
    :raises Error: on end of stream before a tag is completed
    """
    collected = bytearray()
    while True:
        ch = file_desc.read(1)
        if ch == b'':
            raise Error('Unexpected end of Kaldi model')
        if ch == b'<':
            # every '<' starts a fresh tag, dropping anything collected so far
            collected = bytearray(b'<')
            continue
        if not collected:
            # still looking for an opening '<'
            continue
        collected += ch
        if ch != b'>':
            continue
        try:
            return bytes(collected).decode('ascii')
        except UnicodeDecodeError:
            # tags in a Kaldi model are always ascii; drop and keep scanning
            collected = bytearray()
+
+
def read_placeholder(file_desc: io.BufferedReader, size=3) -> bytes:
    """
    Consume *size* filler bytes from the stream and return them.

    :param file_desc: file descriptor
    :param size: number of bytes to consume (3 by default)
    :return: the consumed bytes
    """
    return file_desc.read(size)
+
+
def find_next_component(file_desc: io.BufferedReader) -> str:
    """
    Scan forward to the next known component tag.
    Tags whose name is not in supported_components are skipped silently.

    :param file_desc: file descriptor
    :return: lower-cased component name without the angle brackets
    """
    component_name = None
    while component_name is None:
        tag = find_next_tag(file_desc)
        candidate = tag[1:-1].lower()  # '<Name>' -> 'name'
        if candidate in supported_components or tag == end_of_nnet_tag:
            component_name = candidate
    # a single whitespace byte follows the component name
    read_placeholder(file_desc, 1)
    return component_name
+
+
def get_name_from_path(path: str) -> str:
    """
    Return the bare file name of *path*, without directories or extension.

    :param path: path to the file
    :return: name of the file
    """
    base = os.path.basename(path)
    name, _ = os.path.splitext(base)
    return name
+
+
def find_end_of_component(file_desc: io.BufferedReader, component: str, end_tags: tuple = ()):
    """
    Locate the tag that terminates *component* and the stream offset after it.

    The search stops on the component's closing tag, the generic
    end-of-component / end-of-nnet tags, any caller-supplied *end_tags*, or the
    opening tag of any other supported component.

    :param file_desc: file descriptor
    :param component: component name from supported_components
    :param end_tags: extra tags that should terminate the search
    :return: (terminating tag, stream position right after it)
    """
    stop_tags = {'</{}>'.format(component),
                 end_of_component_tag.lower(),
                 end_of_nnet_tag.lower()}
    stop_tags.update(end_tags)
    stop_tags.update('<{}>'.format(name) for name in supported_components)
    tag = find_next_tag(file_desc)
    while tag.lower() not in stop_tags:
        tag = find_next_tag(file_desc)
    return tag, file_desc.tell()
+
+
def get_parameters(file_desc: io.BufferedReader, start_index: int, end_index: int):
    """
    Copy the byte range [start_index, end_index) of the file into a BytesIO.
    Leaves the source stream positioned at end_index.

    :param file_desc: file descriptor
    :param start_index: offset where reading starts
    :param end_index: offset where reading stops
    :return: io.BytesIO with the copied bytes
    """
    file_desc.seek(start_index)
    return io.BytesIO(file_desc.read(end_index - start_index))
+
+
def read_token_value(file_desc: io.BufferedReader, token: bytes = b'', value_type: type = np.uint32):
    """
    Read the next '<Token> value' pair from the stream and return the value.

    :param file_desc: file descriptor
    :param token: expected token; b'' disables the check
    :param value_type: np.uint32, np.uint64 or bool - selects the value reader
    :return: the decoded value
    :raises Error: when the token in the stream differs from *token*
    """
    current_token = collect_until_whitespace(file_desc)
    if token != b'' and token != current_token:
        raise Error('Can not load token {} from Kaldi model'.format(token) +
                    refer_to_faq_msg(94))
    value_readers = {
        np.uint32: read_binary_integer32_token,
        np.uint64: read_binary_integer64_token,
        bool: read_binary_bool_token,
    }
    return value_readers[value_type](file_desc)
+
+
def collect_until_whitespace(file_desc: io.BufferedReader):
    """
    Read bytes until the first space or end of stream.

    :param file_desc: file descriptor
    :return: the bytes read, excluding the terminating space
    """
    collected = b''
    symbol = file_desc.read(1)
    while symbol not in (b' ', b''):
        collected += symbol
        symbol = file_desc.read(1)
    return collected
+
+
def collect_until_token(file_desc: io.BufferedReader, token):
    """
    Advance the stream until *token* has been consumed.

    :param file_desc: file descriptor
    :param token: bytes token to search for
    :raises Error: when the end of the stream is reached without finding it
    """
    while True:
        # a token may be glued to the preceding value, e.g. '<CellDim> 512<ClipGradient>',
        # hence the suffix comparison in addition to the exact one
        word = collect_until_whitespace(file_desc)
        if word == token or word[-len(token):] == token:
            return
        if isinstance(file_desc, io.BytesIO):
            size = len(file_desc.getbuffer())
        elif isinstance(file_desc, io.BufferedReader):
            size = os.fstat(file_desc.fileno()).st_size
        if file_desc.tell() == size:
            raise Error('End of the file. Token {} not found. {}'.format(token, file_desc.tell()))
+
+
def create_edge_attrs(prev_layer_id: str, next_layer_id: str) -> dict:
    """
    Build the default attribute dictionary for an edge prev -> next.

    :param prev_layer_id: id of the producer layer
    :param next_layer_id: id of the consumer layer
    :return: dictionary with the common edge attributes
    """
    attrs = {'out': 0, 'in': 0, 'name': next_layer_id}
    # debug anchor: which framework tensor this edge carries
    attrs['fw_tensor_debug_info'] = [(prev_layer_id, next_layer_id)]
    attrs['in_attrs'] = ['in', 'name']
    attrs['out_attrs'] = ['out', 'name']
    attrs['data_attrs'] = ['fw_tensor_debug_info']
    return attrs
+
+
def read_blob(file_desc: io.BufferedReader, size: int, dtype=np.float32):
    """
    Read a blob of *size* elements of type *dtype* from the stream.

    Generalized: the element width is taken from the dtype itself instead of a
    hard-coded {float32, int32} table, so any fixed-width numpy dtype works.

    :param file_desc: file descriptor
    :param size: number of elements in the blob
    :param dtype: numpy dtype of the stored values
    :return: 1-D numpy array with the blob contents
    """
    itemsize = np.dtype(dtype).itemsize
    data = file_desc.read(size * itemsize)
    return np.fromstring(data, dtype=dtype)
diff --git a/model-optimizer/mo/front/kaldi/register_custom_ops.py b/model-optimizer/mo/front/kaldi/register_custom_ops.py
index 1dfb5455e..237ee9121 100644
--- a/model-optimizer/mo/front/kaldi/register_custom_ops.py
+++ b/model-optimizer/mo/front/kaldi/register_custom_ops.py
@@ -1,5 +1,5 @@
"""
- Copyright (c) 2017-2018 Intel Corporation
+ Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/model-optimizer/mo/front/kaldi/utils.py b/model-optimizer/mo/front/kaldi/utils.py
index ef251feff..f29a643fd 100644
--- a/model-optimizer/mo/front/kaldi/utils.py
+++ b/model-optimizer/mo/front/kaldi/utils.py
@@ -13,29 +13,57 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+import io
+import numpy as np
+import os
+import logging as log
-import struct
+from mo.front.kaldi.loader.utils import read_placeholder, read_binary_integer32_token, read_blob, read_token_value, find_next_tag
+from mo.utils.error import Error
-def get_uint16(s):
- return struct.unpack('H', s)[0]
def read_binary_matrix(file_desc: io.BufferedReader, read_token: bool = True):
    """
    Read a binary Kaldi matrix: [placeholder] rows cols payload.

    :param file_desc: file descriptor
    :param read_token: whether a placeholder precedes the dimensions
    :return: (flat numpy array of rows*cols values, (rows, cols))
    """
    if read_token:
        read_placeholder(file_desc)
    rows = read_binary_integer32_token(file_desc)
    cols = read_binary_integer32_token(file_desc)
    return read_blob(file_desc, rows * cols), (rows, cols)
-def get_uint32(s):
- return struct.unpack('I', s)[0]
def read_binary_vector(file_desc: io.BufferedReader, read_token: bool = True, dtype=np.float32):
    """
    Read a binary Kaldi vector: [placeholder] length payload.

    :param file_desc: file descriptor
    :param read_token: whether a placeholder precedes the length
    :param dtype: numpy dtype of the stored elements
    :return: flat numpy array with the vector values
    """
    if read_token:
        read_placeholder(file_desc)
    count = read_binary_integer32_token(file_desc)
    return read_blob(file_desc, count, dtype)
-class KaldiNode:
- def __init__(self, name):
- self.name = name
- self.blobs = [None, None]
def read_learning_info(pb: io.BufferedReader):
    """
    Consume optional learning-related tokens that may follow a component's
    payload (<LearnRateCoef>, <LearningRate>, <BiasLearnRateCoef>, <MaxNorm>)
    and discard their values.

    :param pb: binary stream positioned right after the component payload
    """
    while True:
        read_placeholder(pb, 1)
        first_char = pb.read(1)
        # rewind both bytes: the read above was only a peek at the tag's start
        pb.seek(-2, os.SEEK_CUR)
        position = pb.tell()
        if first_char == b'L':
            # 'L' is ambiguous (<LearnRateCoef> vs <LearningRate>): peek the
            # whole tag, then restore the position
            cur_pos = pb.tell()
            token = find_next_tag(pb)
            pb.seek(cur_pos)
            if token in ['<LearnRateCoef>', '<LearningRate>']:
                token = bytes(token, 'ascii')
            else:
                log.debug('Unexpected tag: {}'.format(token))
                break
        elif first_char == b'B':
            token = b'<BiasLearnRateCoef>'
        elif first_char == b'M':
            token = b'<MaxNorm>'
        elif first_char == b'!':  # token = b'<EndOfComponent>'
            break
        else:
            break
        try:
            # read and drop the token's value; on mismatch restore the position
            # saved above and stop scanning
            read_token_value(pb, token)
        except Error:
            pb.seek(position)
            break
- def set_weight(self, w):
- self.blobs[0] = w
-
- def set_bias(self, b):
- self.blobs[1] = b
-
- def set_attrs(self, attrs: dict):
- for k, v in attrs.items():
- setattr(self, k, v)
diff --git a/model-optimizer/mo/front/mxnet/extractor.py b/model-optimizer/mo/front/mxnet/extractor.py
index e9783de4e..ad613f8b9 100644
--- a/model-optimizer/mo/front/mxnet/extractor.py
+++ b/model-optimizer/mo/front/mxnet/extractor.py
@@ -13,28 +13,26 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-from mo.front.common.partial_infer.elemental import copy_shape_infer
+
from mo.front.mxnet.extractors.batchnorm import batch_norm_ext
from mo.front.mxnet.extractors.concat import concat_ext
from mo.front.mxnet.extractors.crop import crop_ext
from mo.front.mxnet.extractors.eltwise import eltwise_ext
-from mo.front.mxnet.extractors.flatten import flatten_ext
from mo.front.mxnet.extractors.fully_connected import fully_connected_ext
from mo.front.mxnet.extractors.l2_normalization import l2_normalization_ext
from mo.front.mxnet.extractors.lrn import lrn_ext
from mo.front.mxnet.extractors.multibox_detection import multi_box_detection_ext
from mo.front.mxnet.extractors.multibox_prior import multi_box_prior_ext
from mo.front.mxnet.extractors.null import null_ext
-from mo.front.mxnet.extractors.reshape import reshape_ext
from mo.front.mxnet.extractors.scaleshift import scale_shift_ext
+from mo.front.mxnet.extractors.slice_axis import slice_axis_ext
from mo.front.mxnet.extractors.transpose import transpose_ext
-from mo.front.mxnet.extractors.up_sampling import up_sampling_ext
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
-from mo.front.mxnet.extractors.slice_axis import slice_axis_ext
-from mo.utils.error import Error
from mo.graph.graph import Node
+from mo.utils.error import Error
from mo.utils.utils import refer_to_faq_msg
+
def extractor_wrapper(mxnet_extractor):
return lambda node: mxnet_extractor(get_mxnet_layer_attrs(node.symbol_dict))
@@ -49,15 +47,10 @@ mxnet_op_extractors = {
'elemwise_add': extractor_wrapper(lambda attrs: eltwise_ext(attrs, infer=lambda a, b: a + b, op_type="sum")),
'elemwise_mul': extractor_wrapper(lambda attrs: eltwise_ext(attrs, infer=lambda a, b: a * b, op_type="mul")),
'_Plus': extractor_wrapper(lambda attrs: eltwise_ext(attrs, infer=lambda a, b: a + b, op_type="sum")),
- 'Flatten': extractor_wrapper(flatten_ext),
'FullyConnected': extractor_wrapper(fully_connected_ext),
- 'Reshape': extractor_wrapper(reshape_ext),
- 'UpSampling': extractor_wrapper(up_sampling_ext),
'transpose': extractor_wrapper(transpose_ext),
'LRN': extractor_wrapper(lrn_ext),
'L2Normalization': extractor_wrapper(l2_normalization_ext),
- 'Dropout': extractor_wrapper(lambda _: dict(infer=copy_shape_infer)),
- '_copy': extractor_wrapper(lambda _: dict(infer=copy_shape_infer)),
'_contrib_MultiBoxPrior': extractor_wrapper(multi_box_prior_ext),
'_contrib_MultiBoxDetection': extractor_wrapper(multi_box_detection_ext),
'broadcast_add': extractor_wrapper(lambda attrs: eltwise_ext(attrs, infer=lambda a, b: a + b, op_type="sum")),
@@ -84,6 +77,11 @@ def mxnet_op_extractor(node: Node):
refer_to_faq_msg(86),
op)
result_attr = mxnet_op_extractors[op](node)
+
+ if result_attr is None:
+ raise Error('Model Optimizer does not support layer "{}". Please, implement extension. '.format(node.name) +
+ refer_to_faq_msg(45))
+
result.update(result_attr)
supported = bool(result_attr)
return supported, result
diff --git a/model-optimizer/mo/front/mxnet/extractors/flatten.py b/model-optimizer/mo/front/mxnet/extractors/flatten.py
deleted file mode 100644
index 1f6f22e28..000000000
--- a/model-optimizer/mo/front/mxnet/extractors/flatten.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import numpy as np
-
-from mo.front.common.partial_infer.flatten import flatten_infer
-
-
-def flatten_ext(attrs):
- node_attrs = {
- 'type': 'Flatten',
- 'axis': 1,
- 'num_axes': 0,
- 'infer': flatten_infer
- }
- return node_attrs
diff --git a/model-optimizer/mo/front/mxnet/extractors/reshape.py b/model-optimizer/mo/front/mxnet/extractors/reshape.py
deleted file mode 100644
index 6c5b65b7d..000000000
--- a/model-optimizer/mo/front/mxnet/extractors/reshape.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import numpy as np
-
-from mo.front.common.partial_infer.elemental import single_output_infer
-from mo.front.common.partial_infer.reshape import tf_reshape_shape_infer
-
-
-def reshape_ext(attr):
- dim = attr.tuple("shape", int, None)
- node_attrs = {
- 'type': 'Reshape',
- 'axis': 0,
- 'num_axes': -1,
- 'dim': np.array(dim),
- 'infer': lambda node: single_output_infer(node, tf_reshape_shape_infer)
- }
- return node_attrs
diff --git a/model-optimizer/mo/front/mxnet/extractors/up_sampling.py b/model-optimizer/mo/front/mxnet/extractors/up_sampling.py
deleted file mode 100644
index 5804e1977..000000000
--- a/model-optimizer/mo/front/mxnet/extractors/up_sampling.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from mo.front.common.partial_infer.up_sampling import up_sampling_infer
-
-
-def up_sampling_ext(attrs):
- node_attrs = {
- 'type': 'Resample',
- 'scale': attrs.int("scale", 1),
- 'sample_type': 'caffe.ResampleParameter.NEAREST',
- 'antialias': 0,
- 'infer': up_sampling_infer
- }
- return node_attrs
diff --git a/model-optimizer/mo/front/mxnet/extractors/utils.py b/model-optimizer/mo/front/mxnet/extractors/utils.py
index e77306b71..8c8d23df9 100644
--- a/model-optimizer/mo/front/mxnet/extractors/utils.py
+++ b/model-optimizer/mo/front/mxnet/extractors/utils.py
@@ -69,6 +69,8 @@ class AttrDictionary(object):
if attr is None:
return default
if isinstance(attr, str):
+ if (not '(' in attr and not ')' in attr) and (not '[' in attr and not ']' in attr):
+ return (valtype(attr),)
if (not attr) or (not attr[1:-1].split(',')[0]):
return tuple([valtype(x) for x in default])
return StrTo.tuple(valtype, attr)
@@ -104,9 +106,10 @@ def get_mxnet_node_edges(node: dict, node_id: [int, str], nodes_list: list, inde
edge_list = []
for in_port, src_node_id in enumerate(node['inputs']):
src_node = src_node_id[0]
+ dest_port = src_node_id[1]
edge_attrs = {
'in': in_port,
- 'out': 0, # TODO Check if src_node_id[1] should be here (already used as fw_tensor_debug_info)
+ 'out': dest_port,
# debug anchor for name of tensor consumed at this input port
'fw_tensor_debug_info': [(nodes_list[src_node]['name'], src_node_id[1])],
'in_attrs': ['in'],
@@ -152,6 +155,9 @@ def load_params(input_model, data_names = ('data',)):
elif len(keys)>1 and 'arg' == keys[0]:
arg_keys.append(keys[1])
arg_params[keys[1]] = loaded_weight[key]
+ else:
+ arg_keys.append(key)
+ arg_params[key] = loaded_weight[key]
elif file_format == 'nd':
for key in loaded_weight:
if 'auxs' in input_model:
diff --git a/model-optimizer/mo/front/onnx/extractor.py b/model-optimizer/mo/front/onnx/extractor.py
index e4f631858..76a666c92 100644
--- a/model-optimizer/mo/front/onnx/extractor.py
+++ b/model-optimizer/mo/front/onnx/extractor.py
@@ -14,20 +14,17 @@
limitations under the License.
"""
-import logging as log
-
import numpy as np
+from mo.front.onnx.extractors.concat import concat_ext
from mo.front.onnx.extractors.const import onnx_const_ext
from mo.front.onnx.extractors.constant import onnx_constant_ext
+from mo.front.onnx.extractors.dropout import dropout_ext
from mo.front.onnx.extractors.eltwise import make_tf_eltwise
from mo.front.onnx.extractors.fused_bn import tf_fused_bn_extractor
from mo.front.onnx.extractors.matmul import onnx_gemm_ext
from mo.front.onnx.extractors.placeholder import onnx_placeholder_ext
-from mo.front.onnx.extractors.concat import concat_ext
-from mo.front.onnx.extractors.dropout import dropout_ext
from mo.front.onnx.extractors.reshape import onnx_reshape_ext
-from mo.front.tf.extractors.softmax import tf_softmax_ext
from mo.graph.graph import Node
@@ -48,14 +45,14 @@ onnx_op_extractors = {
make_tf_eltwise(lambda a, b: a + b, attrs={'type': 'Eltwise', 'operation': 'sum', 'can_be_bias': True})),
'Relu': node_pb_arg(make_tf_eltwise(lambda v: np.maximum(0, v), attrs={'type': 'ReLU'})), # 0 is an integer
'Reshape': onnx_reshape_ext,
- 'Softmax': node_pb_arg(tf_softmax_ext),
}
def common_onnx_fields(node: Node):
return {
'kind': 'op',
- 'name': node.id, # no reliable name for an onnx node, name can be empty, so we use that surrogate built as ID in the loaader
+ 'name': node.id,
+ # no reliable name for an onnx node, name can be empty, so we use that surrogate built as ID in the loaader
'op': node.op if node.has_valid('op') else node.pb.op_type,
'precision': 'FP32' # TODO use real precision derived from the model
}
diff --git a/model-optimizer/mo/front/tf/change_placeholder_type.py b/model-optimizer/mo/front/tf/change_placeholder_type.py
index f798f1569..8c35bc3a5 100644
--- a/model-optimizer/mo/front/tf/change_placeholder_type.py
+++ b/model-optimizer/mo/front/tf/change_placeholder_type.py
@@ -36,6 +36,8 @@ def change_placeholders_types_to_FP32(graph: nx.MultiDiGraph):
if all([is_node_casts_to_float(op) and len(op.in_nodes()) == 1 for op in next_ops]):
change_node_type(node, tf_types.DT_FLOAT)
remove_node_preserving_edges(node, next_ops) # remove 'Cast' nodes
+ elif all([is_node_gather(op) for op in next_ops] for op in next_ops):
+ change_node_type(node, tf_types.DT_FLOAT)
else:
raise Error(
('Cannot convert type of placeholder "{}" because not all of its outputs are "Cast" to float '
@@ -52,6 +54,11 @@ def is_node_casts_to_float(node: Node):
return 'pb' in attrs and attrs['pb'].op == 'Cast' and attrs['pb'].attr['DstT'].type == tf_types.DT_FLOAT
def is_node_gather(node: Node):
    """Return True when the node's TF op is 'GatherV2' with FP32 precision."""
    attrs = node.graph.node[node.id]
    if 'pb' not in attrs:
        return False
    return attrs['pb'].op == 'GatherV2' and attrs['precision'] == 'FP32'
+
+
def change_node_type(node: Node, new_type: type):
node.graph.node[node.id]['pb'].attr['dtype'].type = new_type
diff --git a/model-optimizer/mo/front/tf/extractor.py b/model-optimizer/mo/front/tf/extractor.py
index a653a25f1..d7af0d506 100644
--- a/model-optimizer/mo/front/tf/extractor.py
+++ b/model-optimizer/mo/front/tf/extractor.py
@@ -34,7 +34,6 @@ from mo.front.tf.extractors.random_uniform import tf_random_uniform_ext
from mo.front.tf.extractors.range import tf_range_ext
from mo.front.tf.extractors.reshape import tf_reshape_ext
from mo.front.tf.extractors.shape import tf_shape_ext
-from mo.front.tf.extractors.softmax import tf_softmax_ext
from mo.front.tf.extractors.space_to_batch import tf_space_to_batch_ext, tf_batch_to_space_ext
from mo.front.tf.extractors.split import tf_split_ext
from mo.front.tf.extractors.squeeze import tf_squeeze_ext
@@ -95,7 +94,7 @@ tf_op_extractors = {
'Prod': node_pb_arg(tf_reduce_prod_ext),
'Const': node_pb_arg(tf_const_ext),
'Placeholder': node_pb_arg(tf_placeholder_ext),
- 'Identity': node_pb_arg(make_tf_eltwise(lambda v: v)),
+ 'Identity': node_pb_arg(make_tf_eltwise(lambda v: v, attrs={'identity': True})),
'Add': node_pb_arg(
make_tf_eltwise(lambda a, b: a + b, attrs={'type': 'Eltwise', 'operation': 'sum', 'can_be_bias': True})),
'Mul': node_pb_arg(make_tf_eltwise(lambda a, b: a * b, attrs={'type': 'Eltwise', 'operation': 'mul'})),
@@ -111,18 +110,16 @@ tf_op_extractors = {
'Reshape': node_pb_arg(tf_reshape_ext),
'Squeeze': node_pb_arg(tf_squeeze_ext),
'Shape': node_pb_arg(tf_shape_ext),
- 'Softmax': node_pb_arg(tf_softmax_ext),
'SpaceToBatchND': node_pb_arg(tf_space_to_batch_ext),
'BatchToSpaceND': node_pb_arg(tf_batch_to_space_ext),
- 'StopGradient': node_pb_arg(make_tf_eltwise(lambda v: v)),
'Square': node_pb_arg(make_tf_eltwise(lambda a: a * a)),
'Minimum': node_pb_arg(make_tf_eltwise(lambda a, b: np.minimum(a, b))), # can use clamp if one argument is const
'Maximum': node_pb_arg(make_tf_eltwise(lambda a, b: np.maximum(a, b), attrs={'type': 'Eltwise',
'operation': 'max'})),
'Sum': node_pb_arg(tf_sum_ext),
'Range': node_pb_arg(tf_range_ext),
- 'ReadVariableOp': node_pb_arg(make_tf_eltwise(lambda v: v, attrs={'op': 'Identity'})),
- 'PlaceholderWithDefault': node_pb_arg(make_tf_eltwise(lambda v: v, attrs={'op': 'Identity'}))
+ 'ReadVariableOp': node_pb_arg(make_tf_eltwise(lambda v: v, attrs={'identity': True})),
+ 'PlaceholderWithDefault': node_pb_arg(make_tf_eltwise(lambda v: v, attrs={'identity': True}))
}
diff --git a/model-optimizer/mo/front/tf/extractors/softmax.py b/model-optimizer/mo/front/tf/extractors/softmax.py
deleted file mode 100644
index bc2eb7c8f..000000000
--- a/model-optimizer/mo/front/tf/extractors/softmax.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from mo.front.common.partial_infer.elemental import copy_shape_infer
-
-
-def tf_softmax_ext(pb):
- return {
- 'type': 'SoftMax',
- 'infer': copy_shape_infer
- }
diff --git a/model-optimizer/mo/front/tf/extractors/utils.py b/model-optimizer/mo/front/tf/extractors/utils.py
index a60d3e82a..5b736df0c 100644
--- a/model-optimizer/mo/front/tf/extractors/utils.py
+++ b/model-optimizer/mo/front/tf/extractors/utils.py
@@ -80,7 +80,7 @@ def tf_tensor_content(tf_dtype, shape, pb_tensor):
# return np.array(type_helper[1](pb_tensor), dtype=type_helper[0])
else:
if pb_tensor.tensor_content:
- flat = np.frombuffer(pb_tensor.tensor_content, type_helper[0])
+ flat = np.array(np.frombuffer(pb_tensor.tensor_content, type_helper[0]))
if len(flat) == shape.prod():
return flat.reshape(shape)
else:
diff --git a/model-optimizer/mo/front/tf/graph_utils.py b/model-optimizer/mo/front/tf/graph_utils.py
index 67335a245..2a8454c67 100644
--- a/model-optimizer/mo/front/tf/graph_utils.py
+++ b/model-optimizer/mo/front/tf/graph_utils.py
@@ -133,7 +133,7 @@ def add_activation_function_after_node(graph: nx.MultiDiGraph, node: Node, activ
"""
if activation_function == 'SOFTMAX':
# softmax to be applied to the confidence
- softmax_conf_op = Softmax(graph, dict(axis=1, nchw_layout=True))
+ softmax_conf_op = Softmax(graph, dict(axis=-1, nchw_layout=True))
activation_node = softmax_conf_op.create_node([node], dict(name=node.name + '/softmax'))
elif activation_function == 'SIGMOID':
# sigmoid activation function to be applied to the confidence
diff --git a/model-optimizer/mo/front/tf/loader.py b/model-optimizer/mo/front/tf/loader.py
index b8d8ca192..8310e0acb 100644
--- a/model-optimizer/mo/front/tf/loader.py
+++ b/model-optimizer/mo/front/tf/loader.py
@@ -14,9 +14,12 @@
limitations under the License.
"""
+import logging as log
import os
import re
+import networkx as nx
+
from mo.utils.error import Error, FrameworkError
from mo.utils.utils import refer_to_faq_msg
@@ -31,6 +34,55 @@ from mo.graph.graph import create_graph_with_nodes
from mo.utils.summarize_graph import summarize_graph
+def freeze_checkpoints(graph_def: tf.GraphDef, checkpoint_dir: str, output_node_names: list):
+ """
+ Loads all the variables in a graph and stores them in a separate dictionary. Freezes output nodes in the graph
+ :param graph_def: GraphDef object holding the network.
+ :param checkpoint_dir: path to directory with checkpoint files with values of graph variables.
+ :param output_node_names: list of output node names.
+ :return: GraphDef containing a simplified version of the original.
+ """
+ log.debug("Loading checkpoint files from directory: {}".format(checkpoint_dir))
+ checkpoint_files = []
+ for checkpoint_name in sorted(os.listdir(checkpoint_dir)):
+ checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)
+ if os.path.isfile(checkpoint_path):
+ checkpoint_files.append(checkpoint_path)
+ log.debug("File {} will be loaded".format(checkpoint_path))
+ else:
+ log.debug("Path {} is not a file. Skipping".format(checkpoint_path))
+
+ if len(checkpoint_files) == 0:
+ raise Error("There are no checkpoint files in directory: {}".format(checkpoint_dir))
+
+ tf.import_graph_def(graph_def, name='')
+
+ with tf.Session() as sess:
+ uninitialized_variables = [str(v, 'utf-8') for v in set(sess.run(tf.report_uninitialized_variables()))]
+ all_variables = [n.name for n in sess.graph.as_graph_def().node if n.op in ['Variable', 'VariableV2']]
+ white_list = [v for v in all_variables if v not in uninitialized_variables]
+ black_list = [v for v in all_variables if v in uninitialized_variables]
+ output_graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, output_node_names,
+ variable_names_whitelist=white_list,
+ variable_names_blacklist=black_list)
+ variable_values = {}
+ for checkpoint_file in checkpoint_files:
+ log.debug("Loading {}".format(checkpoint_file))
+ with tf.Session() as sess:
+ var_list = {}
+ var_to_shape_map = tf.pywrap_tensorflow.NewCheckpointReader(checkpoint_file).get_variable_to_shape_map()
+ for key in var_to_shape_map:
+ try:
+ tensor = sess.graph.get_operation_by_name(key).outputs[0]
+ except KeyError:
+ continue
+ var_list[key] = tensor
+ tf.train.Saver(var_list=var_list).restore(sess, checkpoint_file)
+ for name, tensor in var_list.items():
+ variable_values[name] = sess.run(tensor)
+ return output_graph_def, variable_values
+
+
def freeze_checkpoint(graph_def, checkpoint, output_node_names):
"""
Replaces all the variables in a graph with constants of the same values.
@@ -40,6 +92,7 @@ def freeze_checkpoint(graph_def, checkpoint, output_node_names):
:return: GraphDef containing a simplified version of the original.
"""
tf.import_graph_def(graph_def, name="")
+
with tf.Session() as sess:
var_list = {}
var_to_shape_map = tf.pywrap_tensorflow.NewCheckpointReader(checkpoint).get_variable_to_shape_map()
@@ -54,7 +107,8 @@ def freeze_checkpoint(graph_def, checkpoint, output_node_names):
return output_graph_def
-def read_file_to_graph_def(graph_def: [tf.GraphDef, tf.MetaGraphDef], graph_file_name: str = "", is_binary: bool = True):
+def read_file_to_graph_def(graph_def: [tf.GraphDef, tf.MetaGraphDef], graph_file_name: str = "",
+ is_binary: bool = True):
"""
Reads file to protobuf
:param graph_def: GraphDef orr MetaGraphDef object to store the network
@@ -141,16 +195,22 @@ def load_tf_graph_def(graph_file_name: str = "", is_binary: bool = True, checkpo
'--input_checkpoint "path/to/*.ckpt"'
'\n\n2. For "*.meta" file:'
'\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
-
+ variables_values = {}
try:
if graph_file_name and not meta_graph_file and not checkpoint:
# frozen graph
- return read_file_to_graph_def(graph_def, graph_file_name, is_binary)
+ return read_file_to_graph_def(graph_def, graph_file_name, is_binary), variables_values
if graph_file_name and not meta_graph_file and checkpoint:
# inference graph and checkpoint
graph_def = read_file_to_graph_def(graph_def, graph_file_name, is_binary)
outputs = get_output_node_names_list(graph_def, user_output_node_names_list)
- return freeze_checkpoint(graph_def=graph_def, checkpoint=checkpoint, output_node_names=outputs)
+ if os.path.isfile(checkpoint):
+ graph_def = freeze_checkpoint(graph_def=graph_def, checkpoint=checkpoint, output_node_names=outputs)
+ elif os.path.isdir(checkpoint):
+ graph_def, variables_values = freeze_checkpoints(graph_def=graph_def, checkpoint_dir=checkpoint,
+ output_node_names=outputs)
+ # we are sure that checkpoint is existing file or directory due to cli_parser configuration
+ return graph_def, variables_values
if not graph_file_name and meta_graph_file:
meta_graph_file = deducing_metagraph_path(meta_graph_file)
input_meta_graph_def = read_file_to_graph_def(tf.MetaGraphDef(), meta_graph_file, is_binary)
@@ -159,14 +219,16 @@ def load_tf_graph_def(graph_file_name: str = "", is_binary: bool = True, checkpo
restorer = tf.train.import_meta_graph(input_meta_graph_def)
restorer.restore(sess, re.sub('\.meta$', '', meta_graph_file))
outputs = get_output_node_names_list(input_meta_graph_def.graph_def, user_output_node_names_list)
- return tf.graph_util.convert_variables_to_constants(sess, input_meta_graph_def.graph_def, outputs)
+ graph_def = tf.graph_util.convert_variables_to_constants(sess, input_meta_graph_def.graph_def, outputs)
+ return graph_def, variables_values
if model_dir:
# saved model directory
tags = saved_model_tags if saved_model_tags is not None else [tf.saved_model.tag_constants.SERVING]
with tf.Session() as sess:
meta_graph_def = tf.saved_model.loader.load(sess, tags, model_dir)
outputs = get_output_node_names_list(meta_graph_def.graph_def, user_output_node_names_list)
- return tf.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, outputs)
+ graph_def = tf.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, outputs)
+ return graph_def, variables_values
except Exception as e:
raise FrameworkError('Cannot load input model: {}', e) from e
raise Error("Unknown configuration of input model parameters")
@@ -194,3 +256,24 @@ def protobuf2nx(pb: tf.GraphDef):
index = index + 1
return graph
+
+
+def variables_to_constants(graph: nx.MultiDiGraph, variables_values: dict):
+ """
+ Converts `Variable<V2>` operations to FakeConst operations with `value` from `variables_values` dictionary
+ :param graph: graph to operate on
+ :param variables_values: dictionary with variable names as keys and np.array data as values
+ """
+ variable_operations = ['Variable', 'VariableV2']
+ for node_name in graph.nodes():
+ node_attr_dict = graph.node[node_name]
+ if 'op' not in node_attr_dict:
+ continue
+ op_name = node_attr_dict['op']
+ if op_name not in variable_operations:
+ continue
+ if node_name not in variables_values:
+ log.debug("There is no value for '{}': {} in checkpoint variable values".format(op_name, node_name))
+ continue
+ graph.node[node_name]['op'] = 'FakeConst'
+ graph.node[node_name]['value'] = variables_values[node_name]