"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
import numpy as np
from mo.front.common.partial_infer.transpose import transpose_infer
from mo.front.extractor import attr_getter
from mo.ops.op import Op


class Pad(Op):
    ''' Pad operation that explicitly extends an input tensor at edges.

        This operation frequently appears in TF and rarely in ONNX models,
        followed by some windowed operation like convolution or pooling.
        The operation extends every dimension of the input tensor (not only
        the spatial ones) with new elements, increasing the output shape.
        The filling value is defined by the 'mode' and 'fill_value' attributes,
        but usually it is zero padding.

        The operation has two forms: with one or with two input arguments.
        The first argument is the input tensor to be padded. The optional
        second argument holds the padding values and has shape Nx2, where N is
        the number of dimensions of the input tensor:

        [[pad_begin_dim1, pad_end_dim1],
         [pad_begin_dim2, pad_end_dim2],
         ...
         [pad_begin_dimN, pad_end_dimN]]

        where pad_begin_dimX and pad_end_dimX are padding margins in elements.
        If the second input argument is omitted, the padding is taken from the
        'pads' attribute in the same format.
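
        For example (shapes chosen purely for illustration), an input of shape
        [2, 5] padded with pads = [[1, 0], [2, 3]] produces an output of shape
        [3, 10]: each dimension grows by the sum of its begin and end padding.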
'''
op = 'Pad'
enabled = True

    def __init__(self, graph: nx.MultiDiGraph, attrs: dict):
super().__init__(graph, {
# no 'type' as this operation is not directly supported by IE
'op': __class__.op,
'infer': __class__.infer,
'mode': 'constant',
'fill_value': float(0),
'pads': None
}, attrs)

    def supported_attrs(self):
        return ['mode', 'fill_value', 'pads']

    def backend_attrs(self):
        # this operation should not be translated to an IE layer
        return []

    @staticmethod
def infer(node):
        if node.has_valid('pads'):
            assert len(node.in_nodes()) == 1, \
                "Pad operation has both the 'pads' attribute and an unexpected additional input argument for node {}.".format(node.name)
            padding = node.pads
        else:
            assert len(node.in_nodes()) == 2, \
                "Node {} has neither a second input argument nor the 'pads' attribute with the padding values.".format(node.name)
            padding = node.in_node(1).value
input_shape = node.in_node(0).shape
if padding is None or input_shape is None:
return None
        # paddings can be defined, partially defined or undefined
        # TODO: for now we only handle fully defined paddings, i.e. the
        # intermediate tensor that delivers the padding values should have
        # a defined value of shape Nx2
        # TODO: possible broadcasts are not supported
assert (padding.ndim == 2 and padding.shape[1] == 2)
        # make sure the input has as many dimensions as there are rows in the padding values
        assert (padding.shape[0] == len(input_shape)), \
            "Input tensor shape {} and pads shape {} do not match for Pad node {}".format(
                input_shape, padding.shape, node.name
            )
# sum low and high padding values to calculate the shape modification vector
shape_change = np.add.reduce(padding, 1)
assert (shape_change.shape == input_shape.shape)
        # preserve non-positive values in the input shape, because they have a special meaning
shape = np.array(
[shape_change[i] + input_shape[i] if input_shape[i] > 0 else input_shape[i] for i in range(len(input_shape))])
assert len(node.out_nodes()) == 1
node.out_node().shape = shape
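

# A minimal sketch (illustration only, not part of the operation) of the shape
# arithmetic performed by Pad.infer when the Nx2 padding tensor is fully defined:
#
#   import numpy as np
#   input_shape = np.array([1, 3, 10, 10])
#   pads = np.array([[0, 0], [0, 0], [2, 2], [1, 1]])
#   output_shape = input_shape + np.add.reduce(pads, 1)  # -> [1, 3, 14, 12]
#
# Non-positive input dimensions are kept unchanged by infer, since they carry
# a special meaning for shape inference.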